Commit d8e8db9f authored by Andrey Filippov

Made inference from the saved model

parent 3b6137be
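In outline, the commit splits the work into two scripts: the training-side script restores the trained checkpoint, re-saves the weights plus an exported meta graph under files['inference'], and the new infer_qcds_graph.py re-imports that graph and looks its inputs/outputs up by name. A condensed sketch of that save/load flow, assuming the TF 1.x API used throughout the diff (the tiny dense layer and the /tmp paths are stand-ins, not the real network or config paths):

```python
import os
import numpy as np
import tensorflow as tf  # TF 1.x API, as used in the diff

# --- export side (the modified training script) ---
ph_corr2d = tf.placeholder(tf.float32, (None, 325), name='ph_corr2d')       # named input
out = tf.identity(tf.layers.dense(ph_corr2d, 1), name='stage2_out_sparse')  # named output

saver = tf.train.Saver(tf.global_variables())
os.makedirs('/tmp/inference', exist_ok=True)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # saver.restore(sess, files['checkpoints'])  # the real script restores trained weights here
    saver.save(sess, '/tmp/inference/model')                 # weights
    tf.train.export_meta_graph('/tmp/inference/model.meta')  # graph definition

# --- inference side (the new infer_qcds_graph.py) ---
tf.reset_default_graph()
with tf.Session() as sess:
    infer_saver = tf.train.import_meta_graph('/tmp/inference/model.meta')
    graph = tf.get_default_graph()
    ph = graph.get_tensor_by_name('ph_corr2d:0')
    result = graph.get_tensor_by_name('stage2_out_sparse:0')
    infer_saver.restore(sess, '/tmp/inference/model')
    print(sess.run(result, feed_dict={ph: np.zeros((4, 325), np.float32)}))
```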
@@ -4,15 +4,14 @@ __copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#Builds (and saves) the inference model from the one trained by nn_ds_neibs21.py
#Model and weights are used by the inference-only infer_qcds_graph.py
import os
import sys
import numpy as np
import time
import shutil
from threading import Thread
import qcstereo_network
import qcstereo_losses
import qcstereo_functions as qsf
import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops
@@ -120,26 +119,10 @@ image_data = qsf.initImageData( # just use image_data[0]
infer = True,
keep_gt = True) # to generate same output files
ph_corr2d = tf.placeholder(np.float32, (None,FEATURES_PER_TILE), name = 'ph_corr2d')
ph_target_disparity = tf.placeholder(np.float32, (None,1), name = 'ph_target_disparity')
ph_ntile = tf.placeholder(np.int32, (None,), name = 'ph_ntile') #nTile
ph_ntile_out = tf.placeholder(np.int32, (None,), name = 'ph_ntile_out') #which tiles should be calculated in stage2
"""
dataset_img = tf.data.Dataset.from_tensor_slices({
"corr2d": corr2d_placeholder,
"target_disparity": target_disparity_placeholder,
"xy": tile_xy_placeholder})
dataset_img = dataset_img.prefetch(image_data[0]['corr2d'].shape[0])
iterator_img = dataset_img.make_initializable_iterator()
next_element_tt = iterator_img.get_next()
"""
#No need to use datasets here = whole input even for the full frame( ~100MB) should fit
#corr2d9x325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE]) , tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1])],2)
tf_intile325 = tf.concat([ph_corr2d, ph_target_disparity],axis=1,name="tf_intile325") # [?,325]
@@ -156,7 +139,8 @@ Probably ResourceVariable is not needed here because of the tf.scatter_update()
If collection is not provided, it defaults to [GraphKeys.GLOBAL_VARIABLES], and that in turn fails saver.restore() as this variable was not available in the trained model
"""
#rv_stage1_out = resource_variable_ops.ResourceVariable(
rv_stage1_out = tf.Variable(
np.zeros([HEIGHT * WIDTH, NN_LAYOUT1[-1]]),
## collections = [],
collections = [GraphKeys.LOCAL_VARIABLES], # Works, available with tf.local_variables()
@@ -188,7 +172,7 @@ tf_stage2_in_sparse = tf.gather(tf_stage2_in, indices= ph_ntile_out, axis=0, nam
#ext=np.concatenate([aextv[:1,:,:]]*1 + [aextv] + [aextv[-1:,:,:]]*3,axis = 0)
with tf.name_scope("Disparity_net"): # to have the same scope for weight/biases?
ns, _ = qcstereo_network.network_sub(tf_intile325,
input_global = [None,ph_target_disparity][SPREAD_CONVERGENCE], # input_global[:,i,:],
layout= NN_LAYOUT1,
@@ -203,24 +187,41 @@ with tf.name_scope("Siam_net"): # to have the same scope for weight/biases?
with tf.control_dependencies([update]):
stage1done = tf.constant(1, dtype=tf.int32, name="stage1done")
pass
stage2_out_sparse0 = qcstereo_network.network_inter (
input_tensor = tf_stage2_in_sparse,
input_global = None, # [None, ig][inter_convergence], # optionally feed all convergence values (from each tile of a cluster)
layout = NN_LAYOUT2,
reuse = False,
use_confidence = False)
stage2_out_sparse = tf.identity(stage2_out_sparse0, name = 'stage2_out_sparse')
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full0 = qcstereo_network.network_inter (
input_tensor = tf_stage2_in,
input_global = None, # [None, ig][inter_convergence], # optionally feed all convergence values (from each tile of a cluster)
layout = NN_LAYOUT2,
reuse = True,
use_confidence = False)
stage2_out_full = tf.identity(stage2_out_full0, name = 'stage2_out_full')
pass
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
"""
This is needed if ResourceVariable is used - then i/o tensor names somehow disappeared
and were replaced by 'Placeholder_*'
collection_io = 'collection_io'
tf.add_to_collection(collection_io, ph_corr2d)
tf.add_to_collection(collection_io, ph_target_disparity)
tf.add_to_collection(collection_io, ph_ntile)
tf.add_to_collection(collection_io, ph_ntile_out)
tf.add_to_collection(collection_io, stage1done)
tf.add_to_collection(collection_io, stage2_out_sparse)
"""
##saver=tf.train.Saver()
saver=tf.train.Saver(tf.global_variables())
saver_def = saver.as_saver_def()
pass
"""
saver_def = saver.as_saver_def()
@@ -238,11 +239,18 @@ saver_def.restore_op_name= save/restore_all
saver_def.save_tensor_name= save/control_dependency:0
print(saver.save(sess, files["checkpoints"]))
"""
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except:
pass
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver.restore(sess, files["checkpoints"])
saver.save(sess, files["inference"]) #TODO: move to different subdir
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
@@ -288,12 +296,6 @@ with tf.Session() as sess:
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
# dbg_cost_nw.reshape(-1,1),
# dbg_cost_w.reshape(-1,1),
# dbg_d.reshape(-1,1),
# dbg_avg_disparity.reshape(-1,1),
# dbg_gt_disparity.reshape(-1,1),
# dbg_offs.reshape(-1,1)
],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf) # (re-loads results). Only uses first 4 layers
@@ -305,7 +307,8 @@ with tf.Session() as sess:
"""
image_data[nimg] = None
meta_graph_def = tf.train.export_meta_graph(files["inference"]+'.meta')
if lf:
lf.close()
...
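A side note on the rv_stage1_out change above: the stage-1 output buffer is created with collections=[GraphKeys.LOCAL_VARIABLES] so that the Saver built from tf.global_variables() neither writes it into the inference checkpoint nor expects it when restoring the trained one (the failure mode described in the hunk's comment). A minimal, self-contained sketch of that behaviour under the TF 1.x API; the variable names, shapes, and paths here are made up:

```python
import os
import numpy as np
import tensorflow as tf  # TF 1.x API

w = tf.Variable(np.ones(3, np.float32), name='w')             # ordinary global variable
buf = tf.Variable(np.zeros((10, 16), np.float32),
                  collections=[tf.GraphKeys.LOCAL_VARIABLES],
                  name='rv_stage1_out')                        # scratch buffer, local only

saver = tf.train.Saver(tf.global_variables())                  # sees 'w' but not 'rv_stage1_out'
print([v.op.name for v in tf.global_variables()])              # ['w']
print([v.op.name for v in tf.local_variables()])               # ['rv_stage1_out']

os.makedirs('/tmp/demo_ckpt', exist_ok=True)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())                 # local variables need their own init
    saver.save(sess, '/tmp/demo_ckpt/model')                   # checkpoint contains only 'w'
```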
#!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
# Just inference, currently uses /data_ssd/data_sets/tf_data_5x5_main_13_heur/inference/
import os
import sys
import numpy as np
import time
import shutil
##import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
#from tensorflow.python.ops import resource_variable_ops
#tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
"""
Next gets globals from the config file
"""
globals().update(parameters)
WIDTH = 324
HEIGHT = 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
##NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except:
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
cluster_radius = CLUSTER_RADIUS
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except:
pass
with tf.Session() as sess:
infer_saver = tf.train.import_meta_graph(files["inference"]+'.meta')
graph=tf.get_default_graph()
ph_corr2d = graph.get_tensor_by_name('ph_corr2d:0')
ph_target_disparity = graph.get_tensor_by_name('ph_target_disparity:0')
ph_ntile = graph.get_tensor_by_name('ph_ntile:0')
ph_ntile_out = graph.get_tensor_by_name('ph_ntile_out:0')
stage1done = graph.get_tensor_by_name('Disparity_net/stage1done:0') #<tf.Operation 'Siam_net/stage1done' type=Const>,
stage2_out_sparse = graph.get_tensor_by_name('Disparity_net/stage2_out_sparse:0')#not found
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full = graph.get_tensor_by_name('Disparity_net/stage2_out_full:0')
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
infer_saver.restore(sess, files["inference"]) # after initializers, of course
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
lf=open(LOGPATH,"w") #overwrite previous (or make it "a"?)
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # [?,324]
img_target = dataset_img['target_disparity'] # [?,1]
img_ntile = dataset_img['ntile'].reshape([-1])
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
result_file = files['result'][nimg].replace('.npy','-infer.npy') #not to overwrite training result files that are more complete
try:
os.makedirs(os.path.dirname(result_file))
except:
pass
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
"""
Remove dataset_img (if it is not [0]) to reduce memory footprint
"""
image_data[nimg] = None
if lf:
lf.close()
writer.close()
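The tf.identity wrappers added in the training script ('stage2_out_sparse', 'stage2_out_full') exist so that the get_tensor_by_name() lookups above have stable names to find; the enclosing name_scope becomes part of the tensor name, which is where 'Disparity_net/stage2_out_sparse:0' comes from. A small stand-alone illustration, again assuming TF 1.x (the dense layer is only a stand-in for qcstereo_network.network_inter):

```python
import tensorflow as tf  # TF 1.x API

x = tf.placeholder(tf.float32, (None, 325), name='ph_corr2d')
with tf.name_scope("Disparity_net"):
    raw = tf.layers.dense(x, 1)                        # stand-in for the real inter-tile network
    out = tf.identity(raw, name='stage2_out_sparse')   # gives the output a fixed, scoped name

print(out.name)  # Disparity_net/stage2_out_sparse:0
same = tf.get_default_graph().get_tensor_by_name('Disparity_net/stage2_out_sparse:0')
assert same is out
```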
@@ -131,6 +131,10 @@
<checkpoints>
"tf_data_5x5_main_13_heur/checkpoints"
</checkpoints>
<inference>
"tf_data_5x5_main_13_heur/inference"
</inference>
</directories>
<files>
<train_lvar>
@@ -330,6 +334,9 @@
<checkpoints>
"model_checkpoints"
</checkpoints>
<inference>
"model"
</inference>
</files>
</properties>
...
@@ -118,11 +118,19 @@ def prepareFiles(dirs, files, suffix):
for i, path in enumerate(files['images']):
result_files.append(os.path.join(dirs['result'], path+"_"+suffix+'.npy'))
files['result'] = result_files
if not 'checkpoints' in files:
files['checkpoints'] = 'checkpoints'
if not 'checkpoints' in dirs:
dirs['checkpoints'] = dirs['result']
files['checkpoints'] = os.path.join(dirs['checkpoints'], files['checkpoints'])
if not 'inference' in files:
files['inference'] = 'inference'
if not 'inference' in dirs:
dirs['inference'] = dirs['result']
files['inference'] = os.path.join(dirs['inference'], files['inference'])
if not 'figures' in dirs:
dirs['figures'] = os.path.join(dirs['result'],"figs")
...
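For completeness: with the <inference> entries added to the config above, the new branch in prepareFiles() resolves files['inference'] the same way the existing checkpoints logic does, ending up with something like tf_data_5x5_main_13_heur/inference/model as the prefix passed to saver.save() and infer_saver.restore(). A toy illustration of that defaulting (the dict contents below are illustrative, not the full parsed config):

```python
import os

def resolve_inference(dirs, files):
    # Mirrors the new lines in prepareFiles(): default name, default directory, then join.
    if 'inference' not in files:
        files['inference'] = 'inference'
    if 'inference' not in dirs:
        dirs['inference'] = dirs['result']
    files['inference'] = os.path.join(dirs['inference'], files['inference'])

dirs = {'result': 'results', 'inference': 'tf_data_5x5_main_13_heur/inference'}
files = {'inference': 'model'}
resolve_inference(dirs, files)
print(files['inference'])  # tf_data_5x5_main_13_heur/inference/model
```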