Commit 466ed6b1 authored by Andrey Filippov

LWIR-related changes

parent ddd193c9
@@ -180,7 +180,7 @@ def writeTFRecordsFromImageSet(
         extra = np.concatenate((
             img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
-            img_gt_aux.image[...,ijt.IJFGBG.FG_STR].reshape(-1,1),
+            img_gt_aux.image[...,ijt.IJFGBG.FG_DISP].reshape(-1,1),
             img_gt_aux.image[...,ijt.IJFGBG.BG_DISP].reshape(-1,1),
             img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
             img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1)
...
#!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
'''
** Kind of obsolete now, can be used for testing **
Just inference, currently uses /data_ssd/data_sets/tf_data_5x5_main_13_heur/inference/
'''
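# Typical invocation (hypothetical paths, shown for illustration only):
#   python3 <this_script> /path/to/config.xml [/path/to/data_root]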
import os
import sys
import numpy as np
import time
import shutil
##import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
#from tensorflow.python.ops import resource_variable_ops
#tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
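    # with no second argument, data files are resolved relative to the config file's directory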
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
"""
Next gets globals from the config file
"""
globals().update(parameters)
WIDTH = 324
HEIGHT = 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80; each batch has a balanced mix of D/S tiles; batches are shuffled relative to each other, but tiles inside a batch are not
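# Arithmetic check: [1,2][TWO_TRAINS] selects 2 when TWO_TRAINS is truthy, else 1;
# with a single train set this is 1*2*1000//25 == 80 tiles per batch (160 with two).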
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
##NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
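# Cluster geometry: a cluster is a (2*R+1) x (2*R+1) square of tiles (R == CLUSTER_RADIUS);
# flattened row-major, its center lands at R*(2*R+1) + R == 2*R*(R+1),
# which is what center_tile_index computes.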
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
"""
Next is tag for pb (pb == protocol buffer) model
"""
#PB_TAGS = ["model_pb"]
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except OSError: # the result directory may already exist
    pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
cluster_radius = CLUSTER_RADIUS
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except OSError: # the directory may already exist
    pass
with tf.Session() as sess:
# Actually, refresh all the time and have an extra script to restore from it.
# use_Saved_Model = False
#if os.path.isdir(dirs['exportdir']):
# # check if dir contains "Saved Model" model
# use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
#if use_saved_model:
# print("Model restore: using Saved_Model model MetaGraph protocol buffer")
# meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
#else:
meta_graph_source = files["inference"]+'.meta'
print("Model restore: using conventionally saved model, but saving Saved Model for the next run")
print("MetaGraph source = "+str(meta_graph_source))
infer_saver = tf.train.import_meta_graph(meta_graph_source)
graph=tf.get_default_graph()
ph_corr2d = graph.get_tensor_by_name('ph_corr2d:0')
ph_target_disparity = graph.get_tensor_by_name('ph_target_disparity:0')
ph_ntile = graph.get_tensor_by_name('ph_ntile:0')
ph_ntile_out = graph.get_tensor_by_name('ph_ntile_out:0')
stage1done = graph.get_tensor_by_name('Disparity_net/stage1done:0') #<tf.Operation 'Siam_net/stage1done' type=Const>,
stage2_out_sparse = graph.get_tensor_by_name('Disparity_net/stage2_out_sparse:0')#not found
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full = graph.get_tensor_by_name('Disparity_net/stage2_out_full:0')
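    # Two-stage flow restored from the MetaGraph (as exercised in the loop below):
    # stage 1 is fed per-tile 2D correlations plus target disparity and apparently
    # stages its intermediate results between the two sess.run calls; stage 2 is
    # then fed only tile indices and emits the sparse disparity output.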
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
infer_saver.restore(sess, files["inference"])
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
lf=open(LOGPATH,"w") #overwrite previous (or make it "a"?
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # (?,324)
img_target = dataset_img['target_disparity'] # (?,1)
img_ntile = dataset_img['ntile'].reshape([-1]) # (?) - 0...78k int32
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
        result_file = files['result'][nimg].replace('.npy','-infer.npy') # avoid overwriting training result files, which are more complete
try:
os.makedirs(os.path.dirname(result_file))
        except OSError: # the directory may already exist
            pass
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
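        # saved as (HEIGHT, WIDTH, slices); the TIFF export below labels the
        # slices with SLICE_LABELS in the same order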
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
"""
Remove dataset_img (if it is not [0] to reduce memory footprint
"""
image_data[nimg] = None
"""
Save MetaGraph to Saved_Model in *.pb (protocol buffer) format to
be able to use from Java
"""
# force clean
shutil.rmtree(dirs['exportdir'], ignore_errors=True)
builder = tf.saved_model.builder.SavedModelBuilder(dirs['exportdir'])
builder.add_meta_graph_and_variables(sess,[tf.saved_model.tag_constants.SERVING],main_op=tf.local_variables_initializer())
builder.save(False) # True = *.pbtxt, False = *.pb
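    # After builder.save(False), dirs['exportdir'] holds saved_model.pb plus a
    # variables/ subdirectory, the standard SavedModel layout that the companion
    # SavedModel-loading script below restores from.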
if lf:
lf.close()
writer.close()
#!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
# Just inference, currently uses /data_ssd/data_sets/tf_data_5x5_main_13_heur/inference/
# TODO: Update for LWIR!
import os
import sys
import numpy as np
import time
import shutil
##import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
#from tensorflow.python.ops import resource_variable_ops
#tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
"""
Next gets globals from the config file
"""
globals().update(parameters)
WIDTH = 324
HEIGHT = 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80; each batch has a balanced mix of D/S tiles; batches are shuffled relative to each other, but tiles inside a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
##NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
"""
Next is tag for pb (pb == protocol buffer) model
"""
#PB_TAGS = ["model_pb"]
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except OSError: # the result directory may already exist
    pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
cluster_radius = CLUSTER_RADIUS
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except OSError: # the directory may already exist
    pass
with tf.Session() as sess:
# Actually, refresh all the time and have an extra script to restore from it.
# use_Saved_Model = False
#if os.path.isdir(dirs['exportdir']):
# # check if dir contains "Saved Model" model
# use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
#if use_saved_model:
# print("Model restore: using Saved_Model model MetaGraph protocol buffer")
# meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
#else:
use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
if not use_saved_model:
print("ERROR: Saved_Model not found. Run previous script to create it.")
sys.exit()
meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
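    # tf.saved_model.loader.load() returns the MetaGraphDef it restored;
    # tf.train.import_meta_graph() accepts that object directly as an
    # alternative to a *.meta file path.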
infer_saver = tf.train.import_meta_graph(meta_graph_source)
graph=tf.get_default_graph()
ph_corr2d = graph.get_tensor_by_name('ph_corr2d:0')
ph_target_disparity = graph.get_tensor_by_name('ph_target_disparity:0')
ph_ntile = graph.get_tensor_by_name('ph_ntile:0')
ph_ntile_out = graph.get_tensor_by_name('ph_ntile_out:0')
stage1done = graph.get_tensor_by_name('Disparity_net/stage1done:0') #<tf.Operation 'Siam_net/stage1done' type=Const>,
stage2_out_sparse = graph.get_tensor_by_name('Disparity_net/stage2_out_sparse:0')#not found
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full = graph.get_tensor_by_name('Disparity_net/stage2_out_full:0')
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
#infer_saver.restore(sess, dirs['exportdir'])
#infer_saver.restore(sess, files["inference"])
infer_saver.restore(sess,dirs['exportdir']+'/variables/variables')
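    # 'variables/variables' is the checkpoint prefix inside the SavedModel's
    # variables/ subdirectory (variables.index, variables.data-*).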
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
lf=open(LOGPATH,"w") #overwrite previous (or make it "a"?
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # (?,324)
img_target = dataset_img['target_disparity'] # (?,1)
img_ntile = dataset_img['ntile'].reshape([-1]) # (?) - 0...78k int32
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
        result_file = files['result'][nimg].replace('.npy','-infer.npy') # avoid overwriting training result files, which are more complete
try:
os.makedirs(os.path.dirname(result_file))
        except OSError: # the directory may already exist
            pass
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
"""
Remove dataset_img (if it is not [0] to reduce memory footprint
"""
image_data[nimg] = None
if lf:
lf.close()
writer.close()
@@ -20,7 +20,7 @@ import tensorflow as tf
 qsf.TIME_START = time.time()
 qsf.TIME_LAST = qsf.TIME_START
-IMG_WIDTH = 20 # 324 # tiles per image row
+#IMG_WIDTH = 20 # 324 # tiles per image row Defined in config
 DEBUG_LEVEL= 1
 try:
@@ -59,6 +59,8 @@ CHECKPOINT_PERIOD = None
 TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
 TEST_TITLES = None
 LOGFILE="results.txt"
+IMG_WIDTH = 20
+IMG_HEIGHT = 15
 """
 Next gets globals from the config file
 """
@@ -70,8 +72,8 @@ qsf.setCorr2Limits(CORR2D_LIMITS) # limit min/max 2d correlation tiles values
 #exit(0)
-WIDTH = 20 # 324
-HEIGHT = 15 # 242
+#WIDTH = 20 # 324
+#HEIGHT = 15 # 242
 TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
 FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
 BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
@@ -93,11 +95,6 @@ NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
 USE_PARTIALS = not PARTIALS_WEIGHTS is None # False - just a single Siamese net, True - partial outputs that use concentric squares of the first level subnets
 # Tiff export slice labels
-SLICE_LABELS = ["nn_out_ext","target_disp","gt_disparity","gt_strength",
-                "cutcorn_cost_nw","cutcorn_cost",
-                "gt_avg_dist","avg8_disp","gt_disp","out_avg",
-                "aux_disp","fg_disp","bg_disp","gt_rms","gt_rms_split"]
 ##############################################################################
 cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
 center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
@@ -130,7 +127,7 @@ qsf.evaluateAllResults(result_files = files['result'],
                        absolute_disparity = ABSOLUTE_DISPARITY,
                        cluster_radius = CLUSTER_RADIUS,
                        fgbg_mode= FGBG_MODE,
-                       labels = SLICE_LABELS,
+                       labels = qsf.SLICE_LABELS,
                        logpath= LOGPATH)
 image_data = qsf.initImageData(
@@ -328,8 +325,8 @@ with tf.name_scope('epoch_average'):
     tf.compat.v1.summary.scalar("sq_diff_epoch", tf_ph_sq_diff)
     tf.compat.v1.summary.scalar("gtvar_diff", tf_gtvar_diff)
-    tf.compat.v1.summary.scalar("img_test0", tf_img_test0)
-    tf.compat.v1.summary.scalar("img_test9", tf_img_test9)
+    tf.compat.v1.summary.scalar("Disparity error", tf_img_test0)
+    tf.compat.v1.summary.scalar("NN gain over heuristic", tf_img_test9)
 trainable_vars= tf.trainable_variables()
 lr= tf.compat.v1.placeholder(tf.float32)
@@ -425,7 +422,7 @@ with tf.Session() as sess:
     gtvar_test_hist= np.empty(dataset_test_size, dtype=np.float32)
     gtvar_train = 0.0
     gtvar_test = 0.0
-    img_gain_test0 = 1.0
+    img_gain_test0 = 0.2
     img_gain_test9 = 1.0
     thr=None
@@ -603,14 +600,14 @@ with tf.Session() as sess:
         # Read the full image
         ###################################################
 ##      test_summaries_img = [0.0]*len(ind_img) # datasets_img)
-        disp_out= np.empty((WIDTH*HEIGHT), dtype=np.float32)
-        dbg_cost_nw= np.empty((WIDTH*HEIGHT), dtype=np.float32)
-        dbg_cost_w= np.empty((WIDTH*HEIGHT), dtype=np.float32)
-        dbg_d= np.empty((WIDTH*HEIGHT), dtype=np.float32)
-        dbg_avg_disparity = np.empty((WIDTH*HEIGHT), dtype=np.float32)
-        dbg_gt_disparity = np.empty((WIDTH*HEIGHT), dtype=np.float32)
-        dbg_offs = np.empty((WIDTH*HEIGHT), dtype=np.float32)
+        disp_out= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
+        dbg_cost_nw= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
+        dbg_cost_w= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
+        dbg_d= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
+        dbg_avg_disparity = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
+        dbg_gt_disparity = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
+        dbg_offs = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
         for ntest in ind_img: # datasets_img):
             dataset_img = qsf.readImageData(
@@ -676,27 +673,27 @@ with tf.Session() as sess:
                 dbg_avg_disparity.reshape(-1,1),
                 dbg_gt_disparity.reshape(-1,1),
                 dbg_offs.reshape(-1,1),
-                extra, # len 3..6,
-                #adding extra data layers
+                extra, # len 3..6, #adding extra data layers
                 ],1)
             num_slices = rslt.shape[1]
             np.save(
                 result_file,
-                rslt.reshape(HEIGHT,WIDTH,-1))
+                rslt.reshape(IMG_HEIGHT, IMG_WIDTH,-1))
             eval_rslt = qsf.eval_results(
                 result_file,
                 ABSOLUTE_DISPARITY,
                 radius=0, # CLUSTER_RADIUS,
                 last_fgbg_mode = 1,
                 logfile=lf)
-            img_gain_test0 = eval_rslt[0][0]/eval_rslt[0][1]
+#            img_gain_test0 = eval_rslt[0][0]/eval_rslt[0][1]
+            img_gain_test0 = eval_rslt[9][1]
             img_gain_test9 = eval_rslt[9][0]/eval_rslt[9][1]
             if SAVE_TIFFS:
                 qsf.result_npy_to_tiff(
                     result_file,
                     ABSOLUTE_DISPARITY,
                     fix_nan = True,
-                    labels=SLICE_LABELS[0:num_slices],
+                    labels=qsf.SLICE_LABELS[0:num_slices],
                     logfile=lf)
             """
...