Commit ddd193c9 authored by Andrey Filippov

more lwir-related updates

parent e3d871ba
Five source diffs could not be displayed because they are too large. You can view the blobs instead.
@@ -1693,7 +1693,7 @@ if __name__ == "__main__":
     VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
     NUM_TRAIN_SETS = 32 # 8
-    FGBGMODE_TEST = 3 # 0 - average, 1 - FG, 2 - BG, 3 - AUX
+    FGBGMODE_TESTS = [1,3] # 0 - average, 1 - FG, 2 - BG, 3 - AUX
     FGBGMODE_TRAIN = 1 # 0 - average, 1 - FG, 2 - BG
     RND_AMPLIUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
     RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
@@ -1725,12 +1725,12 @@ if __name__ == "__main__":
     ''' Prepare full image for testing '''
     for model_ml_path in test_sets:
-        writeTFRecordsFromImageSet(
-            model_ml_path,     # model/version/ml_dir
-            FGBGMODE_TEST,     # 0, # expot_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
-            RND_AMPLIUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
-            pathTFR)           # TFR directory
+        for fgbgmode_test in FGBGMODE_TESTS:
+            writeTFRecordsFromImageSet(
+                model_ml_path,     # model/version/ml_dir
+                fgbgmode_test,     # 0, # expot_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
+                RND_AMPLIUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
+                pathTFR)           # TFR directory
 #                disp_bins = 20,
...
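With the scalar FGBGMODE_TEST replaced by the list FGBGMODE_TESTS, each test image set is now exported once per ground-truth mode. A minimal, self-contained sketch of the resulting behavior (paths and the exporter stub are hypothetical, standing in for the script's real writeTFRecordsFromImageSet):

    FGBGMODE_TESTS = [1, 3]   # 1 - FG ground truth, 3 - AUX disparity
    test_sets = ["model_a/ml", "model_b/ml"]               # hypothetical set paths
    def writeTFRecordsFromImageSet(path, mode, rnd, tfr):  # stub for the real exporter
        print("export %s with fgbg mode %d" % (path, mode))
    for model_ml_path in test_sets:
        for fgbgmode_test in FGBGMODE_TESTS:
            # each set yields one TFRecord file per entry in FGBGMODE_TESTS
            writeTFRecordsFromImageSet(model_ml_path, fgbgmode_test, 0.5, "tfr/")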
@@ -49,6 +49,7 @@ TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
 ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
 SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
 CLUSTER_RADIUS = None
+FGBG_MODE = 1 # 0 - do not filter by single-plane, 1 - remove split plabnes tiles, 2 - remove split planes and neighbors
 PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
 USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
 LR600,LR400,LR200,LR100,LR = [None]*5
...
@@ -47,8 +47,10 @@ Defined in config file
 """
 TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
 ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
+CORR2D_LIMITS = [None, None]
 SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
 CLUSTER_RADIUS = None
+FGBG_MODE = 1 # 0 - do not filter by single-plane, 1 - remove split plabnes tiles, 2 - remove split planes and neighbors
 PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
 USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
 LR600,LR400,LR200,LR100,LR = [None]*5
@@ -64,7 +66,7 @@ globals().update(parameters)
 TRAIN_BUFFER_SIZE = TRAIN_BUFFER_GPU * TRAIN_BUFFER_CPU # in merged (quad) batches
+qsf.setCorr2Limits(CORR2D_LIMITS) # limit min/max 2d correlation tiles values
 #exit(0)
@@ -91,9 +93,10 @@ NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
 USE_PARTIALS = not PARTIALS_WEIGHTS is None # False - just a single Siamese net, True - partial outputs that use concentric squares of the first level subnets
 # Tiff export slice labels
-SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength",
+SLICE_LABELS = ["nn_out_ext","target_disp","gt_disparity","gt_strength",
                 "cutcorn_cost_nw","cutcorn_cost",
-                "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
+                "gt_avg_dist","avg8_disp","gt_disp","out_avg",
+                "aux_disp","fg_disp","bg_disp","gt_rms","gt_rms_split"]
 ##############################################################################
 cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
@@ -126,6 +129,7 @@ if not USE_PARTIALS:
 qsf.evaluateAllResults(result_files = files['result'],
                        absolute_disparity = ABSOLUTE_DISPARITY,
                        cluster_radius = CLUSTER_RADIUS,
+                       fgbg_mode= FGBG_MODE,
                        labels = SLICE_LABELS,
                        logpath= LOGPATH)
@@ -390,7 +394,7 @@ with tf.Session() as sess:
     sess.run(tf.global_variables_initializer())
     sess.run(tf.local_variables_initializer())
-    merged = tf.summary.merge_all()
+    merged = tf.compat.v1.summary.merge_all()
     tt_writers = []
     for p in TT_PATHS:
         tt_writers.append(tf.summary.FileWriter(p, sess.graph))
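The tf.compat.v1.summary.merge_all() alias keeps this TF1-style graph/session code working when a newer TensorFlow is installed. A minimal sketch, assuming TF >= 1.13 (where the compat.v1 namespace exists); the eager-execution guard is an assumption about running under TF2:

    import tensorflow as tf
    if tf.__version__.startswith('2'):
        tf.compat.v1.disable_eager_execution()  # assumed: graph/session mode needed under TF2
    merged = tf.compat.v1.summary.merge_all()   # same op as tf.summary.merge_all() in TF1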
@@ -614,9 +618,9 @@ with tf.Session() as sess:
                 files = files,
                 indx = ntest,
                 cluster_radius = CLUSTER_RADIUS,
-                tile_layers = TILE_LAYERS,
-                tile_side = TILE_SIDE,
-                width = IMG_WIDTH,
+                tile_layers = TILE_LAYERS, # 4
+                tile_side = TILE_SIDE, # 9
+                width = IMG_WIDTH, #160
                 replace_nans = True)
             sess.run(iterator_tt.initializer, feed_dict={corr2d_train_placeholder: dataset_img['corr2d'],
@@ -659,24 +663,41 @@ with tf.Session() as sess:
                     os.makedirs(os.path.dirname(result_file))
                 except:
                     pass
+                extra = dataset_img['t_extra']
+                if extra is None:
+                    extra = np.array([dataset_img['gtruths'].shape[0],0])
                 rslt = np.concatenate(
                     [disp_out.reshape(-1,1),
-                     dataset_img['t_disps'], #t_disps[ntest],
+                     dataset_img['t_disps'], #t_disps[ntest], disp_out.shape[0],BATCH_SIZE
                      dataset_img['gtruths'], # gtruths[ntest],
                      dbg_cost_nw.reshape(-1,1),
                      dbg_cost_w.reshape(-1,1),
                      dbg_d.reshape(-1,1),
                      dbg_avg_disparity.reshape(-1,1),
                      dbg_gt_disparity.reshape(-1,1),
-                     dbg_offs.reshape(-1,1)],1)
-                np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
-                rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf)
-                img_gain_test0 = rslt[0][0]/rslt[0][1]
-                img_gain_test9 = rslt[9][0]/rslt[9][1]
+                     dbg_offs.reshape(-1,1),
+                     extra, # len 3..6,
+                     #adding extra data layers
+                     ],1)
+                num_slices = rslt.shape[1]
+                np.save(
+                    result_file,
+                    rslt.reshape(HEIGHT,WIDTH,-1))
+                eval_rslt = qsf.eval_results(
+                    result_file,
+                    ABSOLUTE_DISPARITY,
+                    radius=0, # CLUSTER_RADIUS,
+                    last_fgbg_mode = 1,
+                    logfile=lf)
+                img_gain_test0 = eval_rslt[0][0]/eval_rslt[0][1]
+                img_gain_test9 = eval_rslt[9][0]/eval_rslt[9][1]
                 if SAVE_TIFFS:
-                    qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
+                    qsf.result_npy_to_tiff(
+                        result_file,
+                        ABSOLUTE_DISPARITY,
+                        fix_nan = True,
+                        labels=SLICE_LABELS[0:num_slices],
+                        logfile=lf)
                 """
                 Remove dataset_img (if it is not [0] to reduce memory footprint
...
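Because the result stack now carries the extra ground-truth columns, the slice count is taken from the data itself and the TIFF labels are truncated to match. A small sketch of that bookkeeping (shapes are hypothetical; the label list is the one defined above):

    import numpy as np
    SLICE_LABELS = ["nn_out_ext","target_disp","gt_disparity","gt_strength",
                    "cutcorn_cost_nw","cutcorn_cost",
                    "gt_avg_dist","avg8_disp","gt_disp","out_avg",
                    "aux_disp","fg_disp","bg_disp","gt_rms","gt_rms_split"]
    rslt = np.zeros((80*64, 10 + 5))     # hypothetical: 10 base columns + 5 'extra' GT columns
    num_slices = rslt.shape[1]           # 15 - follows the data, not a constant
    labels = SLICE_LABELS[0:num_slices]  # label list never outruns the saved slices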
@@ -11,7 +11,8 @@ import time
 import imagej_tiffwriter
 TIME_LAST = 0
 TIME_START = 0
+corr2_limits = None
+MARGINS = 2 # disregard errors outside
 class bcolors:
     HEADER = '\033[95m'
     OKBLUE = '\033[94m'
@@ -58,7 +59,11 @@ def parseXmlConfig(conf_file, root_dir):
             files[p.tag]=eval(p.text.strip())
     dbg_parameters = {}
     for p in root.find('dbg_parameters'):
-        dbg_parameters[p.tag]=eval(p.text.strip())
+        try:
+            dbg_parameters[p.tag]=eval(p.text.strip())
+        except:
+            print("Error in xml - p.tag = %s, p.text.strip()=%s"%(p.tag, p.text.strip()))
+            continue
     return parameters, dirs, files, dbg_parameters
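With the new try/except, one malformed <dbg_parameters> entry no longer aborts the whole config parse. A self-contained sketch of the same control flow (hypothetical tag/text pairs stand in for the XML elements):

    dbg_parameters = {}
    for tag, text in [("disp_step", "0.5"), ("bad_entry", "0.5,,2.0")]:  # hypothetical entries
        try:
            dbg_parameters[tag] = eval(text.strip())
        except:
            print("Error in xml - p.tag = %s, p.text.strip()=%s"%(tag, text.strip()))
            continue  # skip the bad entry, keep parsing the rest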
@@ -312,7 +317,7 @@ def get_lengths(
     cluster_side = 2 * cluster_radius + 1
     cl = cluster_side * cluster_side * tile_layers * tile_side * tile_side
     tl = cluster_side * cluster_side
-    gl = cluster_side * cluster_side * 2 # disparity+strength, resto goes to extra
+    gl = cluster_side * cluster_side * 2 # disparity+strength, rest goes to extra
     return cl, tl, gl, cluster_side
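For the LWIR geometry hinted by the new comments above (TILE_LAYERS = 4, TILE_SIDE = 9, IMG_WIDTH = 160), get_lengths() gives, assuming cluster_radius = 0 for illustration:

    cluster_side = 2 * 0 + 1                      # 1
    cl = cluster_side * cluster_side * 4 * 9 * 9  # 324 corr2d values per cluster
    tl = cluster_side * cluster_side              # 1 target disparity
    gl = cluster_side * cluster_side * 2          # 2 - disparity + strength, rest goes to extra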
@@ -508,9 +513,16 @@ def readImageData(image_data,
                 width)
         if replace_nans:
             replace_nan([image_data[indx]])
+        if not (corr2_limits is None):
+            image_data[indx]['corr2d'] = np.clip(image_data[indx]['corr2d'], corr2_limits[0], corr2_limits[1])
     return image_data[indx]
 
+def setCorr2Limits(limits):
+    if not (limits is None) and ((not limits[0] is None) or (not limits[1] is None)):
+        globals()['corr2_limits'] = limits
+    else:
+        globals()['corr2_limits'] = None
+
 def initImageData(files,
                   max_imgs,
                   cluster_radius,
@@ -535,10 +547,10 @@ def initImageData(files,
                       replace_nans,
                       infer = infer,
                       keep_gt = keep_gt)
     print_time(" Done")
     return img_data
 
-def evaluateAllResults(result_files, absolute_disparity, cluster_radius, labels=None, logpath=None):
+def evaluateAllResults(result_files, absolute_disparity, cluster_radius, labels=None, logpath=None, fgbg_mode=1):
     if logpath:
         lf=open(logpath,"w")
     else:
@@ -546,7 +558,7 @@ def evaluateAllResults(result_files, absolute_disparity, cluster_radius, labels=
     for result_file in result_files:
         try:
             print_time("Reading resuts from "+result_file, end=" ")
-            eval_results(result_file, absolute_disparity, radius=cluster_radius, logfile=lf)
+            eval_results(result_file, absolute_disparity, radius=cluster_radius, last_fgbg_mode = fgbg_mode, logfile=lf)
         except:
             print_time(" - does not exist")
             continue
@@ -686,6 +698,129 @@ def result_npy_to_tiff(npy_path,
     imagej_tiffwriter.save(tiff_path,data,labels=labels)
 
 def eval_results(rslt_path, absolute,
+                 min_disp = -0.1, #minimal GT disparity
+                 max_disp = 20.0, # maximal GT disparity
+                 max_ofst_target = 1.0,
+                 max_ofst_result = 1.0,
+                 str_pow = 1.0,
+                 last_fgbg_mode = 1, # 0 - no fgbg filter, 1 exclude tiles with fg/bg, 2 exclude fg/bg tiles and neighbors
+                 radius = 0,
+                 logfile = None):
+    variants = [[     -0.1,      3.0,             0.5,             0.5,     0.0, 1],
+                [     -0.1,      3.0,             0.5,             0.5,     1.0, 1],
+                [     -0.1,      3.0,             0.5,             0.5,     0.0, 2],
+                [     -0.1,      3.0,             0.5,             0.5,     1.0, 2],
+                [     -0.1,      3.0,             0.5,             0.5,     1.0, 0],
+                [     -0.1,     10.0,             5.0,             5.0,     0.0, 1],
+                [     -0.1,     10.0,             5.0,             5.0,     1.0, 1],
+                [     -0.1,     10.0,             5.0,             5.0,     1.0, 2],
+                [     -0.1,     10.0,             5.0,             5.0,     1.0, 0],
+                [ min_disp, max_disp, max_ofst_target, max_ofst_result, str_pow, last_fgbg_mode]]
+    stack = np.load(rslt_path)
+    layers = {
+        "nn_out_ext":      stack[..., 0],
+        "target_disp":     stack[..., 1], # used as target disparity, it is not heuristic data!
+        "gt_disparity":    stack[..., 2],
+        "gt_strength":     stack[..., 3],
+        "cutcorn_cost_nw": stack[..., 4],
+        "cutcorn_cost":    stack[..., 5],
+        "gt_avg_dist":     stack[..., 6],
+        "avg8_disp":       stack[..., 7],
+        "gt_disp":         stack[..., 8],
+        "out_avg":         stack[..., 9],
+        "aux_disp":        stack[...,10],
+        "fg_disp":         stack[...,11],
+        "bg_disp":         stack[...,12],
+        "gt_rms":          stack[...,13],
+        "gt_rms_split":    stack[...,14],
+    }
+    '''
+    SLICE_LABELS = ["nn_out_ext","target_disp","gt_disparity","gt_strength",
+                    "cutcorn_cost_nw","cutcorn_cost",
+                    "gt_avg_dist","avg8_disp","gt_disp","out_avg",
+                    "aux_disp","fg_disp","bg_disp","gt_rms","gt_rms_split"]
+    MARGINS = 2 # disregard errors outside
+    '''
+    fgbg_single = layers["gt_rms"] <= layers["gt_rms_split"]
+    fgbg_ext = 1
+    fgbg_single_ext = np.ones((stack.shape[0] + 2 * fgbg_ext, stack.shape[1] + 2 * fgbg_ext),dtype=np.bool)
+    fgbg_single_ext[fgbg_ext:-fgbg_ext, fgbg_ext:-fgbg_ext] = fgbg_single
+    for dy in range(2*fgbg_ext+1):
+        for dx in range(2*fgbg_ext+1):
+            fgbg_single_ext[dy:dy+fgbg_single.shape[0], dx:dx+fgbg_single.shape[1]] &= fgbg_single
+    fgbg_single2 = fgbg_single_ext[fgbg_ext:-fgbg_ext,fgbg_ext:-fgbg_ext] #
+    not_nan = ~np.isnan(layers["nn_out_ext"]) # nn_out_ext
+    not_nan &= ~np.isnan(layers["target_disp"]) # target_disp
+    not_nan &= ~np.isnan(layers["gt_disparity"]) # gt_disparity
+    not_nan &= ~np.isnan(layers["gt_strength"]) # gt_strength
+    # pessimistic - make not_nan to have no NaN-s in 5x5 clusters. Maybe too strict for LWIR - nothing will remain
+    if radius > 0:
+        not_nan_ext = np.zeros((stack.shape[0] + 2*radius,stack.shape[1] + 2 * radius),dtype=np.bool)
+        not_nan_ext[radius:-radius,radius:-radius] = not_nan
+        for dy in range(2*radius+1):
+            for dx in range(2*radius+1):
+                not_nan_ext[dy:dy+not_nan.shape[0], dx:dx+not_nan.shape[1]] &= not_nan
+        not_nan = not_nan_ext[radius:-radius,radius:-radius]
+    wo_margins = np.zeros((stack.shape[0],stack.shape[1]), dtype=bool)
+    wo_margins[MARGINS:-MARGINS, MARGINS:-MARGINS] = True
+    not_nan &= wo_margins
+    if not absolute:
+        stack[...,0] += stack[...,1]
+    nn_disparity = np.nan_to_num(stack[...,0], copy = False)
+    target_disparity = np.nan_to_num(stack[...,1], copy = False)
+    heurist_disparity = np.nan_to_num(layers["aux_disp"], copy = False)
+    gt_disparity = np.nan_to_num(stack[...,2], copy = False)
+    gt_strength = np.nan_to_num(stack[...,3], copy = False)
+    rrslt = []
+    print ("--------------- %s ---------------"%(rslt_path))
+    if logfile:
+        print ("--------------- %s ---------------"%(rslt_path), file=logfile)
+    for min_disparity, max_disparity, max_offset_target, max_offset_result, strength_pow, fgbg_mode in variants:
+        good_tiles = not_nan.copy();
+        if fgbg_mode == 1:
+            good_tiles &= fgbg_single
+        elif fgbg_mode == 2:
+            good_tiles &= fgbg_single2
+        good_tiles &= (gt_disparity >= min_disparity)
+        good_tiles &= (gt_disparity <= max_disparity)
+#        good_tiles &= (target_disparity != gt_disparity)
+        good_tiles &= (np.abs(heurist_disparity - gt_disparity) <= max_offset_target)
+        good_tiles &= (np.abs(target_disparity - gt_disparity) <= max_offset_target)
+        good_tiles &= (np.abs(target_disparity - nn_disparity) <= max_offset_result)
+        gt_w = gt_strength * good_tiles
+        if strength_pow > 0: # power (0,0) = 1.0
+            gt_w = np.power(gt_w,strength_pow)
+        else:
+            gt_w = good_tiles * 1.0
+        sw = gt_w.sum()
+        diff0 = heurist_disparity - gt_disparity
+        diff1 = nn_disparity - gt_disparity
+        diff0_2w = gt_w*diff0*diff0
+        diff1_2w = gt_w*diff1*diff1
+        rms0 = np.sqrt(diff0_2w.sum()/sw)
+        rms1 = np.sqrt(diff1_2w.sum()/sw)
+        print ("%7.3f<disp<%7.3f, offs_tgt<%5.2f, offs_rslt<%5.2f pwr=%05.3f, fgbg=%1d, rms0=%7.4f, rms1=%7.4f (gain=%7.4f) num good tiles = %5d"%(
+            min_disparity, max_disparity, max_offset_target, max_offset_result, strength_pow, fgbg_mode, rms0, rms1, rms0/rms1, good_tiles.sum() ))
+        if logfile:
+            print ("%7.3f<disp<%7.3f, offs_tgt<%5.2f, offs_rslt<%5.2f pwr=%05.3f, fgbg=%1d, rms0=%7.4f, rms1=%7.4f (gain=%7.4f) num good tiles = %5d"%(
+                min_disparity, max_disparity, max_offset_target, max_offset_result, strength_pow, fgbg_mode, rms0, rms1, rms0/rms1, good_tiles.sum() ),file=logfile)
+        rrslt.append([rms0,rms1])
+    return rrslt
+
+def eval_results_eo(rslt_path, absolute,
                  min_disp = -0.1, #minimal GT disparity
                  max_disp = 20.0, # maximal GT disparity
                  max_ofst_target = 1.0,
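The fgbg_mode == 2 variant above relies on a sliding-window AND over the single-plane mask. The same neighbor filter, extracted as a stand-alone function for clarity (logic copied from the new eval_results; the function name is hypothetical):

    import numpy as np

    def single_plane_and_neighbors(fgbg_single, ext=1):
        # a tile survives only if it and every neighbor within +/-ext are
        # single-plane; out-of-image neighbors count as single-plane (ones padding)
        h, w = fgbg_single.shape
        padded = np.ones((h + 2*ext, w + 2*ext), dtype=bool)
        padded[ext:-ext, ext:-ext] = fgbg_single
        for dy in range(2*ext + 1):
            for dx in range(2*ext + 1):
                padded[dy:dy+h, dx:dx+w] &= fgbg_single
        return padded[ext:-ext, ext:-ext]

    # fgbg_single  = layers["gt_rms"] <= layers["gt_rms_split"]
    # fgbg_single2 = single_plane_and_neighbors(fgbg_single)   # used when fgbg_mode == 2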
@@ -706,10 +841,13 @@ def eval_results(rslt_path, absolute,
     rslt = np.load(rslt_path)
-    not_nan = ~np.isnan(rslt[...,0])
-    not_nan &= ~np.isnan(rslt[...,1])
-    not_nan &= ~np.isnan(rslt[...,2])
-    not_nan &= ~np.isnan(rslt[...,3])
+    not_nan = ~np.isnan(rslt[...,0]) # nn_out_ext
+    not_nan &= ~np.isnan(rslt[...,1]) # hier_out_ext
+    not_nan &= ~np.isnan(rslt[...,2]) # gt_disparity
+    not_nan &= ~np.isnan(rslt[...,3]) # gt_strength
+    # pessimistic - make not_nan to have no NaN-s in 5x5 clusters. Maybe too strict for LWIR - nothing will remain
     not_nan_ext = np.zeros((rslt.shape[0] + 2*radius,rslt.shape[1] + 2 * radius),dtype=np.bool)
     not_nan_ext[radius:-radius,radius:-radius] = not_nan
     for dy in range(2*radius+1):
@@ -723,7 +861,7 @@ def eval_results(rslt_path, absolute,
     target_disparity = np.nan_to_num(rslt[...,1], copy = False)
     gt_disparity = np.nan_to_num(rslt[...,2], copy = False)
     gt_strength = np.nan_to_num(rslt[...,3], copy = False)
-    rslt = []
+    rrslt = []
     print ("--------------- %s ---------------"%(rslt_path))
     if logfile:
         print ("--------------- %s ---------------"%(rslt_path), file=logfile)
@@ -750,8 +888,10 @@ def eval_results(rslt_path, absolute,
         print ("%7.3f<disp<%7.3f, offs_tgt<%5.2f, offs_rslt<%5.2f pwr=%05.3f, rms0=%7.4f, rms1=%7.4f (gain=%7.4f) num good tiles = %5d"%(
             min_disparity, max_disparity, max_offset_target, max_offset_result, strength_pow, rms0, rms1, rms0/rms1, good_tiles.sum() ),file=logfile)
-        rslt.append([rms0,rms1])
-    return rslt
+        rrslt.append([rms0,rms1])
+    return rrslt
 
 def concentricSquares(radius):
     side = 2 * radius + 1
...