Commit 466ed6b1 authored by Andrey Filippov

LWIR-related changes

parent ddd193c9
@@ -180,7 +180,7 @@ def writeTFRecordsFromImageSet(
extra = np.concatenate((
img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.FG_STR].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1)
......
#!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
'''
** Kind of obsolete now, can be used for testing **
Just inference, currently uses /data_ssd/data_sets/tf_data_5x5_main_13_heur/inference/
'''
import os
import sys
import numpy as np
import time
import shutil
##import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
#from tensorflow.python.ops import resource_variable_ops
#tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
"""
Next gets globals from the config file
"""
globals().update(parameters)
WIDTH = 324
HEIGHT = 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # 80 (160 when TWO_TRAINS); each batch has balanced D/S tiles; batches are shuffled, but tiles within a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
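# Example of the resulting suffix (parameter values below are assumed, not taken from an actual
# config): NET_ARCH1=0, NET_ARCH2=9, ABSOLUTE_DISPARITY=False, SYM8_SUB=False, WLOSS_LAMBDA=0.5,
# SLOSS_LAMBDA=0.1, SLOSS_CLIP=0.5, SPREAD_CONVERGENCE=False, INTER_CONVERGENCE=False,
# HOR_FLIP=True, DISP_DIFF_CAP=0.3, DISP_DIFF_SLOPE=0.03 would give
# SUFFIX == "0-9RNSWLAM0.5SLAM0.1SCLP0.5_nG_nI_HF_CP0.3_S0.03"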
##NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
##NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
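# Worked example (CLUSTER_RADIUS value assumed for illustration): with CLUSTER_RADIUS = 2 the
# cluster is a 5x5 tile window, so cluster_size == 25 and center_tile_index == 2*2*(2+1) == 12,
# i.e. the middle tile of the cluster in row-major order.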
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
"""
Next is tag for pb (pb == protocol buffer) model
"""
#PB_TAGS = ["model_pb"]
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except OSError: # results directory may already exist
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
cluster_radius = CLUSTER_RADIUS
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except OSError: # directory may already exist
pass
with tf.Session() as sess:
# Actually, refresh all the time and have an extra script to restore from it.
# use_Saved_Model = False
#if os.path.isdir(dirs['exportdir']):
# # check if dir contains "Saved Model" model
# use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
#if use_saved_model:
# print("Model restore: using Saved_Model model MetaGraph protocol buffer")
# meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
#else:
meta_graph_source = files["inference"]+'.meta'
print("Model restore: using conventionally saved model, but saving Saved Model for the next run")
print("MetaGraph source = "+str(meta_graph_source))
infer_saver = tf.train.import_meta_graph(meta_graph_source)
graph=tf.get_default_graph()
ph_corr2d = graph.get_tensor_by_name('ph_corr2d:0')
ph_target_disparity = graph.get_tensor_by_name('ph_target_disparity:0')
ph_ntile = graph.get_tensor_by_name('ph_ntile:0')
ph_ntile_out = graph.get_tensor_by_name('ph_ntile_out:0')
stage1done = graph.get_tensor_by_name('Disparity_net/stage1done:0') #<tf.Operation 'Siam_net/stage1done' type=Const>,
stage2_out_sparse = graph.get_tensor_by_name('Disparity_net/stage2_out_sparse:0')#not found
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full = graph.get_tensor_by_name('Disparity_net/stage2_out_full:0')
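# Inference is wired as two passes over the restored graph (see the per-image loop below):
# stage 1 is fed the 2D correlation tiles, target disparity and tile indices and is run for its
# 'stage1done' completion signal; stage 2 is then run with only the output tile indices and
# returns the sparse per-tile disparity from 'stage2_out_sparse'.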
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
infer_saver.restore(sess, files["inference"])
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
        lf = open(LOGPATH, "w") # overwrite the previous log (change "w" to "a" to append)
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # (?,324)
img_target = dataset_img['target_disparity'] # (?,1)
img_ntile = dataset_img['ntile'].reshape([-1]) # (?) - 0...78k int32
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
result_file = files['result'][nimg].replace('.npy','-infer.npy') #not to overwrite training result files that are more complete
try:
os.makedirs(os.path.dirname(result_file))
        except OSError: # directory may already exist
pass
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
"""
        Remove dataset_img (if it is not [0]) to reduce the memory footprint
"""
image_data[nimg] = None
"""
Save MetaGraph to Saved_Model in *.pb (protocol buffer) format to
be able to use from Java
"""
# force clean
shutil.rmtree(dirs['exportdir'], ignore_errors=True)
builder = tf.saved_model.builder.SavedModelBuilder(dirs['exportdir'])
builder.add_meta_graph_and_variables(sess,[tf.saved_model.tag_constants.SERVING],main_op=tf.local_variables_initializer())
builder.save(False) # True = *.pbtxt, False = *.pb
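# Note: builder.save(False) writes a binary saved_model.pb plus a variables/ subdirectory under
# dirs['exportdir']; the companion inference script below reloads it with
# tf.saved_model.loader.load() and restores the weights from '<exportdir>/variables/variables'.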
if lf:
lf.close()
writer.close()
#!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
# Just inference, currently uses /data_ssd/data_sets/tf_data_5x5_main_13_heur/inference/
# TODO: Update for LWIR!
import os
import sys
import numpy as np
import time
import shutil
##import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
#from tensorflow.python.ops import resource_variable_ops
#tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
"""
Next gets globals from the config file
"""
globals().update(parameters)
WIDTH = 324
HEIGHT = 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # 80 (160 when TWO_TRAINS); each batch has balanced D/S tiles; batches are shuffled, but tiles within a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
##NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
"""
Next is tag for pb (pb == protocol buffer) model
"""
#PB_TAGS = ["model_pb"]
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except OSError: # results directory may already exist
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
cluster_radius = CLUSTER_RADIUS
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except OSError: # directory may already exist
pass
with tf.Session() as sess:
# Actually, refresh all the time and have an extra script to restore from it.
# use_Saved_Model = False
#if os.path.isdir(dirs['exportdir']):
# # check if dir contains "Saved Model" model
# use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
#if use_saved_model:
# print("Model restore: using Saved_Model model MetaGraph protocol buffer")
# meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
#else:
use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
if not use_saved_model:
print("ERROR: Saved_Model not found. Run previous script to create it.")
sys.exit()
meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
infer_saver = tf.train.import_meta_graph(meta_graph_source)
graph=tf.get_default_graph()
ph_corr2d = graph.get_tensor_by_name('ph_corr2d:0')
ph_target_disparity = graph.get_tensor_by_name('ph_target_disparity:0')
ph_ntile = graph.get_tensor_by_name('ph_ntile:0')
ph_ntile_out = graph.get_tensor_by_name('ph_ntile_out:0')
stage1done = graph.get_tensor_by_name('Disparity_net/stage1done:0') #<tf.Operation 'Siam_net/stage1done' type=Const>,
stage2_out_sparse = graph.get_tensor_by_name('Disparity_net/stage2_out_sparse:0')#not found
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full = graph.get_tensor_by_name('Disparity_net/stage2_out_full:0')
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
#infer_saver.restore(sess, dirs['exportdir'])
#infer_saver.restore(sess, files["inference"])
infer_saver.restore(sess,dirs['exportdir']+'/variables/variables')
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
        lf = open(LOGPATH, "w") # overwrite the previous log (change "w" to "a" to append)
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # (?,324)
img_target = dataset_img['target_disparity'] # (?,1)
img_ntile = dataset_img['ntile'].reshape([-1]) # (?) - 0...78k int32
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
result_file = files['result'][nimg].replace('.npy','-infer.npy') #not to overwrite training result files that are more complete
try:
os.makedirs(os.path.dirname(result_file))
        except OSError: # directory may already exist
pass
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
"""
        Remove dataset_img (if it is not [0]) to reduce the memory footprint
"""
image_data[nimg] = None
if lf:
lf.close()
writer.close()
@@ -20,7 +20,7 @@ import tensorflow as tf
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 20 # 324 # tiles per image row
#IMG_WIDTH = 20 # 324 # tiles per image row Defined in config
DEBUG_LEVEL= 1
try:
@@ -59,6 +59,8 @@ CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
LOGFILE="results.txt"
IMG_WIDTH = 20
IMG_HEIGHT = 15
"""
Next gets globals from the config file
"""
@@ -70,8 +72,8 @@ qsf.setCorr2Limits(CORR2D_LIMITS) # limit min/max 2d correlation tiles values
#exit(0)
WIDTH = 20 # 324
HEIGHT = 15 # 242
#WIDTH = 20 # 324
#HEIGHT = 15 # 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
@@ -93,11 +95,6 @@ NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
USE_PARTIALS = PARTIALS_WEIGHTS is not None # False - just a single Siamese net, True - partial outputs that use concentric squares of the first level subnets
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","target_disp","gt_disparity","gt_strength",
"cutcorn_cost_nw","cutcorn_cost",
"gt_avg_dist","avg8_disp","gt_disp","out_avg",
"aux_disp","fg_disp","bg_disp","gt_rms","gt_rms_split"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
@@ -130,7 +127,7 @@ qsf.evaluateAllResults(result_files = files['result'],
absolute_disparity = ABSOLUTE_DISPARITY,
cluster_radius = CLUSTER_RADIUS,
fgbg_mode= FGBG_MODE,
labels = SLICE_LABELS,
labels = qsf.SLICE_LABELS,
logpath= LOGPATH)
image_data = qsf.initImageData(
@@ -328,8 +325,8 @@ with tf.name_scope('epoch_average'):
tf.compat.v1.summary.scalar("sq_diff_epoch", tf_ph_sq_diff)
tf.compat.v1.summary.scalar("gtvar_diff", tf_gtvar_diff)
tf.compat.v1.summary.scalar("img_test0", tf_img_test0)
tf.compat.v1.summary.scalar("img_test9", tf_img_test9)
tf.compat.v1.summary.scalar("Disparity error", tf_img_test0)
tf.compat.v1.summary.scalar("NN gain over heuristic", tf_img_test9)
trainable_vars= tf.trainable_variables()
lr= tf.compat.v1.placeholder(tf.float32)
@@ -425,7 +422,7 @@ with tf.Session() as sess:
gtvar_test_hist= np.empty(dataset_test_size, dtype=np.float32)
gtvar_train = 0.0
gtvar_test = 0.0
img_gain_test0 = 1.0
img_gain_test0 = 0.2
img_gain_test9 = 1.0
thr=None
@@ -603,14 +600,14 @@ with tf.Session() as sess:
# Read the full image
###################################################
## test_summaries_img = [0.0]*len(ind_img) # datasets_img)
disp_out= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_cost_nw= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_cost_w= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_d= np.empty((WIDTH*HEIGHT), dtype=np.float32)
disp_out= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_cost_nw= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_cost_w= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_d= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_avg_disparity = np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_gt_disparity = np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_offs = np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_avg_disparity = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_gt_disparity = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_offs = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
for ntest in ind_img: # datasets_img):
dataset_img = qsf.readImageData(
@@ -676,27 +673,27 @@ with tf.Session() as sess:
dbg_avg_disparity.reshape(-1,1),
dbg_gt_disparity.reshape(-1,1),
dbg_offs.reshape(-1,1),
extra, # len 3..6,
#adding extra data layers
extra, # len 3..6, #adding extra data layers
],1)
num_slices = rslt.shape[1]
np.save(
result_file,
rslt.reshape(HEIGHT,WIDTH,-1))
rslt.reshape(IMG_HEIGHT, IMG_WIDTH,-1))
eval_rslt = qsf.eval_results(
result_file,
ABSOLUTE_DISPARITY,
radius=0, # CLUSTER_RADIUS,
last_fgbg_mode = 1,
logfile=lf)
img_gain_test0 = eval_rslt[0][0]/eval_rslt[0][1]
# img_gain_test0 = eval_rslt[0][0]/eval_rslt[0][1]
img_gain_test0 = eval_rslt[9][1]
img_gain_test9 = eval_rslt[9][0]/eval_rslt[9][1]
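# Presumably eval_rslt[9] holds (NN error, heuristic error) for the strictest evaluation mode:
# img_gain_test0 now carries an absolute disparity error (matching the summary renamed to
# "Disparity error" above), while img_gain_test9 keeps the NN/heuristic error ratio
# ("NN gain over heuristic").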
if SAVE_TIFFS:
qsf.result_npy_to_tiff(
result_file,
ABSOLUTE_DISPARITY,
fix_nan = True,
labels=SLICE_LABELS[0:num_slices],
labels=qsf.SLICE_LABELS[0:num_slices],
logfile=lf)
"""
......
@@ -13,6 +13,41 @@ TIME_LAST = 0
TIME_START = 0
corr2_limits = None
MARGINS = 2 # disregard errors outside
NN_DISP = 0
#HEUR_DISP = 1
TARGET_DISP = 1
GT_DISP = 2
GT_CONF = 3
NN_NAN = 4 #first inserted layer
HEUR_NAN = 5
NN_DIFF = 6
HEUR_DIFF = 7
NN_ERR_SNGL = 8
NN_ERR_SNGL_NEIB = 9
FGBG_SNGL = 10
FGBG_SNGL_NEIB = 11 #last inserted layer
CUTCORN_COST_NW = 12
CUTCORN_COST = 13
GT_AVG_DIST = 14
AVG8_DISP = 15
GT_DISP1 = 16
OUT_AVG = 17
AUX_DISP = 18
FG_DISP = 19
BG_DISP = 20
GT_RMS = 21
GT_RMS_SPLIT = 22
EXTEND = CUTCORN_COST_NW - NN_NAN # insert this many layers (8)
SLICE_LABELS = ["nn_out_ext","target_disp","gt_disparity","gt_strength",
"cutcorn_cost_nw","cutcorn_cost",
"gt_avg_dist","avg8_disp","gt_disp","out_avg",
"aux_disp","fg_disp","bg_disp","gt_rms","gt_rms_split"]
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
@@ -492,9 +527,12 @@ def readImageData(image_data,
'ntile': get_full_tile_indices(corr2d.shape[0]//width, width)}
if keep_gt:
gt_ds = dataset[:,cl+tl:cl+tl+gl]
extra = dataset[:,cl+tl+gl:]
image_data[indx]["gt_ds"] = gt_ds
image_data[indx]["gtruths"]= gt_ds.copy()
image_data[indx]["t_disps"]= target_disparity.reshape([-1,1]).copy()
image_data[indx]["extra"] = extra
image_data[indx]["t_extra"] = extra.copy()
else:
gt_ds = dataset[:,cl+tl:cl+tl+gl]
extra = dataset[:,cl+tl+gl:]
@@ -577,42 +615,98 @@ def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True,labels=No
data will be written as 4-layer tiff, extension '.npy' replaced with '.tiff'
@param absolute - True - the first layer contains absolute disparity, False - difference from target_disparity
@param fix_nan - replace nan in target_disparity with 0 to apply offset, target_disparity will still contain nan
@parame insert_deltas: +1 - add delta layers, +2 - add variance (max - min of this and 8 neighbors)
@param insert_deltas: +1 - add delta layers, +2 - add variance (max - min of this and 8 neighbors)
with lwir data.shape = (15, 20, 15)
"""
data = np.load(npy_path) #(324,242,4) [nn_disp, target_disp,gt_disp, gt_conf]
if labels is None:
labels = ["chn%d"%(i) for i in range(data.shape[2])]
# labels = ["nn_out","hier_out","gt_disparity","gt_strength"]
nn_out = 0
# extend = 8 # inserted extend slices
# nn_out = 0
# target_disparity = 1
gt_disparity = 2
gt_strength = 3
heur_err = 7
# gt_disparity = 2
# gt_strength = 3
# nn_out1 = 4
# heur_out = 5
# nn_err = 6
# heur_err = 7
# nn_err_sngl = 8
# nn_err_sngl_neib = 9
# fgbg_sngl = 10
# fgbg_sngl_neib = 11
# cutcorn_cost_nw = 12
# aux_disp = 18
# fg_disp = 19
# bg_disp = 20
# gt_rms = 21
# gt_rms_split = 22
min_heur_err = 0.001
height = data.shape[0]
width = data.shape[1]
nocenter9 = np.array([[[1,1,1,1,np.nan,1,1,1,1]]], dtype = data.dtype)
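# nocenter9: 3x3 neighborhood weights flattened to 9 values with NaN at the center; presumably
# used with nan-aware operations to exclude the center tile from neighborhood statistics
# (its use is outside this hunk).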
if not absolute:
if fix_nan:
data[...,nn_out] += np.nan_to_num(data[...,1], copy=True)
data[...,NN_DISP] += np.nan_to_num(data[...,1], copy=True)
else:
data[...,nn_out] += data[...,1]
data[...,NN_DISP] += data[...,1]
if (insert_deltas & 1):
np.nan_to_num(data[...,gt_strength], copy=False)
data = np.concatenate([data[...,0:4],data[...,0:2],data[...,0:2],data[...,4:]], axis = 2) # data[...,4:] may be empty
labels = labels[:4]+["nn_out","hier_out","nn_err","hier_err"]+labels[4:]
data[...,6] -= data[...,gt_disparity]
data[...,7] -= data[...,gt_disparity]
for l in [2, 4, 5, 6, 7]:
np.nan_to_num(data[...,GT_CONF], copy=False)
data = np.concatenate(
[data[...,0:4],
data[...,NN_DISP: NN_DISP+1],
data[...,AUX_DISP-EXTEND:AUX_DISP-EXTEND+1], #data[...,0:2],
data[...,NN_DISP: NN_DISP+1],
data[...,AUX_DISP-EXTEND:AUX_DISP-EXTEND+1], #data[...,0:2],
np.empty_like(data[...,0:4]),
data[...,4:]],
axis = 2) # data[...,4:] may be empty
labels = labels[:4]+["nn_out","heur_out","nn_err","heur_err", "nn_err_sngl", "nn_err_sngl_neib", "fgbg_sngl", "fgbg_sngl_neib"]+labels[4:]
data[..., NN_DIFF] -= data[...,GT_DISP] # 6
data[..., HEUR_DIFF] -= data[...,GT_DISP] # 7
#replace data with NaN where gt_strength == 0 in selected layers
for l in [GT_DISP, NN_NAN, HEUR_NAN, NN_DIFF, HEUR_DIFF]: # 0, 4, 5, 6, 7
if l < data.shape[2]:
data[...,l] = np.select([data[...,gt_strength]==0.0, data[...,gt_strength]>0.0], [np.nan,data[...,l]])
data[...,l] = np.select([data[...,GT_CONF]==0.0, data[...,GT_CONF]>0.0], [np.nan,data[...,l]])
        # Mask the remaining cost/debug layers (CUTCORN_COST_NW .. AUX_DISP-1) the same way
for l in range(8,data.shape[2]):
data[...,l] = np.select([data[...,gt_strength]==0.0, data[...,gt_strength]>0.0], [np.nan,data[...,l]])
# for l in range(8,data.shape[2]):
for l in range(CUTCORN_COST_NW, AUX_DISP):
data[...,l] = np.select([data[...,GT_CONF]==0.0, data[...,GT_CONF]>0.0], [np.nan,data[...,l]])
# Filter NN errors by excluding margins and using only single-plane (no FG+BG) tiles, and tiles that do not have split FG/BG neighbors
fgbg_single = data[...,GT_RMS] <= data[...,GT_RMS_SPLIT]
fgbg_ext = 1
        fgbg_single_ext = np.ones((height + 2 * fgbg_ext, width + 2 * fgbg_ext), dtype=bool)
fgbg_single_ext[fgbg_ext:-fgbg_ext, fgbg_ext:-fgbg_ext] = fgbg_single
for dy in range(2*fgbg_ext+1):
for dx in range(2*fgbg_ext+1):
fgbg_single_ext[dy:dy+fgbg_single.shape[0], dx:dx+fgbg_single.shape[1]] &= fgbg_single
fgbg_single2 = fgbg_single_ext[fgbg_ext:-fgbg_ext,fgbg_ext:-fgbg_ext] #
#create margins array
if MARGINS > 0:
wo_margins = np.zeros((height, width), dtype=bool)
wo_margins[MARGINS:-MARGINS, MARGINS:-MARGINS] = True
            fgbg_single &= wo_margins
            fgbg_single2 &= wo_margins
data[..., NN_ERR_SNGL] = fgbg_single * data[..., NN_DIFF]
data[..., NN_ERR_SNGL_NEIB] = fgbg_single2 * data[..., NN_DIFF]
data[..., FGBG_SNGL] = fgbg_single * 1.0
data[..., FGBG_SNGL_NEIB] = fgbg_single2 * 1.0
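# fgbg_single marks tiles where the single-plane fit is at least as good as the split FG/BG fit
# (GT_RMS <= GT_RMS_SPLIT); the padded AND-loop above erodes it into fgbg_single2, which also
# requires every in-range 8-neighbor to be single-plane. Both masks are zeroed inside MARGINS of
# the border, then gate NN_DIFF into the NN_ERR_SNGL / NN_ERR_SNGL_NEIB layers and are stored as
# FGBG_SNGL / FGBG_SNGL_NEIB.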
"""
        Calculate bad tiles where gt was used as a master, to remove them from the results (later versions add random error)
"""
bad1 = abs(data[...,heur_err]) < min_heur_err
bad1 = abs(data[...,HEUR_DIFF]) < min_heur_err
bad1_ext = np.concatenate([bad1 [0:1,:], bad1 [0:1,:], bad1[:,:], bad1 [-1:height,:], bad1 [-1:height,:]],axis = 0)
bad1_ext = np.concatenate([bad1_ext[:,0:1], bad1_ext[:,0:1], bad1_ext[:,:], bad1_ext[:,-1:width], bad1_ext[:,-1:width]], axis = 1)
bad25 = np.empty(shape=[height, width, 25], dtype=bad1.dtype)
@@ -634,10 +728,10 @@ def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True,labels=No
w8=np.array([wc,wo,wc,wo,0.0,wo,wc,wo,wc], dtype=data.dtype)
w8/=np.sum(w8) #normalize
gt_ext = np.concatenate([data[0:1,:,gt_disparity],data[:,:,gt_disparity],data[-1:height,:,gt_disparity]],axis = 0)
gt_ext = np.concatenate([gt_ext[:,0:1], gt_ext[:,:], gt_ext[:,-1:width]],axis = 1)
gs_ext = np.concatenate([data[0:1,:,gt_strength], data[:,:,gt_strength], data[-1:height,:,gt_strength]],axis = 0)
gs_ext = np.concatenate([gs_ext[:,0:1], gs_ext[:,:], gs_ext[:,-1:width]],axis = 1)
gt_ext = np.concatenate([data[0:1,:,GT_DISP], data[:,:,GT_DISP], data[-1:height,:,GT_DISP]],axis = 0)
gt_ext = np.concatenate([gt_ext[:,0:1], gt_ext[:,:], gt_ext[:,-1:width]], axis = 1)
gs_ext = np.concatenate([data[0:1,:,GT_CONF], data[:,:,GT_CONF], data[-1:height,:,GT_CONF]],axis = 0)
gs_ext = np.concatenate([gs_ext[:,0:1], gs_ext[:,:], gs_ext[:,-1:width]], axis = 1)
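# gt_ext / gs_ext pad the ground-truth disparity and confidence by replicating the edge rows and
# columns, so every tile has a full 3x3 neighborhood for the weighted average computed below.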
data9 = np.empty(shape=[height, width, 9], dtype=data.dtype)
weight9 = np.empty(shape=[height, width, 9], dtype=data.dtype)
@@ -659,7 +753,7 @@ def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True,labels=No
dw_center = np.sum(data9*weight9, axis=2)
dw_center /= w_center # now dw_center - weighted average in the center
data[...,-3] = np.abs(data[...,gt_disparity]- dw_center)
data[...,-3] = np.abs(data[...,GT_DISP]- dw_center)
# data[...,-2] = data[...,gt_disparity]- dw_center
#data[...,-3] *= (data[...,-4] < 1.0) # just temporary
@@ -766,7 +860,7 @@ MARGINS = 2 # disregard errors outside
for dx in range(2*radius+1):
not_nan_ext[dy:dy+not_nan.shape[0], dx:dx+not_nan.shape[1]] &= not_nan
not_nan = not_nan_ext[radius:-radius,radius:-radius]
if MARGINS > 0:
wo_margins = np.zeros((stack.shape[0],stack.shape[1]), dtype=bool)
wo_margins[MARGINS:-MARGINS, MARGINS:-MARGINS] = True
not_nan &= wo_margins
......