Commit 466ed6b1 authored by Andrey Filippov's avatar Andrey Filippov

LWIR-related changes

parent ddd193c9
......@@ -180,7 +180,7 @@ def writeTFRecordsFromImageSet(
extra = np.concatenate((
img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.FG_STR].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1)
......
#!/usr/bin/env python3
from tensorflow.python.framework.ops import GraphKeys
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
'''
Builds (and saves) an inference model from the one trained by nn_ds_neibs21.py
Saves the model and weights in two formats - using Saver (for Python) and Saved_Model (for Java or Python)
(older line, but still usable) Model and weights are used by the inference-only infer_qcds_graph.py
Usage:
~$ python3 infer_qcds_01.py qcstereo_conf.xml data_sets
qcstereo_conf.xml - config file with all paths
data_sets - root dir for trained model/checkpoints, etc.
'''
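"""
A minimal sketch of how the two saved formats are loaded back (an illustration only,
assuming TF 1.x and the 'files'/'dirs' layout prepared below; the actual restore code
is in the follow-up inference scripts of this commit):

import tensorflow as tf
# 1) Saver checkpoint (Python): rebuild the graph from the .meta file, then restore
saver = tf.compat.v1.train.import_meta_graph(files['inference'] + '.meta')
with tf.compat.v1.Session() as sess:
    saver.restore(sess, files['inference'])
# 2) Saved_Model export (Java or Python): load MetaGraph + variables from exportdir
with tf.compat.v1.Session() as sess:
    tf.compat.v1.saved_model.loader.load(
        sess, [tf.saved_model.SERVING], dirs['exportdir'])
"""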
import os
import sys
import numpy as np
import time
import shutil
import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops
tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
#IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
FGBG_MODE = 1 # 0 - do not filter by single-plane, 1 - remove split-plane tiles, 2 - remove split planes and neighbors
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
IMG_WIDTH = None
IMG_HEIGHT = None
#WIDTH = 160
#HEIGHT = 120
"""
Next gets globals from the config file
"""
globals().update(parameters)
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
#SLICE_LABELS = ["nn_out_ext","heur_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
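# Example (an illustration): with CLUSTER_RADIUS = 2 the cluster is 5x5, so
# cluster_size = (2*2+1)*(2*2+1) = 25 and center_tile_index = 2*2*(2+1) = 12 -
# the middle element of the flattened 0..24 cluster.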
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except:
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
ph_corr2d = tf.compat.v1.placeholder(np.float32, (None,FEATURES_PER_TILE), name = 'ph_corr2d')
ph_target_disparity = tf.compat.v1.placeholder(np.float32, (None,1), name = 'ph_target_disparity')
ph_ntile = tf.compat.v1.placeholder(np.int32, (None,), name = 'ph_ntile') #nTile
ph_ntile_out = tf.compat.v1.placeholder(np.int32, (None,), name = 'ph_ntile_out') #which tiles should be calculated in stage2
#corr2d9x325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE]) , tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1])],2)
tf_intile325 = tf.concat([ph_corr2d, ph_target_disparity],axis=1,name="tf_intile325") # [?,325]
pass
"""
target_disparity_cluster = tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1], name="targdisp_cluster")
corr2d_Nx325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE], name="coor2d_cluster"),
target_disparity_cluster], axis=2, name = "corr2d_Nx325")
"""
cluster_radius = CLUSTER_RADIUS
"""
ResourceVariable is probably not needed here because of the tf.scatter_update().
If 'collections' is not provided, it defaults to [GraphKeys.GLOBAL_VARIABLES], and that in turn makes saver.restore() fail, as this variable was not available in the trained model
"""
'''
#rv_stage1_out = resource_variable_ops.ResourceVariable(
rv_stage1_out = tf.Variable(
np.zeros([HEIGHT * WIDTH, NN_LAYOUT1[-1]]),
## collections = [],
collections = [GraphKeys.LOCAL_VARIABLES],# Works, available with tf.local_variables()
dtype=np.float32,
name = 'rv_stage1_out')
'''
rv_stage1_out = tf.compat.v1.get_variable("rv_stage1_out",
shape=[IMG_HEIGHT * IMG_WIDTH, NN_LAYOUT1[-1]],
dtype=tf.float32,
initializer=tf.zeros_initializer,
collections = [GraphKeys.LOCAL_VARIABLES],trainable=False)
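"""
An alternative sketch (an assumption, not what this script does): keep rv_stage1_out
in the default GLOBAL_VARIABLES collection and instead exclude it from the Saver,
so saver.restore() does not look for it in the trained checkpoint:

restore_vars = [v for v in tf.compat.v1.global_variables()
                if not v.name.startswith('rv_stage1_out')]
saver = tf.compat.v1.train.Saver(var_list=restore_vars)

Using LOCAL_VARIABLES (as above) avoids that filtering, at the cost of running
tf.local_variables_initializer() before inference (done below).
"""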
#rv_stageX_out_init_placeholder = tf.compat.v1.placeholder(tf.float32, shape=[HEIGHT * WIDTH, NN_LAYOUT1[-1]])
#rv_stageX_out_init_op = rv_stageX_out.assign(rv_stageX_out_init_placeholder)
##stage1_tiled = tf.reshape(rv_stage1_out.read_value(),[HEIGHT, WIDTH, -1], name = 'stage1_tiled')
stage1_tiled = tf.reshape(rv_stage1_out, [IMG_HEIGHT, IMG_WIDTH, -1], name = 'stage1_tiled') # no need to synchronize here?
tf_stage1_exth = tf.concat([stage1_tiled[:,:1,:]]*cluster_radius +
[stage1_tiled] +
[stage1_tiled[:,-1:,:]]*cluster_radius, axis = 1,name = 'stage1_exth')
tf_stage1_ext = tf.concat([tf_stage1_exth[ :1,:,:]]*cluster_radius +
[tf_stage1_exth] +
[tf_stage1_exth[-1:,:,:]]*cluster_radius, axis = 0, name = 'stage1_ext')
tf_stage1_ext4 = tf.expand_dims(tf_stage1_ext, axis = 2, name = 'stage1_ext4')
concat_list = []
cluster_side = 2 * cluster_radius+1
for dy in range(cluster_side):
for dx in range(cluster_side):
# concat_list.append(tf_stage1_ext4[dy: cluster_side-dy, dx: cluster_side-dx,:,:])
concat_list.append(tf.slice(tf_stage1_ext4,[dy,dx,0,0],[IMG_HEIGHT, IMG_WIDTH,-1,-1]))
pass
tf_stage2_inm = tf.concat(concat_list, axis = 2, name ='stage2_inm') #242, 324, 25, 64
tf_stage2_in = tf.reshape(tf_stage2_inm,[-1,rv_stage1_out.shape[1]*cluster_side*cluster_side], name = 'stage2_in')
tf_stage2_in_sparse = tf.gather(tf_stage2_in, indices= ph_ntile_out, axis=0, name = 'stage2_in_sparse')
#aextv=np.concatenate([a[:,:1,:]]*2 + [a] + [a[:,-1:,:]]*2,axis = 1)
#ext=np.concatenate([aextv[:1,:,:]]*1 + [aextv] + [aextv[-1:,:,:]]*3,axis = 0)
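"""
A minimal NumPy sketch of the same neighborhood gather (illustration only: radius 1
and toy sizes instead of CLUSTER_RADIUS / IMG_HEIGHT / IMG_WIDTH):

import numpy as np
h, w, nf, r = 4, 5, 3, 1                                  # tiles high/wide, features, radius
a = np.arange(h * w * nf, dtype=np.float32).reshape(h, w, nf)
ext = np.pad(a, ((r, r), (r, r), (0, 0)), mode='edge')    # replicate edge tiles
side = 2 * r + 1
# stack the side*side shifted views -> (h, w, side*side, nf), like 'stage2_inm'
neib = np.stack([ext[dy:dy + h, dx:dx + w, :]
                 for dy in range(side) for dx in range(side)], axis=2)
stage2_in = neib.reshape(h * w, side * side * nf)         # like 'stage2_in'
"""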
with tf.name_scope("Disparity_net"): # to have the same scope for weight/biases?
ns, _ = qcstereo_network.network_sub(tf_intile325,
input_global = [None,ph_target_disparity][SPREAD_CONVERGENCE], # input_global[:,i,:],
layout= NN_LAYOUT1,
reuse= False,
sym8 = SYM8_SUB,
cluster_radius = 0)
update=tf.scatter_update(ref=rv_stage1_out,
indices = ph_ntile,
updates = ns,
use_locking = False,
name = 'update')
with tf.control_dependencies([update]):
stage1done = tf.constant(1, dtype=tf.int32, name="stage1done")
pass
stage2_out_sparse0 = qcstereo_network.network_inter (
input_tensor = tf_stage2_in_sparse,
input_global = None, # [None, ig][inter_convergence], # optionally feed all convergence values (from each tile of a cluster)
layout = NN_LAYOUT2,
reuse = False,
use_confidence = False)
stage2_out_sparse = tf.identity(stage2_out_sparse0, name = 'stage2_out_sparse')
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full0 = qcstereo_network.network_inter (
input_tensor = tf_stage2_in,
input_global = None, # [None, ig][inter_convergence], # optionally feed all convergence values (from each tile of a cluster)
layout = NN_LAYOUT2,
reuse = True,
use_confidence = False)
stage2_out_full = tf.identity(stage2_out_full0, name = 'stage2_out_full')
pass
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
"""
This is needed if ResourceVariable is used - in that case the i/o tensor names somehow disappeared
and were replaced by 'Placeholder_*'
collection_io = 'collection_io'
tf.add_to_collection(collection_io, ph_corr2d)
tf.add_to_collection(collection_io, ph_target_disparity)
tf.add_to_collection(collection_io, ph_ntile)
tf.add_to_collection(collection_io, ph_ntile_out)
tf.add_to_collection(collection_io, stage1done)
tf.add_to_collection(collection_io, stage2_out_sparse)
"""
##saver=tf.compat.v1.train.Saver()
saver =tf.compat.v1.train.Saver(tf.global_variables())
#saver = tf.compat.v1.train.Saver(tf.global_variables()+tf.local_variables())
saver_def = saver.as_saver_def()
pass
"""
saver_def = saver.as_saver_def()
# The name of the tensor you must feed with a filename when saving/restoring.
print ('saver_def.filename_tensor_name=',saver_def.filename_tensor_name)
# The name of the target operation you must run when restoring.
print ('saver_def.restore_op_name=',saver_def.restore_op_name)
# The name of the target operation you must run when saving.
print ('saver_def.save_tensor_name=',saver_def.save_tensor_name)
saver_def.filename_tensor_name= save/Const:0
saver_def.restore_op_name= save/restore_all
saver_def.save_tensor_name= save/control_dependency:0
print(saver.save(sess, files["checkpoints"]))
"""
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except:
pass
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver.restore(sess, files["checkpoints"])
'''
rv_stage1_out belongs to GraphKeys.LOCAL_VARIABLES
Now, when the weights/biases are restored from 'checkpoints'
(which do not contain this variable), add it to the globals.
Actually it could have been declared right here - this
needs testing.
NOTE1: The line below makes the Saved_Model produced by the next
script (the one that saves the Saved_Model MetaGraph) significantly
bigger.
NOTE2: The line below is commented in favor of (in the next script!):
builder.add_meta_graph_and_variables(sess,[tf.saved_model.tag_constants.SERVING],main_op=tf.local_variables_initializer())
'''
#tf.add_to_collection(GraphKeys.GLOBAL_VARIABLES, rv_stage1_out)
saver.save(sess, files["inference"])
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
lf=open(LOGPATH,"w") #overwrite previous (or make it "a"?)
#_ = sess.run([rv_stageX_out_init_op],feed_dict={rv_stageX_out_init_placeholder: np.zeros((HEIGHT * WIDTH, NN_LAYOUT1[-1]))})
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # (?,324)
img_target = dataset_img['target_disparity'] # (?,1)
img_ntile = dataset_img['ntile'].reshape([-1])
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
result_file = files['result'][nimg].replace('.npy','-infer.npy') #not to overwrite training result files that are more complete
try:
os.makedirs(os.path.dirname(result_file))
except:
pass
'''
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file,
rslt.reshape(IMG_HEIGHT,IMG_WIDTH,-1))
rslt = qsf.eval_results(
result_file,
ABSOLUTE_DISPARITY,
radius=CLUSTER_RADIUS,
logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True, labels=SLICE_LABELS, logfile=lf)
'''
extra = dataset_img['t_extra']
if extra is None:
extra = np.array([dataset_img['gtruths'].shape[0],0])
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest], disp_out.shape[0],BATCH_SIZE
dataset_img['gtruths'], # gtruths[ntest],
#skip 6 empty layers
np.zeros([IMG_HEIGHT * IMG_WIDTH, 6], dtype = disp_out.dtype),
# dbg_cost_nw.reshape(-1,1),
# dbg_cost_w.reshape(-1,1),
# dbg_d.reshape(-1,1),
# dbg_avg_disparity.reshape(-1,1),
# dbg_gt_disparity.reshape(-1,1),
# dbg_offs.reshape(-1,1),
extra, # len 3..6, #adding extra data layers
],1)
num_slices = rslt.shape[1]
np.save(
result_file,
rslt.reshape(IMG_HEIGHT, IMG_WIDTH,-1))
eval_rslt = qsf.eval_results(
result_file,
ABSOLUTE_DISPARITY,
radius=0, # CLUSTER_RADIUS,
last_fgbg_mode = 1,
logfile=lf)
# num_slices = eval_rslt.shape[1]
if SAVE_TIFFS:
qsf.result_npy_to_tiff(
result_file,
ABSOLUTE_DISPARITY,
fix_nan = True,
labels=qsf.SLICE_LABELS[0:num_slices],
logfile=lf)
"""
Remove dataset_img (if it is not [0]) to reduce the memory footprint
"""
image_data[nimg] = None
"""
Save MetaGraph to Saved_Model in *.pb (protocol buffer) format to
be able to use it from Java
"""
# force clean
shutil.rmtree(dirs['exportdir'], ignore_errors=True)
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(dirs['exportdir'])
# builder.add_meta_graph_and_variables(sess,[tf.saved_model.tag_constants.SERVING],main_op=tf.local_variables_initializer())
builder.add_meta_graph_and_variables(sess,[tf.saved_model.SERVING],main_op=tf.local_variables_initializer())
builder.save(False) # True = *.pbtxt, False = *.pb
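# builder.save(False) writes a binary saved_model.pb plus a variables/ subdirectory
# under dirs['exportdir']; a later script in this commit loads the MetaGraph with
# tf.saved_model.loader.load() and restores the weights from
# dirs['exportdir'] + '/variables/variables'.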
if lf:
lf.close()
writer.close()
#!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
'''
** Kind of obsolete now, can be used for testing **
Just inference, currently uses /data_ssd/data_sets/tf_data_5x5_main_13_heur/inference/
'''
import os
import sys
import numpy as np
import time
import shutil
##import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
#from tensorflow.python.ops import resource_variable_ops
#tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
"""
Next gets globals from the config file
"""
globals().update(parameters)
WIDTH = 324
HEIGHT = 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
##NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
"""
Next is tag for pb (pb == protocol buffer) model
"""
#PB_TAGS = ["model_pb"]
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except:
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
cluster_radius = CLUSTER_RADIUS
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except:
pass
with tf.Session() as sess:
# Actually, refresh all the time and have an extra script to restore from it.
# use_Saved_Model = False
#if os.path.isdir(dirs['exportdir']):
# # check if dir contains "Saved Model" model
# use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
#if use_saved_model:
# print("Model restore: using Saved_Model model MetaGraph protocol buffer")
# meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
#else:
meta_graph_source = files["inference"]+'.meta'
print("Model restore: using conventionally saved model, but saving Saved Model for the next run")
print("MetaGraph source = "+str(meta_graph_source))
infer_saver = tf.train.import_meta_graph(meta_graph_source)
graph=tf.get_default_graph()
ph_corr2d = graph.get_tensor_by_name('ph_corr2d:0')
ph_target_disparity = graph.get_tensor_by_name('ph_target_disparity:0')
ph_ntile = graph.get_tensor_by_name('ph_ntile:0')
ph_ntile_out = graph.get_tensor_by_name('ph_ntile_out:0')
stage1done = graph.get_tensor_by_name('Disparity_net/stage1done:0') #<tf.Operation 'Siam_net/stage1done' type=Const>,
stage2_out_sparse = graph.get_tensor_by_name('Disparity_net/stage2_out_sparse:0')#not found
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full = graph.get_tensor_by_name('Disparity_net/stage2_out_full:0')
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
infer_saver.restore(sess, files["inference"])
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
lf=open(LOGPATH,"w") #overwrite previous (or make it "a"?)
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # (?,324)
img_target = dataset_img['target_disparity'] # (?,1)
img_ntile = dataset_img['ntile'].reshape([-1]) # (?) - 0...78k int32
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
result_file = files['result'][nimg].replace('.npy','-infer.npy') #not to overwrite training result files that are more complete
try:
os.makedirs(os.path.dirname(result_file))
except:
pass
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
"""
Remove dataset_img (if it is not [0]) to reduce the memory footprint
"""
image_data[nimg] = None
"""
Save MetaGraph to Saved_Model in *.pb (protocol buffer) format to
be able to use it from Java
"""
# force clean
shutil.rmtree(dirs['exportdir'], ignore_errors=True)
builder = tf.saved_model.builder.SavedModelBuilder(dirs['exportdir'])
builder.add_meta_graph_and_variables(sess,[tf.saved_model.tag_constants.SERVING],main_op=tf.local_variables_initializer())
builder.save(False) # True = *.pbtxt, False = *.pb
if lf:
lf.close()
writer.close()
#!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
# Just inference, currently uses /data_ssd/data_sets/tf_data_5x5_main_13_heur/inference/
# TODO: Update for LWIR!
import os
import sys
import numpy as np
import time
import shutil
##import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
#from tensorflow.python.ops import resource_variable_ops
#tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
"""
Next gets globals from the config file
"""
globals().update(parameters)
WIDTH = 324
HEIGHT = 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
##NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
"""
Next is tag for pb (pb == protocol buffer) model
"""
#PB_TAGS = ["model_pb"]
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except:
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
cluster_radius = CLUSTER_RADIUS
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except:
pass
with tf.Session() as sess:
# Actually, refresh all the time and have an extra script to restore from it.
# use_Saved_Model = False
#if os.path.isdir(dirs['exportdir']):
# # check if dir contains "Saved Model" model
# use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
#if use_saved_model:
# print("Model restore: using Saved_Model model MetaGraph protocol buffer")
# meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
#else:
use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
if not use_saved_model:
print("ERROR: Saved_Model not found. Run previous script to create it.")
sys.exit()
meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
infer_saver = tf.train.import_meta_graph(meta_graph_source)
graph=tf.get_default_graph()
ph_corr2d = graph.get_tensor_by_name('ph_corr2d:0')
ph_target_disparity = graph.get_tensor_by_name('ph_target_disparity:0')
ph_ntile = graph.get_tensor_by_name('ph_ntile:0')
ph_ntile_out = graph.get_tensor_by_name('ph_ntile_out:0')
stage1done = graph.get_tensor_by_name('Disparity_net/stage1done:0') #<tf.Operation 'Siam_net/stage1done' type=Const>,
stage2_out_sparse = graph.get_tensor_by_name('Disparity_net/stage2_out_sparse:0')#not found
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full = graph.get_tensor_by_name('Disparity_net/stage2_out_full:0')
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
#infer_saver.restore(sess, dirs['exportdir'])
#infer_saver.restore(sess, files["inference"])
infer_saver.restore(sess,dirs['exportdir']+'/variables/variables')
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
lf=open(LOGPATH,"w") #overwrite previous (or make it "a"?)
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # (?,324)
img_target = dataset_img['target_disparity'] # (?,1)
img_ntile = dataset_img['ntile'].reshape([-1]) # (?) - 0...78k int32
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
result_file = files['result'][nimg].replace('.npy','-infer.npy') #not to overwrite training result files that are more complete
try:
os.makedirs(os.path.dirname(result_file))
except:
pass
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
"""
Remove dataset_img (if it is not [0]) to reduce the memory footprint
"""
image_data[nimg] = None
if lf:
lf.close()
writer.close()
......@@ -20,7 +20,7 @@ import tensorflow as tf
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 20 # 324 # tiles per image row
#IMG_WIDTH = 20 # 324 # tiles per image row Defined in config
DEBUG_LEVEL= 1
try:
......@@ -59,6 +59,8 @@ CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
LOGFILE="results.txt"
IMG_WIDTH = 20
IMG_HEIGHT = 15
"""
Next gets globals from the config file
"""
......@@ -70,8 +72,8 @@ qsf.setCorr2Limits(CORR2D_LIMITS) # limit min/max 2d correlation tiles values
#exit(0)
WIDTH = 20 # 324
HEIGHT = 15 # 242
#WIDTH = 20 # 324
#HEIGHT = 15 # 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
......@@ -93,11 +95,6 @@ NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
USE_PARTIALS = not PARTIALS_WEIGHTS is None # False - just a single Siamese net, True - partial outputs that use concentric squares of the first level subnets
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","target_disp","gt_disparity","gt_strength",
"cutcorn_cost_nw","cutcorn_cost",
"gt_avg_dist","avg8_disp","gt_disp","out_avg",
"aux_disp","fg_disp","bg_disp","gt_rms","gt_rms_split"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
......@@ -130,7 +127,7 @@ qsf.evaluateAllResults(result_files = files['result'],
absolute_disparity = ABSOLUTE_DISPARITY,
cluster_radius = CLUSTER_RADIUS,
fgbg_mode= FGBG_MODE,
labels = SLICE_LABELS,
labels = qsf.SLICE_LABELS,
logpath= LOGPATH)
image_data = qsf.initImageData(
......@@ -328,8 +325,8 @@ with tf.name_scope('epoch_average'):
tf.compat.v1.summary.scalar("sq_diff_epoch", tf_ph_sq_diff)
tf.compat.v1.summary.scalar("gtvar_diff", tf_gtvar_diff)
tf.compat.v1.summary.scalar("img_test0", tf_img_test0)
tf.compat.v1.summary.scalar("img_test9", tf_img_test9)
tf.compat.v1.summary.scalar("Disparity error", tf_img_test0)
tf.compat.v1.summary.scalar("NN gain over heuristic", tf_img_test9)
trainable_vars= tf.trainable_variables()
lr= tf.compat.v1.placeholder(tf.float32)
......@@ -425,7 +422,7 @@ with tf.Session() as sess:
gtvar_test_hist= np.empty(dataset_test_size, dtype=np.float32)
gtvar_train = 0.0
gtvar_test = 0.0
img_gain_test0 = 1.0
img_gain_test0 = 0.2
img_gain_test9 = 1.0
thr=None
......@@ -603,14 +600,14 @@ with tf.Session() as sess:
# Read the full image
###################################################
## test_summaries_img = [0.0]*len(ind_img) # datasets_img)
disp_out= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_cost_nw= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_cost_w= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_d= np.empty((WIDTH*HEIGHT), dtype=np.float32)
disp_out= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_cost_nw= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_cost_w= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_d= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_avg_disparity = np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_gt_disparity = np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_offs = np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_avg_disparity = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_gt_disparity = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_offs = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
for ntest in ind_img: # datasets_img):
dataset_img = qsf.readImageData(
......@@ -676,27 +673,27 @@ with tf.Session() as sess:
dbg_avg_disparity.reshape(-1,1),
dbg_gt_disparity.reshape(-1,1),
dbg_offs.reshape(-1,1),
extra, # len 3..6,
#adding extra data layers
extra, # len 3..6, #adding extra data layers
],1)
num_slices = rslt.shape[1]
np.save(
result_file,
rslt.reshape(HEIGHT,WIDTH,-1))
rslt.reshape(IMG_HEIGHT, IMG_WIDTH,-1))
eval_rslt = qsf.eval_results(
result_file,
ABSOLUTE_DISPARITY,
radius=0, # CLUSTER_RADIUS,
last_fgbg_mode = 1,
logfile=lf)
img_gain_test0 = eval_rslt[0][0]/eval_rslt[0][1]
# img_gain_test0 = eval_rslt[0][0]/eval_rslt[0][1]
img_gain_test0 = eval_rslt[9][1]
img_gain_test9 = eval_rslt[9][0]/eval_rslt[9][1]
if SAVE_TIFFS:
qsf.result_npy_to_tiff(
result_file,
ABSOLUTE_DISPARITY,
fix_nan = True,
labels=SLICE_LABELS[0:num_slices],
labels=qsf.SLICE_LABELS[0:num_slices],
logfile=lf)
"""
......
#!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#from PIL import Image
import os
import sys
#import glob
#import numpy as np
import imagej_tiffwriter
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import qcstereo_functions as qsf
import numpy as np
#import xml.etree.ElementTree as ET
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, dbg_parameters = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS,ABSOLUTE_DISPARITY = [None]*2
FIGS_EXTENSIONS = ['png','pdf','svg']
#FIGS_ESXTENSIONS = ['png','pdf','svg']
EVAL_MODES = ["train","infer"]
FIGS_SAVESHOW = ['save','show']
globals().update(parameters)
try:
FIGS_EXTENSIONS = globals()['FIGS_ESXTENSIONS'] # fixing typo in configs
except:
pass
#exit(0)
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs, files, suffix = SUFFIX)
#import tensorflow.contrib.slim as slim
NN_DISP = 0
HEUR_DISP = 1
GT_DISP = 2
GT_CONF = 3
NN_NAN = 4
HEUR_NAN = 5
NN_DIFF = 6
HEUR_DIFF = 7
CONF_MAX = 0.7
ERR_AMPL = 0.3
TIGHT_TOP = 0.95
TIGHT_HPAD = 1.0
TIGHT_WPAD = 1.0
FIGSIZE = [8.5,11.0]
WOI_COLOR = "red"
TRANSPARENT = True # for export
#dbg_parameters
def get_fig_params(disparity_ranges):
fig_params = []
for dr in disparity_ranges:
if dr[-1][0]=='-':
fig_params.append(None)
else:
subs = []
for s in dr[:-1]:
mm = s[:2]
try:
lims = s[2]
except IndexError:
lims = None
subs.append({'lim_val':mm, 'lim_xy':lims})
fig_params.append({'name':dr[-1],'ranges':subs})
return fig_params
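# A hypothetical dbg_parameters['disparity_ranges'] entry, to illustrate the structure
# get_fig_params() expects (all values made up):
#
# disparity_ranges = [
#     [[0.0, 5.0], [0.0, 2.0, [10, 50, 20, 60]], "scene-01"], # full range + zoomed WOI
#     ["-skip-this-image"],                                   # leading '-' in the name -> None
# ]
# get_fig_params(disparity_ranges) ->
# [{'name': 'scene-01',
#   'ranges': [{'lim_val': [0.0, 5.0], 'lim_xy': None},
#              {'lim_val': [0.0, 2.0], 'lim_xy': [10, 50, 20, 60]}]},
#  None]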
#try:
fig_params = get_fig_params(dbg_parameters['disparity_ranges'])
pass
#temporary:
TIFF_ONLY = False # True
#max_bad = 2.5 # excludes only direct bad
max_bad = 2.5 #2.5 # 1.5 # excludes only direct bad
max_diff = 1.5 # 2.0 # 5.0 # maximal max-min difference
max_target_err = 1.0 # 0.5 # maximal heuristic (target) disparity error; tiles above are excluded
max_disp = 5.0
min_strength = 0.18 #ignore tiles below
min_neibs = 1
max_log_to_mm = 0.5 # difference between center average and center should be under this fraction of max-min (0 - disables feature)
#num_bins = 256 # number of histogram bins
num_bins = 15 # 50 # number of histogram bins
use_gt_weights = True # False # True
index_gt = 2
index_gt_weight = 3
index_heur_err = 7
index_nn_err = 6
index_mm = 8 # max-min
index_log = 9
index_bad = 10
index_num_neibs = 11
"""
Debugging high 9-tile variations: remove the error for all tiles with a lower difference between max and min
"""
#min_diff = 0.25 # remove all flat tiles with spread less than this (do not show on heuristic/network disparity error subplots)
min_diff = 0 # remove all flat tiles with spread less than this
max_target_err2 = max_target_err * max_target_err
if not 'show' in FIGS_SAVESHOW:
plt.ioff()
#for mode in ['train','infer']:
for mode in ['infer']:
figs = []
ffiles = [] # no ext
def setlimsxy(lim_xy):
if not lim_xy is None:
plt.xlim(min(lim_xy[:2]),max(lim_xy[:2]))
plt.ylim(max(lim_xy[2:]),min(lim_xy[2:]))
cumul_weights = None
for nfile, fpars in enumerate(fig_params):
if not fpars is None:
img_file = files['result'][nfile]
if mode == 'infer':
img_file = img_file.replace('.npy','-infer.npy')
"""
try:
# data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=True)
# data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
except:
print ("Image file does not exist:", img_file)
continue
"""
pass
data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
if True: #TIFF_ONLY:
tiff_path = img_file.replace('.npy','-test.tiff')
data = data.transpose(2,0,1)
print("Saving results to TIFF: "+tiff_path)
imagej_tiffwriter.save(tiff_path,data,labels=labels)
"""
Calculate histograms
"""
err_heur2 = data[index_heur_err]*data[index_heur_err]
err_nn2 = data[index_nn_err]* data[index_nn_err]
diff_log2 = data[index_log]* data[index_log]
weights = (
(data[index_gt] < max_disp) &
(err_heur2 < max_target_err2) &
(data[index_bad] < max_bad) &
(data[index_gt_weight] >= min_strength) &
(data[index_num_neibs] >= min_neibs)&
#max_log_to_mm = 0.5 # difference between center average and center should be under this fraction of max-min (0 - disables feature)
(data[index_log] < max_log_to_mm * np.sqrt(data[index_mm]) )
).astype(data.dtype) # 0.0/1.0
#max_disp
#max_target_err
if use_gt_weights:
weights *= data[index_gt_weight]
mm = data[index_mm]
weh = np.nan_to_num(weights*err_heur2)
wen = np.nan_to_num(weights*err_nn2)
wel = np.nan_to_num(weights*diff_log2)
hist_weights,bin_vals = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = weights, density = False)
hist_err_heur2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = weh, density = False)
hist_err_nn2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = wen, density = False)
hist_diff_log2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = wel, density = False)
if cumul_weights is None:
cumul_weights = hist_weights
cumul_err_heur2 = hist_err_heur2
cumul_err_nn2 = hist_err_nn2
cumul_diff_log2 = hist_diff_log2
else:
cumul_weights += hist_weights
cumul_err_heur2 += hist_err_heur2
cumul_err_nn2 += hist_err_nn2
cumul_diff_log2 += hist_diff_log2
hist_err_heur2 = np.nan_to_num(hist_err_heur2/hist_weights)
hist_err_nn2 = np.nan_to_num(hist_err_nn2/hist_weights)
hist_gain2 = np.nan_to_num(hist_err_heur2/hist_err_nn2)
hist_gain = np.sqrt(hist_gain2)
hist_diff_log2 = np.nan_to_num(hist_diff_log2/hist_weights)
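# The per-bin values printed below are weighted RMS errors: sqrt(sum(w*err^2)/sum(w))
# over the tiles falling into each max-min bin. A tiny worked check (made-up numbers):
# two tiles with weights 1.0 and 3.0 and errors 0.2 and 0.4 pix give
# sqrt((1.0*0.04 + 3.0*0.16)/(1.0 + 3.0)) = sqrt(0.13) ~= 0.36 pix.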
print("hist_err_heur2", end = " ")
print(np.sqrt(hist_err_heur2))
print("hist_err_nn2", end = " ")
print(np.sqrt(hist_err_nn2))
print("hist_gain", end = " ")
print(hist_gain)
print("hist_diff_log2", end = " ")
print(np.sqrt(hist_diff_log2))
if min_diff> 0.0:
pass
good = (mm > min_diff).astype(mm.dtype)
good /= good # good tiles -> 1.0, bad tiles -> 0/0 = NaN
data[index_heur_err] *= good
data[index_nn_err] *= good
data = data.transpose(1,2,0)
if TIFF_ONLY:
continue
for subindex, rng in enumerate(fpars['ranges']):
lim_val = rng['lim_val']
lim_xy = rng['lim_xy']
fig = plt.figure(figsize=FIGSIZE)
fig.canvas.set_window_title(fpars['name'])
fig.suptitle(fpars['name'])
ax_conf=plt.subplot(322)
ax_conf.set_title("Ground truth confidence")
# fig.suptitle("Groud truth confidence")
plt.imshow(data[...,GT_CONF], vmin=0, vmax=CONF_MAX, cmap='gray')
if not lim_xy is None:
pass # show frame
xdata=[min(lim_xy[:2]),max(lim_xy[:2]),max(lim_xy[:2]),min(lim_xy[:2]),min(lim_xy[:2])]
ydata=[min(lim_xy[2:]),min(lim_xy[2:]),max(lim_xy[2:]),max(lim_xy[2:]),min(lim_xy[2:])]
plt.plot(xdata,ydata,color=WOI_COLOR)
# setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_gtd=plt.subplot(321)
ax_gtd.set_title("Ground truth disparity map")
plt.imshow(data[...,GT_DISP], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_hed=plt.subplot(323)
ax_hed.set_title("Heuristic disparity map")
plt.imshow(data[...,HEUR_NAN], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_nnd=plt.subplot(325)
ax_nnd.set_title("Network disparity output")
plt.imshow(data[...,NN_NAN], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_hee=plt.subplot(324)
ax_hee.set_title("Heuristic disparity error")
plt.imshow(data[...,HEUR_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL)
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_nne=plt.subplot(326)
ax_nne.set_title("Network disparity error")
plt.imshow(data[...,NN_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL)
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
plt.tight_layout(rect =[0,0,1,TIGHT_TOP], h_pad = TIGHT_HPAD, w_pad = TIGHT_WPAD)
figs.append(fig)
fb_noext = os.path.splitext(os.path.basename(img_file))[0]#
if subindex > 0:
if subindex < 10:
fb_noext+="abcdefghi"[subindex-1]
else:
fb_noext+="-"+str(subindex)
ffiles.append(fb_noext)
pass
if True:
cumul_err_heur2 = np.nan_to_num(cumul_err_heur2/cumul_weights)
cumul_err_nn2 = np.nan_to_num(cumul_err_nn2/cumul_weights)
cumul_gain2 = np.nan_to_num(cumul_err_heur2/cumul_err_nn2)
cumul_gain = np.sqrt(cumul_gain2)
cumul_diff_log2 = np.nan_to_num(cumul_diff_log2/cumul_weights)
print("cumul_weights", end = " ")
print(cumul_weights)
print("cumul_err_heur", end = " ")
print(np.sqrt(cumul_err_heur2))
print("cumul_err_nn", end = " ")
print(np.sqrt(cumul_err_nn2))
print("cumul_gain", end = " ")
print(cumul_gain)
print("cumul_diff_log2", end = " ")
print(np.sqrt(cumul_diff_log2))
fig, ax1 = plt.subplots()
ax1.set_xlabel('3x3 tiles ground truth disparity max-min (pix)')
ax1.set_ylabel('RMSE\n(pix)', color='black', rotation='horizontal')
ax1.yaxis.set_label_coords(-0.045,0.92)
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_err_nn2), 'tab:red',label="network disparity RMSE")
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2), 'tab:green',label="heuristic disparity RMSE")
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_diff_log2), 'tab:cyan',label="ground truth LoG")
ax1.tick_params(axis='y', labelcolor='black')
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel('weight', color='black', rotation='horizontal') # we already handled the x-label with ax1
ax2.yaxis.set_label_coords(1.06,1.0)
ax2.plot(bin_vals[0:-1], cumul_weights,color='grey',dashes=[6, 2],label='weights = n_tiles * gt_confidence')
ax1.legend(loc="upper left", bbox_to_anchor=(0.2,1.0))
ax2.legend(loc="lower right", bbox_to_anchor=(1.0,0.1))
"""
fig = plt.figure(figsize=FIGSIZE)
fig.canvas.set_window_title('Cumulative')
fig.suptitle('Difference to GT')
# ax_conf=plt.subplot(322)
ax_conf=plt.subplot(211)
ax_conf.set_title("RMS vs max9-min9")
plt.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2),'red',
bin_vals[0:-1], np.sqrt(cumul_err_nn2),'green',
bin_vals[0:-1], np.sqrt(cumul_diff_log2),'blue')
figs.append(fig)
ffiles.append('cumulative')
ax_conf=plt.subplot(212)
ax_conf.set_title("weights vs max9-min9")
plt.plot(bin_vals[0:-1], cumul_weights,'black')
"""
figs.append(fig)
ffiles.append('cumulative')
pass
#bin_vals[0:-1]
# fig.suptitle("Groud truth confidence")
#
#how to allow adjustment before applying tight_layout?
pass
for fig in figs:
fig.tight_layout(rect =[0,0,1,TIGHT_TOP], h_pad = TIGHT_HPAD, w_pad = TIGHT_WPAD)
if FIGS_EXTENSIONS and figs and 'save' in FIGS_SAVESHOW:
try:
print ("Creating output directory for figures: ",dirs['figures'])
os.makedirs(dirs['figures'])
except:
pass
pp=None
if 'pdf' in FIGS_EXTENSIONS:
if mode == 'infer':
pdf_path = os.path.join(dirs['figures'],"figures-infer%s.pdf"%str(min_diff))
else:
pdf_path = os.path.join(dirs['figures'],"figures-train%s.pdf"%str(min_diff))
pp= PdfPages(pdf_path)
for fb_noext, fig in zip(ffiles,figs):
for ext in FIGS_EXTENSIONS:
if ext == 'pdf':
pass
fig.savefig(pp,format='pdf')
else:
if mode == 'infer':
noext = fb_noext+'-infer'
else:
noext = fb_noext+'-train'
fig.savefig(
fname = os.path.join(dirs['figures'],noext+"."+ext),
transparent = TRANSPARENT,
)
pass
if pp:
pp.close()
if 'show' in FIGS_SAVESHOW:
plt.show()
#FIGS_ESXTENSIONS
#qsf.evaluateAllResults(result_files = files['result'],
# absolute_disparity = ABSOLUTE_DISPARITY,
# cluster_radius = CLUSTER_RADIUS)
print("All done")
exit (0)
#!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#from PIL import Image
import os
import sys
#import glob
#import numpy as np
import imagej_tiffwriter
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import qcstereo_functions as qsf
import numpy as np
#import xml.etree.ElementTree as ET
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 20 # 324 # tiles per image row Defined in config
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, dbg_parameters = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS,ABSOLUTE_DISPARITY = [None]*2
FGBG_MODE = 1 # 0 - do not filter by single-plane, 1 - remove split-plane tiles, 2 - remove split planes and neighbors
FIGS_EXTENSIONS = ['png','pdf','svg']
#FIGS_ESXTENSIONS = ['png','pdf','svg']
EVAL_MODES = ["train","infer"]
FIGS_SAVESHOW = ['save','show']
globals().update(parameters)
try:
FIGS_EXTENSIONS = globals()['FIGS_ESXTENSIONS'] # fixing typo in configs
except:
pass
#exit(0)
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs, files, suffix = SUFFIX)
#import tensorflow.contrib.slim as slim
#NN_DISP = 0
#HEUR_DISP = 1
#GT_DISP = 2
#GT_CONF = 3
#NN_NAN = 4
#HEUR_NAN = 5
#NN_DIFF = 6
#HEUR_DIFF = 7
# Now - more layers
CONF_MAX = 0.7
ERR_AMPL = 0.3
TIGHT_TOP = 0.95
TIGHT_HPAD = 1.0
TIGHT_WPAD = 1.0
FIGSIZE = [8.5,11.0]
WOI_COLOR = "red"
TRANSPARENT = True # for export
#dbg_parameters
def get_fig_params(disparity_ranges):
fig_params = []
for dr in disparity_ranges:
if dr[-1][0]=='-':
fig_params.append(None)
else:
subs = []
for s in dr[:-1]:
mm = s[:2]
try:
lims = s[2]
except IndexError:
lims = None
subs.append({'lim_val':mm, 'lim_xy':lims})
fig_params.append({'name':dr[-1],'ranges':subs})
return fig_params
#try:
fig_params = get_fig_params(dbg_parameters['disparity_ranges'])
pass
#temporary:
TIFF_ONLY = False # True
#max_bad = 2.5 # excludes only direct bad
max_bad = 2.5 #2.5 # 1.5 # excludes only direct bad
max_diff = 1.5 # 2.0 # 5.0 # maximal max-min difference
max_target_err = 1.0 # 0.5 # maximal heuristic (target) disparity error; tiles above are excluded
max_disp = 5.0
min_strength = 0.18 #ignore tiles below
min_neibs = 1
max_log_to_mm = 0.5 # difference between center average and center should be under this fraction of max-min (0 - disables feature)
#num_bins = 256 # number of histogram bins
num_bins = 15 # 50 # number of histogram bins
use_gt_weights = True # False # True
index_gt = 2
index_gt_weight = 3
index_heur_err = 7
index_nn_err = 6
index_mm = 8 # max-min
index_log = 9
index_bad = 10
index_num_neibs = 11
"""
Debugging high 9-tile variations: remove the error for all tiles with a lower difference between max and min
"""
#min_diff = 0.25 # remove all flat tiles with spread less than this (do not show on heuristic/network disparity error subplots)
min_diff = 0 # remove all flat tiles with spread less than this
max_target_err2 = max_target_err * max_target_err
if not 'show' in FIGS_SAVESHOW:
plt.ioff()
#for mode in ['train','infer']:
#for mode in ['infer']:
for mode in ['train']:
figs = []
ffiles = [] # no ext
def setlimsxy(lim_xy):
if not lim_xy is None:
plt.xlim(min(lim_xy[:2]),max(lim_xy[:2]))
plt.ylim(max(lim_xy[2:]),min(lim_xy[2:]))
cumul_weights = None
for nfile, fpars in enumerate(fig_params):
if not fpars is None:
img_file = files['result'][nfile]
if mode == 'infer':
img_file = img_file.replace('.npy','-infer.npy')
"""
try:
# data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=True)
# data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
except:
print ("Image file does not exist:", img_file)
continue
"""
pass
data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
if True: #TIFF_ONLY:
tiff_path = img_file.replace('.npy','-test.tiff')
data = data.transpose(2,0,1)
print("Saving results to TIFF: "+tiff_path)
imagej_tiffwriter.save(tiff_path,data,labels=labels)
"""
Calculate histograms
"""
err_heur2 = data[index_heur_err]*data[index_heur_err]
err_nn2 = data[index_nn_err]* data[index_nn_err]
diff_log2 = data[index_log]* data[index_log]
weights = (
(data[index_gt] < max_disp) &
(err_heur2 < max_target_err2) &
(data[index_bad] < max_bad) &
(data[index_gt_weight] >= min_strength) &
(data[index_num_neibs] >= min_neibs)&
#max_log_to_mm = 0.5 # difference between center average and center should be under this fraction of max-min (0 - disables feature)
(data[index_log] < max_log_to_mm * np.sqrt(data[index_mm]) )
).astype(data.dtype) # 0.0/1.0
#max_disp
#max_target_err
if use_gt_weights:
weights *= data[index_gt_weight]
mm = data[index_mm]
weh = np.nan_to_num(weights*err_heur2)
wen = np.nan_to_num(weights*err_nn2)
wel = np.nan_to_num(weights*diff_log2)
hist_weights,bin_vals = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = weights, density = False)
hist_err_heur2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = weh, density = False)
hist_err_nn2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = wen, density = False)
hist_diff_log2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = wel, density = False)
if cumul_weights is None:
cumul_weights = hist_weights
cumul_err_heur2 = hist_err_heur2
cumul_err_nn2 = hist_err_nn2
cumul_diff_log2 = hist_diff_log2
else:
cumul_weights += hist_weights
cumul_err_heur2 += hist_err_heur2
cumul_err_nn2 += hist_err_nn2
cumul_diff_log2 += hist_diff_log2
hist_err_heur2 = np.nan_to_num(hist_err_heur2/hist_weights)
hist_err_nn2 = np.nan_to_num(hist_err_nn2/hist_weights)
hist_gain2 = np.nan_to_num(hist_err_heur2/hist_err_nn2)
hist_gain = np.sqrt(hist_gain2)
hist_diff_log2 = np.nan_to_num(hist_diff_log2/hist_weights)
print("hist_err_heur2", end = " ")
print(np.sqrt(hist_err_heur2))
print("hist_err_nn2", end = " ")
print(np.sqrt(hist_err_nn2))
print("hist_gain", end = " ")
print(hist_gain)
print("hist_diff_log2", end = " ")
print(np.sqrt(hist_diff_log2))
if min_diff> 0.0:
pass
good = (mm > min_diff).astype(mm.dtype)
good /= good # good tiles -> 1.0, bad tiles -> 0/0 = NaN
data[index_heur_err] *= good
data[index_nn_err] *= good
data = data.transpose(1,2,0)
if TIFF_ONLY:
continue
for subindex, rng in enumerate(fpars['ranges']):
lim_val = rng['lim_val']
lim_xy = rng['lim_xy']
fig = plt.figure(figsize=FIGSIZE)
fig.canvas.set_window_title(fpars['name'])
fig.suptitle(fpars['name'])
ax_conf=plt.subplot(322)
ax_conf.set_title("Ground truth confidence")
# fig.suptitle("Groud truth confidence")
plt.imshow(data[...,qsf.GT_CONF], vmin=0, vmax=CONF_MAX, cmap='gray')
if not lim_xy is None:
pass # show frame
xdata=[min(lim_xy[:2]),max(lim_xy[:2]),max(lim_xy[:2]),min(lim_xy[:2]),min(lim_xy[:2])]
ydata=[min(lim_xy[2:]),min(lim_xy[2:]),max(lim_xy[2:]),max(lim_xy[2:]),min(lim_xy[2:])]
plt.plot(xdata,ydata,color=WOI_COLOR)
# setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_gtd=plt.subplot(321)
ax_gtd.set_title("Ground truth disparity map")
plt.imshow(data[...,qsf.GT_DISP], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_hed=plt.subplot(323)
ax_hed.set_title("Heuristic disparity map")
plt.imshow(data[...,qsf.HEUR_NAN], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_nnd=plt.subplot(325)
ax_nnd.set_title("Network disparity output")
plt.imshow(data[...,qsf.NN_NAN], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_hee=plt.subplot(324)
ax_hee.set_title("Heuristic disparity error")
plt.imshow(data[...,qsf.HEUR_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL)
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_nne=plt.subplot(326)
ax_nne.set_title("Network disparity error")
plt.imshow(data[...,qsf.NN_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL)
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
plt.tight_layout(rect =[0,0,1,TIGHT_TOP], h_pad = TIGHT_HPAD, w_pad = TIGHT_WPAD)
figs.append(fig)
fb_noext = os.path.splitext(os.path.basename(img_file))[0]#
if subindex > 0:
if subindex < 10:
fb_noext+="abcdefghi"[subindex-1]
else:
fb_noext+="-"+str(subindex)
ffiles.append(fb_noext)
pass
if True:
cumul_err_heur2 = np.nan_to_num(cumul_err_heur2/cumul_weights)
cumul_err_nn2 = np.nan_to_num(cumul_err_nn2/cumul_weights)
cumul_gain2 = np.nan_to_num(cumul_err_heur2/cumul_err_nn2)
cumul_gain = np.sqrt(cumul_gain2)
cumul_diff_log2 = np.nan_to_num(cumul_diff_log2/cumul_weights)
print("cumul_weights", end = " ")
print(cumul_weights)
print("cumul_err_heur", end = " ")
print(np.sqrt(cumul_err_heur2))
print("cumul_err_nn", end = " ")
print(np.sqrt(cumul_err_nn2))
print("cumul_gain", end = " ")
print(cumul_gain)
print("cumul_diff_log2", end = " ")
print(np.sqrt(cumul_diff_log2))
fig, ax1 = plt.subplots()
ax1.set_xlabel('3x3 tiles ground truth disparity max-min (pix)')
ax1.set_ylabel('RMSE\n(pix)', color='black', rotation='horizontal')
ax1.yaxis.set_label_coords(-0.045,0.92)
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_err_nn2), 'tab:red',label="network disparity RMSE")
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2), 'tab:green',label="heuristic disparity RMSE")
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_diff_log2), 'tab:cyan',label="ground truth LoG")
ax1.tick_params(axis='y', labelcolor='black')
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel('weight', color='black', rotation='horizontal') # we already handled the x-label with ax1
ax2.yaxis.set_label_coords(1.06,1.0)
ax2.plot(bin_vals[0:-1], cumul_weights,color='grey',dashes=[6, 2],label='weights = n_tiles * gt_confidence')
ax1.legend(loc="upper left", bbox_to_anchor=(0.2,1.0))
ax2.legend(loc="lower right", bbox_to_anchor=(1.0,0.1))
"""
fig = plt.figure(figsize=FIGSIZE)
fig.canvas.set_window_title('Cumulative')
fig.suptitle('Difference to GT')
# ax_conf=plt.subplot(322)
ax_conf=plt.subplot(211)
ax_conf.set_title("RMS vs max9-min9")
plt.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2),'red',
bin_vals[0:-1], np.sqrt(cumul_err_nn2),'green',
bin_vals[0:-1], np.sqrt(cumul_diff_log2),'blue')
figs.append(fig)
ffiles.append('cumulative')
ax_conf=plt.subplot(212)
ax_conf.set_title("weights vs max9-min9")
plt.plot(bin_vals[0:-1], cumul_weights,'black')
"""
figs.append(fig)
ffiles.append('cumulative')
pass
#bin_vals[0:-1]
# fig.suptitle("Ground truth confidence")
#
#how to allow adjustment before applying tight_layout?
pass
for fig in figs:
fig.tight_layout(rect =[0,0,1,TIGHT_TOP], h_pad = TIGHT_HPAD, w_pad = TIGHT_WPAD)
if FIGS_EXTENSIONS and figs and 'save' in FIGS_SAVESHOW:
try:
print ("Creating output directory for figures: ",dirs['figures'])
os.makedirs(dirs['figures'])
except:
pass
pp=None
if 'pdf' in FIGS_EXTENSIONS:
if mode == 'infer':
pdf_path = os.path.join(dirs['figures'],"figures-infer%s.pdf"%str(min_diff))
else:
pdf_path = os.path.join(dirs['figures'],"figures-train%s.pdf"%str(min_diff))
pp= PdfPages(pdf_path)
for fb_noext, fig in zip(ffiles,figs):
for ext in FIGS_EXTENSIONS:
if ext == 'pdf':
pass
fig.savefig(pp,format='pdf')
else:
if mode == 'infer':
noext = fb_noext+'-infer'
else:
noext = fb_noext+'-train'
fig.savefig(
fname = os.path.join(dirs['figures'],noext+"."+ext),
transparent = TRANSPARENT,
)
pass
if pp:
pp.close()
if 'show' in FIGS_SAVESHOW:
plt.show()
#FIGS_EXTENSIONS
#qsf.evaluateAllResults(result_files = files['result'],
# absolute_disparity = ABSOLUTE_DISPARITY,
# cluster_radius = CLUSTER_RADIUS)
print("All done")
exit (0)
......@@ -13,6 +13,41 @@ TIME_LAST = 0
TIME_START = 0
corr2_limits = None
MARGINS = 2 # disregard errors outside
NN_DISP = 0
#HEUR_DISP = 1
TARGET_DISP = 1
GT_DISP = 2
GT_CONF = 3
NN_NAN = 4 #first inserted layer
HEUR_NAN = 5
NN_DIFF = 6
HEUR_DIFF = 7
NN_ERR_SNGL = 8
NN_ERR_SNGL_NEIB = 9
FGBG_SNGL = 10
FGBG_SNGL_NEIB = 11 #last inserted layer
CUTCORN_COST_NW = 12
CUTCORN_COST = 13
GT_AVG_DIST = 14
AVG8_DISP = 15
GT_DISP1 = 16
OUT_AVG = 17
AUX_DISP = 18
FG_DISP = 19
BG_DISP = 20
GT_RMS = 21
GT_RMS_SPLIT = 22
EXTEND = CUTCORN_COST_NW - NN_NAN # insert this many layers (8)
SLICE_LABELS = ["nn_out_ext","target_disp","gt_disparity","gt_strength",
"cutcorn_cost_nw","cutcorn_cost",
"gt_avg_dist","avg8_disp","gt_disp","out_avg",
"aux_disp","fg_disp","bg_disp","gt_rms","gt_rms_split"]
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
......@@ -492,9 +527,12 @@ def readImageData(image_data,
'ntile': get_full_tile_indices(corr2d.shape[0]//width, width)}
if keep_gt:
gt_ds = dataset[:,cl+tl:cl+tl+gl]
extra = dataset[:,cl+tl+gl:]
image_data[indx]["gt_ds"] = gt_ds
image_data[indx]["gtruths"]= gt_ds.copy()
image_data[indx]["t_disps"]= target_disparity.reshape([-1,1]).copy()
image_data[indx]["extra"] = extra
image_data[indx]["t_extra"] = extra.copy()
else:
gt_ds = dataset[:,cl+tl:cl+tl+gl]
extra = dataset[:,cl+tl+gl:]
......@@ -577,42 +615,98 @@ def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True,labels=No
data will be written as 4-layer tiff, extension '.npy' replaced with '.tiff'
@param absolute - True - the first layer contains absolute disparity, False - difference from target_disparity
@param fix_nan - replace nan in target_disparity with 0 to apply offset, target_disparity will still contain nan
@parame insert_deltas: +1 - add delta layers, +2 - add variance (max - min of this and 8 neighbors)
@param insert_deltas: +1 - add delta layers, +2 - add variance (max - min of this and 8 neighbors)
with lwir data.shape = (15, 20, 15)
"""
data = np.load(npy_path) #(324,242,4) [nn_disp, target_disp,gt_disp, gt_conf]
if labels is None:
labels = ["chn%d"%(i) for i in range(data.shape[2])]
# labels = ["nn_out","hier_out","gt_disparity","gt_strength"]
nn_out = 0
# extend = 8 # inserted extend slices
# nn_out = 0
# target_disparity = 1
gt_disparity = 2
gt_strength = 3
heur_err = 7
# gt_disparity = 2
# gt_strength = 3
# nn_out1 = 4
# heur_out = 5
# nn_err = 6
# heur_err = 7
# nn_err_sngl = 8
# nn_err_sngl_neib = 9
# fgbg_sngl = 10
# fgbg_sngl_neib = 11
# cutcorn_cost_nw = 12
# aux_disp = 18
# fg_disp = 19
# bg_disp = 20
# gt_rms = 21
# gt_rms_split = 22
min_heur_err = 0.001
height = data.shape[0]
width = data.shape[1]
nocenter9 = np.array([[[1,1,1,1,np.nan,1,1,1,1]]], dtype = data.dtype)
if not absolute:
if fix_nan:
data[...,nn_out] += np.nan_to_num(data[...,1], copy=True)
data[...,NN_DISP] += np.nan_to_num(data[...,1], copy=True)
else:
data[...,nn_out] += data[...,1]
data[...,NN_DISP] += data[...,1]
if (insert_deltas & 1):
np.nan_to_num(data[...,gt_strength], copy=False)
data = np.concatenate([data[...,0:4],data[...,0:2],data[...,0:2],data[...,4:]], axis = 2) # data[...,4:] may be empty
labels = labels[:4]+["nn_out","hier_out","nn_err","hier_err"]+labels[4:]
data[...,6] -= data[...,gt_disparity]
data[...,7] -= data[...,gt_disparity]
for l in [2, 4, 5, 6, 7]:
np.nan_to_num(data[...,GT_CONF], copy=False)
data = np.concatenate(
[data[...,0:4],
data[...,NN_DISP: NN_DISP+1],
data[...,AUX_DISP-EXTEND:AUX_DISP-EXTEND+1], #data[...,0:2],
data[...,NN_DISP: NN_DISP+1],
data[...,AUX_DISP-EXTEND:AUX_DISP-EXTEND+1], #data[...,0:2],
np.empty_like(data[...,0:4]),
data[...,4:]],
axis = 2) # data[...,4:] may be empty
labels = labels[:4]+["nn_out","heur_out","nn_err","heur_err", "nn_err_sngl", "nn_err_sngl_neib", "fgbg_sngl", "fgbg_sngl_neib"]+labels[4:]
data[..., NN_DIFF] -= data[...,GT_DISP] # 6
data[..., HEUR_DIFF] -= data[...,GT_DISP] # 7
#replace data with NaN where gt_strength == 0 in selected layers
for l in [GT_DISP, NN_NAN, HEUR_NAN, NN_DIFF, HEUR_DIFF]: # 0, 4, 5, 6, 7
if l < data.shape[2]:
data[...,l] = np.select([data[...,gt_strength]==0.0, data[...,gt_strength]>0.0], [np.nan,data[...,l]])
data[...,l] = np.select([data[...,GT_CONF]==0.0, data[...,GT_CONF]>0.0], [np.nan,data[...,l]])
# All other layers - mask too
for l in range(8,data.shape[2]):
data[...,l] = np.select([data[...,gt_strength]==0.0, data[...,gt_strength]>0.0], [np.nan,data[...,l]])
# for l in range(8,data.shape[2]):
for l in range(CUTCORN_COST_NW, AUX_DISP):
data[...,l] = np.select([data[...,GT_CONF]==0.0, data[...,GT_CONF]>0.0], [np.nan,data[...,l]])
# Filter NN errors by excluding margins and using only single-plane (no FG+BG) tiles, and tiles that do not have split FG/BG neighbors
fgbg_single = data[...,GT_RMS] <= data[...,GT_RMS_SPLIT]
fgbg_ext = 1
fgbg_single_ext = np.ones((height + 2 * fgbg_ext, width + 2 * fgbg_ext),dtype=np.bool)
fgbg_single_ext[fgbg_ext:-fgbg_ext, fgbg_ext:-fgbg_ext] = fgbg_single
for dy in range(2*fgbg_ext+1):
for dx in range(2*fgbg_ext+1):
fgbg_single_ext[dy:dy+fgbg_single.shape[0], dx:dx+fgbg_single.shape[1]] &= fgbg_single
fgbg_single2 = fgbg_single_ext[fgbg_ext:-fgbg_ext,fgbg_ext:-fgbg_ext] #
#create margins array
if MARGINS > 0:
wo_margins = np.zeros((height, width), dtype=bool)
wo_margins[MARGINS:-MARGINS, MARGINS:-MARGINS] = True
fgbg_single &= wo_margins;
fgbg_single2 &= wo_margins;
data[..., NN_ERR_SNGL] = fgbg_single * data[..., NN_DIFF]
data[..., NN_ERR_SNGL_NEIB] = fgbg_single2 * data[..., NN_DIFF]
data[..., FGBG_SNGL] = fgbg_single * 1.0
data[..., FGBG_SNGL_NEIB] = fgbg_single2 * 1.0
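# (Descriptive note, not part of the original commit:) the loop above amounts to a 3x3 binary
# erosion: fgbg_single2 is True only where the tile itself and all of its in-frame 8 neighbors
# are single-plane (GT_RMS <= GT_RMS_SPLIT). With MARGINS > 0 the outer frame is then zeroed
# in both masks before they are stored as the 0.0/1.0 FGBG_SNGL / FGBG_SNGL_NEIB layers and
# used to gate NN_ERR_SNGL / NN_ERR_SNGL_NEIB.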
"""
Calculate bad tiles where gt was used as a master, to remove them from the results (later versions add random error)
"""
bad1 = abs(data[...,heur_err]) < min_heur_err
bad1 = abs(data[...,HEUR_DIFF]) < min_heur_err
bad1_ext = np.concatenate([bad1 [0:1,:], bad1 [0:1,:], bad1[:,:], bad1 [-1:height,:], bad1 [-1:height,:]],axis = 0)
bad1_ext = np.concatenate([bad1_ext[:,0:1], bad1_ext[:,0:1], bad1_ext[:,:], bad1_ext[:,-1:width], bad1_ext[:,-1:width]], axis = 1)
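# (Descriptive note, not part of the original commit:) bad1 is replicated twice along every
# edge so that each tile has a full 5x5 neighborhood available when the 25-layer bad25 stack
# is filled below.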
bad25 = np.empty(shape=[height, width, 25], dtype=bad1.dtype)
......@@ -634,10 +728,10 @@ def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True,labels=No
w8=np.array([wc,wo,wc,wo,0.0,wo,wc,wo,wc], dtype=data.dtype)
w8/=np.sum(w8) #normalize
gt_ext = np.concatenate([data[0:1,:,gt_disparity],data[:,:,gt_disparity],data[-1:height,:,gt_disparity]],axis = 0)
gt_ext = np.concatenate([gt_ext[:,0:1], gt_ext[:,:], gt_ext[:,-1:width]],axis = 1)
gs_ext = np.concatenate([data[0:1,:,gt_strength], data[:,:,gt_strength], data[-1:height,:,gt_strength]],axis = 0)
gs_ext = np.concatenate([gs_ext[:,0:1], gs_ext[:,:], gs_ext[:,-1:width]],axis = 1)
gt_ext = np.concatenate([data[0:1,:,GT_DISP], data[:,:,GT_DISP], data[-1:height,:,GT_DISP]],axis = 0)
gt_ext = np.concatenate([gt_ext[:,0:1], gt_ext[:,:], gt_ext[:,-1:width]], axis = 1)
gs_ext = np.concatenate([data[0:1,:,GT_CONF], data[:,:,GT_CONF], data[-1:height,:,GT_CONF]],axis = 0)
gs_ext = np.concatenate([gs_ext[:,0:1], gs_ext[:,:], gs_ext[:,-1:width]], axis = 1)
data9 = np.empty(shape=[height, width, 9], dtype=data.dtype)
weight9 = np.empty(shape=[height, width, 9], dtype=data.dtype)
......@@ -659,7 +753,7 @@ def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True,labels=No
dw_center = np.sum(data9*weight9, axis=2)
dw_center /= w_center # now dw_center - weighted average in the center
data[...,-3] = np.abs(data[...,gt_disparity]- dw_center)
data[...,-3] = np.abs(data[...,GT_DISP]- dw_center)
# data[...,-2] = data[...,gt_disparity]- dw_center
#data[...,-3] *= (data[...,-4] < 1.0) # just temporary
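# (Descriptive note, not part of the original commit:) w8 gives the corner/orthogonal neighbor
# weights with a zero center, so dw_center is the weighted average of a tile's 8 neighbors and
# the layer written above holds |gt_disparity - neighborhood average|.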
......@@ -766,7 +860,7 @@ MARGINS = 2 # disregard errors outside
for dx in range(2*radius+1):
not_nan_ext[dy:dy+not_nan.shape[0], dx:dx+not_nan.shape[1]] &= not_nan
not_nan = not_nan_ext[radius:-radius,radius:-radius]
if MARGINS > 0:
wo_margins = np.zeros((stack.shape[0],stack.shape[1]), dtype=bool)
wo_margins[MARGINS:-MARGINS, MARGINS:-MARGINS] = True
not_nan &= wo_margins
......