Commit bcf427de authored by Andrey Filippov

Modified to use newer Tiff write

parent 38b9513f
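For reference, a minimal sketch of the call-convention change behind "newer Tiff write", inferred only from the call sites in this diff. imagej_tiffwriter is the project-local writer module; the output path, array shape and label list below are hypothetical, and the save() signature is assumed from how the new code calls it.

import numpy as np
import imagej_tiffwriter  # project-local ImageJ TIFF writer (assumed importable)

tiff_path = "example_result.tiff"                 # hypothetical output path
data = np.zeros((242, 324, 4), dtype=np.float32)  # hypothetical (height, width, layers) stack
data = data.transpose(2, 0, 1)                    # -> (layers, height, width), as in the diff

# Older writer call: each slice needed an extra trailing axis
#   imagej_tiffwriter.save(tiff_path, data[..., np.newaxis])

# Newer writer call: pass the (layers, height, width) stack and name the slices
labels = ["nn_out_ext", "hier_out_ext", "gt_disparity", "gt_strength"]  # example labels
imagej_tiffwriter.save(tiff_path, data, labels=labels)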
@@ -420,7 +420,7 @@ def result_npy_to_tiff(npy_path, absolute, fix_nan):
else:
data[...,0] += data[...,1]
data = data.transpose(2,0,1)
- imagej_tiffwriter.save(tiff_path,data[...,np.newaxis])
+ imagej_tiffwriter.save(tiff_path,data)
def eval_results(rslt_path, absolute,
...
@@ -425,7 +425,7 @@ def result_npy_to_tiff(npy_path, absolute, fix_nan):
else:
data[...,0] += data[...,1]
data = data.transpose(2,0,1)
- imagej_tiffwriter.save(tiff_path,data[...,np.newaxis])
+ imagej_tiffwriter.save(tiff_path,data)
def eval_results(rslt_path, absolute,
...
@@ -424,7 +424,7 @@ def result_npy_to_tiff(npy_path, absolute, fix_nan):
else:
data[...,0] += data[...,1]
data = data.transpose(2,0,1)
- imagej_tiffwriter.save(tiff_path,data[...,np.newaxis])
+ imagej_tiffwriter.save(tiff_path,data)
def eval_results(rslt_path, absolute,
@@ -1161,6 +1161,7 @@ with tf.Session() as sess:
num_train_variants = len(datasets_train)
thr=None;
+ thr_result = None
trains_to_update = [train_next[n_train]['files'] > train_next[n_train]['slots'] for n_train in range(len(train_next))]
for epoch in range (EPOCHS_TO_RUN):
"""
...
@@ -101,6 +101,12 @@ NN_LAYOUTS = {0:[0, 0, 0, 32, 20, 16],
NN_LAYOUT1 = NN_LAYOUTS[NET_ARCH1]
NN_LAYOUT2 = NN_LAYOUTS[NET_ARCH2]
USE_PARTIALS = not PARTIALS_WEIGHTS is None # False - just a single Siamese net, True - partial outputs that use concentric squares of the first level subnets
+ # Tiff export slice labels
+ SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength",
+ "cutcorn_cost_nw","cutcorn_cost",
+ "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
@@ -115,11 +121,11 @@ if not USE_PARTIALS:
import tensorflow as tf
- #import tensorflow.contrib.slim as slim
qsf.evaluateAllResults(result_files = files['result'],
absolute_disparity = ABSOLUTE_DISPARITY,
- cluster_radius = CLUSTER_RADIUS)
+ cluster_radius = CLUSTER_RADIUS,
+ labels = SLICE_LABELS)
image_data = qsf.initImageData(
files = files,
@@ -130,14 +136,12 @@ image_data = qsf.initImageData(
width = IMG_WIDTH,
replace_nans = True)
- # return train_next, dataset_train_all, datasets_test
corr2d_len, target_disparity_len, _ = qsf.get_lengths(CLUSTER_RADIUS, TILE_LAYERS, TILE_SIDE)
train_next, dataset_train, datasets_test= qsf.initTrainTestData(
files = files,
cluster_radius = CLUSTER_RADIUS,
buffer_size = TRAIN_BUFFER_SIZE * BATCH_SIZE) # number of clusters per train
- ## return corr2d_len, target_disparity_len, train_next, dataset_train_merged, datasets_test
corr2d_train_placeholder = tf.placeholder(dataset_train.dtype, (None,FEATURES_PER_TILE * cluster_size)) # corr2d_train.shape)
@@ -153,15 +157,9 @@ tf_batch_weights = tf.placeholder(shape=(None,), dtype=tf.float32, name = "batch
feed_batch_weights = np.array(BATCH_WEIGHTS*(BATCH_SIZE//len(BATCH_WEIGHTS)), dtype=np.float32)
feed_batch_weight_1 = np.array([1.0], dtype=np.float32)
- ##dataset_train_size = len(datasets_train[0]['corr2d'])
- ##dataset_train_size //= BATCH_SIZE
- #dataset_train_size = TRAIN_BUFFER_GPU * num_train_subs # TRAIN_BUFFER_SIZE
- #dataset_test_size = len(datasets_test[0]['corr2d'])
dataset_test_size = len(datasets_test[0])
dataset_test_size //= BATCH_SIZE
- #dataset_img_size = len(datasets_img[0]['corr2d'])
dataset_img_size = len(image_data[0]['corr2d'])
dataset_img_size //= BATCH_SIZE
@@ -170,7 +168,6 @@ dataset_tt = dataset_tt.prefetch(BATCH_SIZE)
iterator_tt = dataset_tt.make_initializable_iterator()
next_element_tt = iterator_tt.get_next()
- #https://www.tensorflow.org/versions/r1.5/programmers_guide/datasets
result_dir = './attic/result_neibs_'+ SUFFIX+'/'
checkpoint_dir = './attic/result_neibs_'+ SUFFIX+'/'
save_freq = 500
@@ -181,7 +178,6 @@ def debug_gt_variance(
gt_ds_batch # [?:9:2]
):
with tf.name_scope("Debug_GT_Variance"):
- # tf_num_tiles = tf.shape(gt_ds_batch)[0]
d_gt_this = tf.reshape(gt_ds_batch[:,2 * indx],[-1], name = "d_this")
d_gt_center = tf.reshape(gt_ds_batch[:,2 * center_indx],[-1], name = "d_center")
d_gt_diff = tf.subtract(d_gt_this, d_gt_center, name = "d_diff")
@@ -189,9 +185,6 @@ def debug_gt_variance(
d_gt_var = tf.reduce_mean(d_gt_diff2, name = "d_gt_var")
return d_gt_var
- #def batchLoss
target_disparity_cluster = tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1], name="targdisp_cluster")
corr2d_Nx325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE], name="coor2d_cluster"),
target_disparity_cluster], axis=2, name = "corr2d_Nx325")
@@ -223,7 +216,6 @@ tf_partial_weights = tf.constant(PARTIALS_WEIGHTS,dtype=tf.float32,name="partial
G_losses = [0.0]*len(partials)
target_disparity_batch= next_element_tt['target_disparity'][:,center_tile_index:center_tile_index+1]
gt_ds_batch_clust = next_element_tt['gt_ds']
- #gt_ds_batch = next_element_tt['gt_ds'][:,2 * center_tile_index: 2 * (center_tile_index +1)]
gt_ds_batch = gt_ds_batch_clust[:,2 * center_tile_index: 2 * (center_tile_index +1)]
G_losses[0], _disp_slice, _d_gt_slice, _out_diff, _out_diff2, _w_norm, _out_wdiff2, _cost1 = qcstereo_losses.batchLoss(
out_batch = outs[0], # [batch_size,(1..2)] tf_result
@@ -282,8 +274,6 @@ else:
S_loss = tf.constant(0.0, dtype=tf.float32,name = "S_loss")
GS_loss = G_losses_sum # G_loss
- # G_loss += Glosses[n]*PARTIALS_WEIGHTS[n]
- #tf_partial_weights
if WLOSS_LAMBDA > 0.0:
W_loss = qcstereo_losses.weightsLoss(
inp_weights = inp_weights[0], # inp_weights - list of tensors, currently - just [0]
@@ -291,12 +281,11 @@ if WLOSS_LAMBDA > 0.0:
tile_side = TILE_SIDE, # 9
wborders_zero = WBORDERS_ZERO)
- # GW_loss = tf.add(G_loss, WLOSS_LAMBDA * W_loss, name = "GW_loss")
GW_loss = tf.add(GS_loss, WLOSS_LAMBDA * W_loss, name = "GW_loss")
else:
GW_loss = GS_loss # G_loss
W_loss = tf.constant(0.0, dtype=tf.float32,name = "W_loss")
- #debug
GT_variance = debug_gt_variance(indx = 0, # This tile index (0..8)
center_indx = 4, # center tile index
gt_ds_batch = next_element_tt['gt_ds'])# [?:18]
@@ -319,8 +308,6 @@ with tf.name_scope('sample'):
tf.summary.scalar("gtvar_diff", GT_variance)
with tf.name_scope('epoch_average'):
- # for i, tl in enumerate(tf_ph_G_losses):
- # tf.summary.scalar("GW_loss_epoch_"+str(i), tl)
for i in range(tf_ph_G_losses.shape[0]):
tf.summary.scalar("G_loss_epoch_"+str(i), tf_ph_G_losses[i])
@@ -375,7 +362,7 @@ with tf.Session() as sess:
loss_w_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_gw_test_hist= np.empty(dataset_test_size, dtype=np.float32)
- # loss_g_test_hist= np.empty(dataset_test_size, dtype=np.float32)
loss_g_test_hists= [np.empty(dataset_test_size, dtype=np.float32) for p in partials]
loss_s_test_hist= np.empty(dataset_test_size, dtype=np.float32)
@@ -465,12 +452,10 @@ with tf.Session() as sess:
for i in range(dataset_train_size):
try:
- # train_summary,_, GW_loss_trained, G_loss_trained, W_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
train_summary,_, GW_loss_trained, G_losses_trained, S_loss_trained, W_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
[ merged,
G_opt,
GW_loss,
- # G_loss,
G_losses,
S_loss,
W_loss,
@@ -506,13 +491,12 @@ with tf.Session() as sess:
except tf.errors.OutOfRangeError:
print("****** NO MORE DATA! train done at step %d"%(i))
break
- # print ("==== i=%d, GW_loss_trained=%f loss_gw_train_hist[%d]=%f ===="%(i,GW_loss_trained,i,loss_gw_train_hist[i]))
train_gw_avg = np.average(loss_gw_train_hist).astype(np.float32)
train_g_avg = np.average(loss_g_train_hist).astype(np.float32)
for nn, lgth in enumerate(loss_g_train_hists):
train_g_avgs[nn] = np.average(lgth).astype(np.float32)
- ###############
train_s_avg = np.average(loss_s_train_hist).astype(np.float32)
train_w_avg = np.average(loss_w_train_hist).astype(np.float32)
train2_avg = np.average(loss2_train_hist).astype(np.float32)
@@ -638,7 +622,6 @@ with tf.Session() as sess:
],
feed_dict={
tf_batch_weights: feed_batch_weight_1, # feed_batch_weights,
- # lr: learning_rate,
tf_ph_GW_loss: test_gw_avg,
tf_ph_G_loss: test_g_avg,
tf_ph_G_losses: train_g_avgs, # temporary, there is o data for test
@@ -671,7 +654,6 @@ with tf.Session() as sess:
except:
pass
- # rslt = np.concatenate([disp_out.reshape(-1,1), t_disp, gtruth],1)
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
@@ -682,12 +664,13 @@ with tf.Session() as sess:
dbg_avg_disparity.reshape(-1,1),
dbg_gt_disparity.reshape(-1,1),
dbg_offs.reshape(-1,1)],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY,radius=CLUSTER_RADIUS)
img_gain_test0 = rslt[0][0]/rslt[0][1]
img_gain_test9 = rslt[9][0]/rslt[9][1]
if SAVE_TIFFS:
- qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True)
+ qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS)
"""
Remove dataset_img (if it is not [0] to reduce memory footprint
@@ -699,7 +682,6 @@ with tf.Session() as sess:
train_writer.close()
test_writer.close()
test_writer1.close()
- #reports error: Exception ignored in: <bound method BaseSession.__del__ of <tensorflow.python.client.session.Session object at 0x7efc5f720ef0>> if there is no print before exit()
print("All done")
exit (0)
@@ -5,13 +5,13 @@ __license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
- from PIL import Image
+ #from PIL import Image
import os
import sys
- import glob
+ #import glob
- import numpy as np
+ #import numpy as np
import time
...
@@ -3,7 +3,6 @@ __copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
- #from numpy import float64
import os
import numpy as np
import tensorflow as tf
@@ -46,7 +45,6 @@ def parseXmlConfig(conf_file, root_dir):
files={}
for p in root.find('files'):
files[p.tag]=eval(p.text.strip())
- # globals().update(parameters)
dbg_parameters = {}
for p in root.find('dbg_parameters'):
dbg_parameters[p.tag]=eval(p.text.strip())
@@ -94,9 +92,6 @@ def readTFRewcordsEpoch(train_filename, cluster_radius):
file_all = os.path.join(npy_dir,filebasename + '.npy')
if os.path.exists(file_all):
data = np.load (file_all)
- # corr2d= np.load (file_corr2d)
- # target_disparity = np.load(file_target_disparity)
- # gt_ds = np.load(file_gt_ds)
else:
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
@@ -152,16 +147,12 @@ def read_and_decode(filename_queue, featrures_per_tile):
target_disparity = features['target_disparity'] # tf.decode_raw(features['target_disparity'], tf.float32)
gt_ds = tf.cast(features['gt_ds'], tf.float32) # tf.decode_raw(features['gt_ds'], tf.float32)
in_features = tf.concat([corr2d,target_disparity],0)
- # still some nan-s in correlation data?
- # in_features_clean = tf.where(tf.is_nan(in_features), tf.zeros_like(in_features), in_features)
- # corr2d_out, target_disparity_out, gt_ds_out = tf.train.shuffle_batch( [in_features_clean, target_disparity, gt_ds],
corr2d_out, target_disparity_out, gt_ds_out = tf.train.shuffle_batch( [in_features, target_disparity, gt_ds],
batch_size=1000, # 2,
capacity=30,
num_threads=2,
min_after_dequeue=10)
return corr2d_out, target_disparity_out, gt_ds_out
- #http://adventuresinmachinelearning.com/introduction-tensorflow-queuing/
def add_margins(npa,radius, val = np.nan):
npa_ext = np.empty((npa.shape[0]+2*radius, npa.shape[1]+2*radius, npa.shape[2]), dtype = npa.dtype)
npa_ext[radius:radius + npa.shape[0],radius:radius + npa.shape[1]] = npa
@@ -304,7 +295,6 @@ def initTrainTestData(
"""
num_trains = len(files['train'])
num_entries = num_trains * buffer_size
- # dataset_train_all = None
dataset_train_merged = None
train_next = [None]*num_trains
for n_train, f_train in enumerate(files['train']):
@@ -402,7 +392,7 @@ def initImageData(files,
print_time(" Done")
return img_data
- def evaluateAllResults(result_files, absolute_disparity, cluster_radius):
+ def evaluateAllResults(result_files, absolute_disparity, cluster_radius, labels=None):
for result_file in result_files:
try:
print_time("Reading resuts from "+result_file, end=" ")
@@ -412,12 +402,12 @@ def evaluateAllResults(result_files, absolute_disparity, cluster_radius):
continue
print_time("Done")
print_time("Saving resuts to tiff", end=" ")
- result_npy_to_tiff(result_file, absolute_disparity, fix_nan = True)
+ result_npy_to_tiff(result_file, absolute_disparity, fix_nan = True, labels=labels)
print_time("Done")
- def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True):
+ def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True,labels=None):
"""
@param npy_path full path to the npy file with 4-layer data (242,324,4) - nn_disparity(offset), target_disparity, gt disparity, gt strength
@@ -426,6 +416,9 @@ def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True):
@param fix_nan - replace nan in target_disparity with 0 to apply offset, target_disparity will still contain nan
"""
data = np.load(npy_path) #(324,242,4) [nn_disp, target_disp,gt_disp, gt_conf]
+ if labels is None:
+ labels = ["chn%d"%(i) for i in range(data.shape[0])]
+ # labels = ["nn_out","hier_out","gt_disparity","gt_strength"]
nn_out = 0
# target_disparity = 1
gt_disparity = 2
@@ -438,6 +431,7 @@ def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True):
if insert_deltas:
np.nan_to_num(data[...,gt_strength], copy=False)
data = np.concatenate([data[...,0:4],data[...,0:2],data[...,0:2],data[...,4:]], axis = 2)
+ labels = labels[:4]+["nn_out","hier_out","nn_err","hier_err"]+labels[4:]
data[...,6] -= data[...,gt_disparity]
data[...,7] -= data[...,gt_disparity]
for l in [2, 4, 5, 6, 7]:
@@ -445,9 +439,13 @@ def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True):
# All other layers - mast too
for l in range(8,data.shape[2]):
data[...,l] = np.select([data[...,gt_strength]==0.0, data[...,gt_strength]>0.0], [np.nan,data[...,l]])
- return data
+ return data, labels
- def result_npy_to_tiff(npy_path, absolute, fix_nan, insert_deltas=True):
+ def result_npy_to_tiff(npy_path,
+ absolute,
+ fix_nan,
+ insert_deltas=True,
+ labels = None):
"""
@param npy_path full path to the npy file with 4-layer data (242,324,4) - nn_disparity(offset), target_disparity, gt disparity, gt strength
@@ -455,12 +453,12 @@ def result_npy_to_tiff(npy_path, absolute, fix_nan, insert_deltas=True):
@param absolute - True - the first layer contains absolute disparity, False - difference from target_disparity
@param fix_nan - replace nan in target_disparity with 0 to apply offset, target_disparity will still contain nan
"""
- data = result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas)
+ data,labels = result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas, labels=labels)
tiff_path = npy_path.replace('.npy','.tiff')
data = data.transpose(2,0,1)
print("Saving results to TIFF: "+tiff_path)
- imagej_tiffwriter.save(tiff_path,data[...,np.newaxis])
+ imagej_tiffwriter.save(tiff_path,data,labels=labels)
def eval_results(rslt_path, absolute,
min_disp = -0.1, #minimal GT disparity
@@ -469,7 +467,6 @@ def eval_results(rslt_path, absolute,
max_ofst_result = 1.0,
str_pow = 2.0,
radius = 0):
- # for min_disparity, max_disparity, max_offset_target, max_offset_result, strength_pow in [
variants = [[ -0.1, 5.0, 0.5, 0.5, 1.0],
[ -0.1, 5.0, 0.5, 0.5, 2.0],
[ -0.1, 5.0, 0.2, 0.2, 1.0],
...
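As a usage note, here is a stand-alone sketch of how slice labels end up aligned with the layers written to the TIFF. The helper name is hypothetical; the fallback naming and the delta-label splicing follow the result_npy_prepare() changes above (the real function also builds the matching data array).

def make_slice_labels(num_layers, labels=None, insert_deltas=True):
    # Fall back to generic channel names when no labels are supplied.
    if labels is None:
        labels = ["chn%d" % i for i in range(num_layers)]
    if insert_deltas:
        # Four extra slices are spliced in after the first four layers,
        # matching the data concatenation done when insert_deltas is True.
        labels = labels[:4] + ["nn_out", "hier_out", "nn_err", "hier_err"] + labels[4:]
    return labels

print(make_slice_labels(4))
# -> ['chn0', 'chn1', 'chn2', 'chn3', 'nn_out', 'hier_out', 'nn_err', 'hier_err']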