Commit 79c4adb7 authored by Andrey Filippov

continue splitting code to multiple files

parent 69def1b5
#!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import npairs_loss
#from debian.deb822 import PdiffIndex
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
from PIL import Image
import os
import sys
import glob
import numpy as np
import itertools
import time
import matplotlib.pyplot as plt
import shutil
from threading import Thread
#import imagej_tiffwriter
import qcstereo_network
import qcstereo_losses
import qcstereo_functions as qsf
#import xml.etree.ElementTree as ET
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=",SLOSS_CLIP)
globals().update(parameters)
#exit(0)
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch has balanced D/S tiles; batches are shuffled, but tiles inside a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
NN_LAYOUTS = {0:[0, 0, 0, 32, 20, 16],
1:[0, 0, 0, 256, 128, 64],
2:[0, 128, 32, 32, 32, 16],
3:[0, 0, 40, 32, 20, 16],
4:[0, 0, 0, 0, 16, 16],
5:[0, 0, 64, 32, 32, 16],
6:[0, 0, 32, 16, 16, 16],
7:[0, 0, 64, 16, 16, 16],
8:[0, 0, 0, 64, 20, 16],
9:[0, 0, 256, 64, 32, 16],
10:[0, 256, 128, 64, 32, 16],
11:[0, 0, 0, 0, 64, 32],
}
NN_LAYOUT1 = NN_LAYOUTS[NET_ARCH1]
NN_LAYOUT2 = NN_LAYOUTS[NET_ARCH2]
USE_PARTIALS = PARTIALS_WEIGHTS is not None # False - just a single Siamese net, True - partial outputs that use concentric squares of the first level subnets
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
#reading to memory (testing)
train_next = [{'file':0, 'slot':0, 'files':0, 'slots':0},
{'file':0, 'slot':0, 'files':0, 'slots':0}]
if TWO_TRAINS:
train_next += [{'file':0, 'slot':0, 'files':0, 'slots':0},
{'file':0, 'slot':0, 'files':0, 'slots':0}]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
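# For CLUSTER_RADIUS == 2 this gives cluster_size == 25 (a 5x5 tile cluster) and
# center_tile_index == 12, the index of the central tile in the flattened cluster
# (2*R*(R+1) == (2*R+1)**2 // 2).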
qsf.prepareFiles(dirs, files, suffix = SUFFIX)
partials = None
partials = qsf.concentricSquares(CLUSTER_RADIUS)
PARTIALS_WEIGHTS = [1.0*pw/sum(PARTIALS_WEIGHTS) for pw in PARTIALS_WEIGHTS]
if not USE_PARTIALS:
partials = partials[0:1]
PARTIALS_WEIGHTS = [1.0]
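# partials[0] is the full (outermost) concentric-square mask covering the whole cluster,
# so with USE_PARTIALS == False only the single full Siamese output is kept and its
# normalized weight becomes 1.0.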
import tensorflow as tf
#import tensorflow.contrib.slim as slim
qsf.evaluateAllResults(result_files = files['result'],
absolute_disparity = ABSOLUTE_DISPARITY,
cluster_radius = CLUSTER_RADIUS)
image_data = qsf.initImageData(
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = CLUSTER_RADIUS,
width = IMG_WIDTH,
replace_nans = True)
datasets_train, datasets_test, num_train_sets= qsf.initTrainTestData(
files = files,
cluster_radius = CLUSTER_RADIUS,
max_files_per_group = MAX_FILES_PER_GROUP, # shuffling buffer for files
two_trains = TWO_TRAINS,
train_next = train_next)
corr2d_train_placeholder = tf.placeholder(datasets_train[0]['corr2d'].dtype, (None,FEATURES_PER_TILE * cluster_size)) # corr2d_train.shape)
target_disparity_train_placeholder = tf.placeholder(datasets_train[0]['target_disparity'].dtype, (None,1 * cluster_size)) #target_disparity_train.shape)
gt_ds_train_placeholder = tf.placeholder(datasets_train[0]['gt_ds'].dtype, (None,2 * cluster_size)) #gt_ds_train.shape)
dataset_tt = tf.data.Dataset.from_tensor_slices({
"corr2d":corr2d_train_placeholder,
"target_disparity": target_disparity_train_placeholder,
"gt_ds": gt_ds_train_placeholder})
tf_batch_weights = tf.placeholder(shape=(None,), dtype=tf.float32, name = "batch_weights") # way to increase importance of the high variance clusters
feed_batch_weights = np.array(BATCH_WEIGHTS*(BATCH_SIZE//len(BATCH_WEIGHTS)), dtype=np.float32)
feed_batch_weight_1 = np.array([1.0], dtype=np.float32)
dataset_train_size = len(datasets_train[0]['corr2d'])
dataset_train_size //= BATCH_SIZE
dataset_test_size = len(datasets_test[0]['corr2d'])
dataset_test_size //= BATCH_SIZE
#dataset_img_size = len(datasets_img[0]['corr2d'])
dataset_img_size = len(image_data[0]['corr2d'])
dataset_img_size //= BATCH_SIZE
dataset_tt = dataset_tt.batch(BATCH_SIZE)
dataset_tt = dataset_tt.prefetch(BATCH_SIZE)
iterator_tt = dataset_tt.make_initializable_iterator()
next_element_tt = iterator_tt.get_next()
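# The same placeholder-fed initializable iterator serves train, test and full-image data:
# it is re-initialized below with a different feed_dict for each pass
# (see the sess.run(iterator_tt.initializer, ...) calls).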
#https://www.tensorflow.org/versions/r1.5/programmers_guide/datasets
result_dir = './attic/result_neibs_'+ SUFFIX+'/'
checkpoint_dir = './attic/result_neibs_'+ SUFFIX+'/'
save_freq = 500
def debug_gt_variance(
indx, # This tile index (0..8)
center_indx, # center tile index
gt_ds_batch # [?:9:2]
):
with tf.name_scope("Debug_GT_Variance"):
tf_num_tiles = tf.shape(gt_ds_batch)[0]
d_gt_this = tf.reshape(gt_ds_batch[:,2 * indx],[-1], name = "d_this")
d_gt_center = tf.reshape(gt_ds_batch[:,2 * center_indx],[-1], name = "d_center")
d_gt_diff = tf.subtract(d_gt_this, d_gt_center, name = "d_diff")
d_gt_diff2 = tf.multiply(d_gt_diff, d_gt_diff, name = "d_diff2")
d_gt_var = tf.reduce_mean(d_gt_diff2, name = "d_gt_var")
return d_gt_var
#def batchLoss
target_disparity_cluster = tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1], name="targdisp_cluster")
corr2d_Nx325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE], name="corr2d_cluster"),
target_disparity_cluster], axis=2, name = "corr2d_Nx325")
if SPREAD_CONVERGENCE:
outs, inp_weights = qcstereo_network.networks_siam(
input = corr2d_Nx325,
input_global = target_disparity_cluster,
layout1 = NN_LAYOUT1,
layout2 = NN_LAYOUT2,
inter_convergence = INTER_CONVERGENCE,
sym8 = SYM8_SUB,
only_tile = ONLY_TILE, #Remove/put None for normal operation
partials = partials,
use_confidence= USE_CONFIDENCE)
else:
outs, inp_weights = qcstereo_network.networks_siam(
input= corr2d_Nx325,
input_global = None,
layout1 = NN_LAYOUT1,
layout2 = NN_LAYOUT2,
inter_convergence = False,
sym8 = SYM8_SUB,
only_tile = ONLY_TILE, #Remove/put None for normal operation
partials = partials,
use_confidence= USE_CONFIDENCE)
tf_partial_weights = tf.constant(PARTIALS_WEIGHTS,dtype=tf.float32,name="partial_weights")
G_losses = [0.0]*len(partials)
target_disparity_batch= next_element_tt['target_disparity'][:,center_tile_index:center_tile_index+1]
gt_ds_batch_clust = next_element_tt['gt_ds']
#gt_ds_batch = next_element_tt['gt_ds'][:,2 * center_tile_index: 2 * (center_tile_index +1)]
gt_ds_batch = gt_ds_batch_clust[:,2 * center_tile_index: 2 * (center_tile_index +1)]
G_losses[0], _disp_slice, _d_gt_slice, _out_diff, _out_diff2, _w_norm, _out_wdiff2, _cost1 = qcstereo_losses.batchLoss(
out_batch = outs[0], # [batch_size,(1..2)] tf_result
target_disparity_batch= target_disparity_batch, # next_element_tt['target_disparity'][:,center_tile_index:center_tile_index+1], # target_disparity_batch_center, # next_element_tt['target_disparity'], # target_disparity, ### target_d, # [batch_size] tf placeholder
gt_ds_batch = gt_ds_batch, # next_element_tt['gt_ds'][:,2 * center_tile_index: 2 * (center_tile_index +1)], # gt_ds_batch_center, ## next_element_tt['gt_ds'], # gt_ds, ### gt, # [batch_size,2] tf placeholder
batch_weights = tf_batch_weights,
disp_diff_cap = DISP_DIFF_CAP,
disp_diff_slope= DISP_DIFF_SLOPE,
absolute_disparity = ABSOLUTE_DISPARITY,
use_confidence = USE_CONFIDENCE, # True,
lambda_conf_avg = 0.01,
lambda_conf_pwr = 0.1,
conf_pwr = 2.0,
gt_conf_offset = 0.08,
gt_conf_pwr = 2.0,
error2_offset = 0, # 0.0025, # (0.05^2)
disp_wmin = 1.0, # minimal disparity to apply weight boosting for small disparities
disp_wmax = 8.0, # maximal disparity to apply weight boosting for small disparities
use_out = False) # use calculated disparity for disparity weight boosting (False - use target disparity)
G_loss = G_losses[0]
for n in range (1,len(partials)):
G_losses[n], _, _, _, _, _, _, _ = qcstereo_losses.batchLoss(
out_batch = outs[n], # [batch_size,(1..2)] tf_result
target_disparity_batch= target_disparity_batch, #next_element_tt['target_disparity'][:,center_tile_index:center_tile_index+1], # target_disparity_batch_center, # next_element_tt['target_disparity'], # target_disparity, ### target_d, # [batch_size] tf placeholder
gt_ds_batch = gt_ds_batch, # next_element_tt['gt_ds'][:,2 * center_tile_index: 2 * (center_tile_index +1)], # gt_ds_batch_center, ## next_element_tt['gt_ds'], # gt_ds, ### gt, # [batch_size,2] tf placeholder
batch_weights = tf_batch_weights,
disp_diff_cap = DISP_DIFF_CAP,
disp_diff_slope= DISP_DIFF_SLOPE,
absolute_disparity = ABSOLUTE_DISPARITY,
use_confidence = USE_CONFIDENCE, # True,
lambda_conf_avg = 0.01,
lambda_conf_pwr = 0.1,
conf_pwr = 2.0,
gt_conf_offset = 0.08,
gt_conf_pwr = 2.0,
error2_offset = 0, # 0.0025, # (0.05^2)
disp_wmin = 1.0, # minimal disparity to apply weight boosting for small disparities
disp_wmax = 8.0, # maximal disparity to apply weight boosting for small disparities
use_out = False) # use calculated disparity for disparity weight boosting (False - use target disparity)
tf_wlosses = tf.multiply(G_losses, tf_partial_weights, name = "tf_wlosses")
G_losses_sum = tf.reduce_sum(tf_wlosses, name = "G_losses_sum")
if SLOSS_LAMBDA > 0:
S_loss, rslt_cost_nw, rslt_cost_w, rslt_d , rslt_avg_disparity, rslt_gt_disparity, rslt_offs = qcstereo_losses.smoothLoss(
out_batch = outs[0], # [batch_size,(1..2)] tf_result
target_disparity_batch = target_disparity_batch, # [batch_size] tf placeholder
gt_ds_batch_clust = gt_ds_batch_clust, # [batch_size,25,2] tf placeholder
clip = SLOSS_CLIP,
absolute_disparity = ABSOLUTE_DISPARITY, #when false there should be no activation on disparity output !
cluster_radius = CLUSTER_RADIUS)
GS_loss = tf.add(G_losses_sum, SLOSS_LAMBDA * S_loss, name = "GS_loss")
else:
S_loss = tf.constant(0.0, dtype=tf.float32,name = "S_loss")
GS_loss = G_losses_sum # G_loss
# G_loss += Glosses[n]*PARTIALS_WEIGHTS[n]
#tf_partial_weights
if WLOSS_LAMBDA > 0.0:
W_loss = qcstereo_losses.weightsLoss(
inp_weights = inp_weights[0], # inp_weights - list of tensors, currently - just [0]
tile_layers= TILE_LAYERS, # 4
tile_side = TILE_SIDE, # 9
wborders_zero = WBORDERS_ZERO)
# GW_loss = tf.add(G_loss, WLOSS_LAMBDA * W_loss, name = "GW_loss")
GW_loss = tf.add(GS_loss, WLOSS_LAMBDA * W_loss, name = "GW_loss")
else:
GW_loss = GS_loss # G_loss
W_loss = tf.constant(0.0, dtype=tf.float32,name = "W_loss")
#debug
GT_variance = debug_gt_variance(indx = 0, # This tile index (0..8)
center_indx = 4, # center tile index
gt_ds_batch = next_element_tt['gt_ds'])# [?:18]
tf_ph_G_loss = tf.placeholder(tf.float32,shape=None,name='G_loss_avg')
tf_ph_G_losses = tf.placeholder(tf.float32,shape=[len(partials)],name='G_losses_avg')
tf_ph_S_loss = tf.placeholder(tf.float32,shape=None,name='S_loss_avg')
tf_ph_W_loss = tf.placeholder(tf.float32,shape=None,name='W_loss_avg')
tf_ph_GW_loss = tf.placeholder(tf.float32,shape=None,name='GW_loss_avg')
tf_ph_sq_diff = tf.placeholder(tf.float32,shape=None,name='sq_diff_avg')
tf_gtvar_diff = tf.placeholder(tf.float32,shape=None,name='gtvar_diff')
tf_img_test0 = tf.placeholder(tf.float32,shape=None,name='img_test0')
tf_img_test9 = tf.placeholder(tf.float32,shape=None,name='img_test9')
with tf.name_scope('sample'):
tf.summary.scalar("GW_loss", GW_loss)
tf.summary.scalar("G_loss", G_loss)
tf.summary.scalar("S_loss", S_loss)
tf.summary.scalar("W_loss", W_loss)
tf.summary.scalar("sq_diff", _cost1)
tf.summary.scalar("gtvar_diff", GT_variance)
with tf.name_scope('epoch_average'):
# for i, tl in enumerate(tf_ph_G_losses):
# tf.summary.scalar("GW_loss_epoch_"+str(i), tl)
for i in range(tf_ph_G_losses.shape[0]):
tf.summary.scalar("G_loss_epoch_"+str(i), tf_ph_G_losses[i])
tf.summary.scalar("GW_loss_epoch", tf_ph_GW_loss)
tf.summary.scalar("G_loss_epoch", tf_ph_G_loss)
tf.summary.scalar("S_loss_epoch", tf_ph_S_loss)
tf.summary.scalar("W_loss_epoch", tf_ph_W_loss)
tf.summary.scalar("sq_diff_epoch", tf_ph_sq_diff)
tf.summary.scalar("gtvar_diff", tf_gtvar_diff)
tf.summary.scalar("img_test0", tf_img_test0)
tf.summary.scalar("img_test9", tf_img_test9)
t_vars= tf.trainable_variables()
lr= tf.placeholder(tf.float32)
G_opt= tf.train.AdamOptimizer(learning_rate=lr).minimize(GW_loss)
ROOT_PATH = './attic/nn_ds_neibs16_graph'+SUFFIX+"/"
TRAIN_PATH = ROOT_PATH + 'train'
TEST_PATH = ROOT_PATH + 'test'
TEST_PATH1 = ROOT_PATH + 'test1'
# CLEAN OLD STUFF
shutil.rmtree(TRAIN_PATH, ignore_errors=True)
shutil.rmtree(TEST_PATH, ignore_errors=True)
shutil.rmtree(TEST_PATH1, ignore_errors=True)
WIDTH=324
HEIGHT=242
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(TRAIN_PATH, sess.graph)
test_writer = tf.summary.FileWriter(TEST_PATH, sess.graph)
test_writer1 = tf.summary.FileWriter(TEST_PATH1, sess.graph)
loss_gw_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_g_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_g_train_hists= [np.empty(dataset_train_size, dtype=np.float32) for p in partials]
loss_s_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_w_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_gw_test_hist= np.empty(dataset_test_size, dtype=np.float32)
# loss_g_test_hist= np.empty(dataset_test_size, dtype=np.float32)
loss_g_test_hists= [np.empty(dataset_test_size, dtype=np.float32) for p in partials]
loss_s_test_hist= np.empty(dataset_test_size, dtype=np.float32)
loss_w_test_hist= np.empty(dataset_test_size, dtype=np.float32)
loss2_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss2_test_hist= np.empty(dataset_test_size, dtype=np.float32)
train_gw_avg = 0.0
train_g_avg = 0.0
train_g_avgs = [0.0]*len(partials)
train_w_avg = 0.0
train_s_avg = 0.0
test_gw_avg = 0.0
test_g_avg = 0.0
test_g_avgs = [0.0]*len(partials)
test_w_avg = 0.0
test_s_avg = 0.0
train2_avg = 0.0
test2_avg = 0.0
gtvar_train_hist= np.empty(dataset_train_size, dtype=np.float32)
gtvar_test_hist= np.empty(dataset_test_size, dtype=np.float32)
gtvar_train = 0.0
gtvar_test = 0.0
gtvar_train_avg = 0.0
gtvar_test_avg = 0.0
img_gain_test0 = 1.0
img_gain_test9 = 1.0
num_train_variants = len(datasets_train)
thr = None
trains_to_update = [train_next[n_train]['files'] > train_next[n_train]['slots'] for n_train in range(len(train_next))]
for epoch in range (EPOCHS_TO_RUN):
"""
update files after each epoch, all 4.
Convert to threads after testing
"""
if (FILE_UPDATE_EPOCHS > 0) and (epoch % FILE_UPDATE_EPOCHS == 0):
if thr is not None:
if thr.is_alive():
qsf.print_time("Waiting until tfrecord gets loaded", end=" ")
else:
qsf.print_time("tfrecord is already loaded", end=" ")
thr.join()
qsf.print_time("Done")
qsf.print_time("Inserting new data", end=" ")
for n_train in range(len(trains_to_update)):
if trains_to_update[n_train]:
# print("n_train= %d, len(thr_result)=%d"%(n_train,len(thr_result)))
qsf.replaceNextDataset(datasets_train,
thr_result[n_train],
train_next= train_next[n_train],
nset=n_train,
period=len(train_next))
qsf._nextFileSlot(train_next[n_train])
qsf.print_time("Done")
thr_result = []
fpaths = []
for n_train in range(len(train_next)):
if train_next[n_train]['files'] > train_next[n_train]['slots']:
fpaths.append(files['train'][n_train][train_next[n_train]['file']])
qsf.print_time("Will read in background: "+fpaths[-1])
thr = Thread(target=qsf.getMoreFiles, args=(fpaths,thr_result, CLUSTER_RADIUS, HOR_FLIP, TILE_LAYERS, TILE_SIDE))
thr.start()
file_index = epoch % num_train_variants
if epoch >=600:
learning_rate = LR600
elif epoch >=400:
learning_rate = LR400
elif epoch >=200:
learning_rate = LR200
elif epoch >=100:
learning_rate = LR100
else:
learning_rate = LR
# print ("sr1",file=sys.stderr,end=" ")
if (file_index == 0) and SHUFFLE_FILES:
# num_train_sets == len(datasets_train_all)
qsf.print_time("Shuffling how datasets_train_lvar and datasets_train_hvar are zipped together", end="")
for i in range(num_train_sets):
qsf.shuffle_in_place (datasets_train, i, num_train_sets)
qsf.print_time(" Done")
qsf.print_time("Shuffling tile chunks ", end="")
qsf.shuffle_chunks_in_place (datasets_train, 1)
qsf.print_time(" Done")
sess.run(iterator_tt.initializer, feed_dict={corr2d_train_placeholder: datasets_train[file_index]['corr2d'],
target_disparity_train_placeholder: datasets_train[file_index]['target_disparity'],
gt_ds_train_placeholder: datasets_train[file_index]['gt_ds']})
for i in range(dataset_train_size):
try:
# train_summary,_, GW_loss_trained, G_loss_trained, W_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
train_summary,_, GW_loss_trained, G_losses_trained, S_loss_trained, W_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
[ merged,
G_opt,
GW_loss,
# G_loss,
G_losses,
S_loss,
W_loss,
outs[0],
_disp_slice,
_d_gt_slice,
_out_diff,
_out_diff2,
_w_norm,
_out_wdiff2,
_cost1,
GT_variance
],
feed_dict={tf_batch_weights: feed_batch_weights,
lr: learning_rate,
tf_ph_GW_loss: train_gw_avg,
tf_ph_G_loss: train_g_avgs[0], #train_g_avg,
tf_ph_G_losses: train_g_avgs,
tf_ph_S_loss: train_s_avg,
tf_ph_W_loss: train_w_avg,
tf_ph_sq_diff: train2_avg,
tf_gtvar_diff: gtvar_train_avg,
tf_img_test0: img_gain_test0,
tf_img_test9: img_gain_test9}) # previous value of *_avg #Fetch argument 0.0 has invalid type <class 'float'>, must be a string or Tensor. (Can not convert a float into a Tensor or Operation.)
loss_gw_train_hist[i] = GW_loss_trained
# loss_g_train_hist[i] = G_loss_trained
for nn, gl in enumerate(G_losses_trained):
loss_g_train_hists[nn][i] = gl
loss_s_train_hist[i] = S_loss_trained
loss_w_train_hist[i] = W_loss_trained
loss2_train_hist[i] = out_cost1
gtvar_train_hist[i] = gt_variance
except tf.errors.OutOfRangeError:
print("train done at step %d"%(i))
break
train_gw_avg = np.average(loss_gw_train_hist).astype(np.float32)
train_g_avg = np.average(loss_g_train_hist).astype(np.float32)
for nn, lgth in enumerate(loss_g_train_hists):
train_g_avgs[nn] = np.average(lgth).astype(np.float32)
###############
train_s_avg = np.average(loss_s_train_hist).astype(np.float32)
train_w_avg = np.average(loss_w_train_hist).astype(np.float32)
train2_avg = np.average(loss2_train_hist).astype(np.float32)
gtvar_train_avg = np.average(gtvar_train_hist).astype(np.float32)
test_summaries = [0.0]*len(datasets_test)
tst_avg = [0.0]*len(datasets_test)
tst2_avg = [0.0]*len(datasets_test)
for ntest,dataset_test in enumerate(datasets_test):
sess.run(iterator_tt.initializer, feed_dict={corr2d_train_placeholder: dataset_test['corr2d'],
target_disparity_train_placeholder: dataset_test['target_disparity'],
gt_ds_train_placeholder: dataset_test['gt_ds']})
for i in range(dataset_test_size):
try:
test_summaries[ntest], GW_loss_tested, G_losses_tested, S_loss_tested, W_loss_tested, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
[merged,
GW_loss,
G_losses,
S_loss,
W_loss,
outs[0],
_disp_slice,
_d_gt_slice,
_out_diff,
_out_diff2,
_w_norm,
_out_wdiff2,
_cost1,
GT_variance
],
feed_dict={tf_batch_weights: feed_batch_weight_1 , # feed_batch_weights,
lr: learning_rate,
tf_ph_GW_loss: test_gw_avg,
tf_ph_G_loss: test_g_avg,
tf_ph_G_losses: test_g_avgs, # train_g_avgs, # temporary, there is no data for test
tf_ph_S_loss: test_s_avg,
tf_ph_W_loss: test_w_avg,
tf_ph_sq_diff: test2_avg,
tf_gtvar_diff: gtvar_test_avg,
tf_img_test0: img_gain_test0,
tf_img_test9: img_gain_test9}) # previous value of *_avg
loss_gw_test_hist[i] = GW_loss_tested
for nn, gl in enumerate(G_losses_tested):
loss_g_test_hists[nn][i] = gl
loss_s_test_hist[i] = S_loss_tested
loss_w_test_hist[i] = W_loss_tested
loss2_test_hist[i] = out_cost1
gtvar_test_hist[i] = gt_variance
except tf.errors.OutOfRangeError:
print("test done at step %d"%(i))
break
test_gw_avg = np.average(loss_gw_test_hist).astype(np.float32)
for nn, lgth in enumerate(loss_g_test_hists):
test_g_avgs[nn] = np.average(lgth).astype(np.float32)
test_s_avg = np.average(loss_s_test_hist).astype(np.float32)
test_w_avg = np.average(loss_w_test_hist).astype(np.float32)
tst_avg[ntest] = test_gw_avg
test2_avg = np.average(loss2_test_hist).astype(np.float32)
tst2_avg[ntest] = test2_avg
gtvar_test_avg = np.average(gtvar_test_hist).astype(np.float32)
train_writer.add_summary(train_summary, epoch)
test_writer.add_summary(test_summaries[0], epoch)
test_writer1.add_summary(test_summaries[1], epoch)
qsf.print_time("%d:%d -> %f %f %f (%f %f %f) dbg:%f %f"%(epoch,i,train_gw_avg, tst_avg[0], tst_avg[1], train2_avg, tst2_avg[0], tst2_avg[1], gtvar_train_avg, gtvar_test_avg))
if (((epoch + 1) == EPOCHS_TO_RUN) or (((epoch + 1) % EPOCHS_FULL_TEST) == 0)) and (len(image_data) > 0) :
last_epoch = (epoch + 1) == EPOCHS_TO_RUN
ind_img = [0]
if last_epoch:
ind_img = [i for i in range(len(image_data))]
###################################################
# Read the full image
###################################################
test_summaries_img = [0.0]*len(ind_img) # datasets_img)
disp_out= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_cost_nw= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_cost_w= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_d= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_avg_disparity = np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_gt_disparity = np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_offs = np.empty((WIDTH*HEIGHT), dtype=np.float32)
for ntest in ind_img: # datasets_img):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = ntest,
cluster_radius = CLUSTER_RADIUS,
width = IMG_WIDTH,
replace_nans = True)
sess.run(iterator_tt.initializer, feed_dict={corr2d_train_placeholder: dataset_img['corr2d'],
target_disparity_train_placeholder: dataset_img['target_disparity'],
gt_ds_train_placeholder: dataset_img['gt_ds']})
for start_offs in range(0,disp_out.shape[0],BATCH_SIZE):
end_offs = min(start_offs+BATCH_SIZE,disp_out.shape[0])
try:
test_summaries_img[ntest],output, cost_nw, cost_w, dd, avg_disparity, gt_disparity, offs = sess.run(
[merged,
outs[0], # {?,1]
rslt_cost_nw, #[?,]
rslt_cost_w, #[?,]
rslt_d, #[?,]
rslt_avg_disparity,
rslt_gt_disparity,
rslt_offs
],
feed_dict={
tf_batch_weights: feed_batch_weight_1, # feed_batch_weights,
# lr: learning_rate,
tf_ph_GW_loss: test_gw_avg,
tf_ph_G_loss: test_g_avg,
tf_ph_G_losses: train_g_avgs, # temporary, there is no data for test
tf_ph_S_loss: test_s_avg,
tf_ph_W_loss: test_w_avg,
tf_ph_sq_diff: test2_avg,
tf_gtvar_diff: gtvar_test_avg,
tf_img_test0: img_gain_test0,
tf_img_test9: img_gain_test9}) # previous value of *_avg
except tf.errors.OutOfRangeError:
print("test done at step %d"%(i))
break
try:
disp_out[start_offs:end_offs] = output.flatten()
dbg_cost_nw[start_offs:end_offs] = cost_nw.flatten()
dbg_cost_w [start_offs:end_offs] = cost_w.flatten()
dbg_d[start_offs:end_offs] = dd.flatten()
dbg_avg_disparity[start_offs:end_offs] = avg_disparity.flatten()
dbg_gt_disparity[start_offs:end_offs] = gt_disparity.flatten()
dbg_offs[start_offs:end_offs] = offs.flatten()
except ValueError:
print("dataset_img_size= %d, i=%d, output.shape[0]=%d "%(dataset_img_size, i, output.shape[0]))
break
pass
result_file = files['result'][ntest] # result_files[ntest]
try:
os.makedirs(os.path.dirname(result_file))
except:
pass
# rslt = np.concatenate([disp_out.reshape(-1,1), t_disp, gtruth],1)
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
dbg_cost_nw.reshape(-1,1),
dbg_cost_w.reshape(-1,1),
dbg_d.reshape(-1,1),
dbg_avg_disparity.reshape(-1,1),
dbg_gt_disparity.reshape(-1,1),
dbg_offs.reshape(-1,1)],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY,radius=CLUSTER_RADIUS)
img_gain_test0 = rslt[0][0]/rslt[0][1]
img_gain_test9 = rslt[9][0]/rslt[9][1]
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True)
"""
Remove dataset_img (if it is not [0]) to reduce the memory footprint
"""
if ntest > 0:
image_data[ntest] = None
# Close writers
train_writer.close()
test_writer.close()
test_writer1.close()
#reports error: Exception ignored in: <bound method BaseSession.__del__ of <tensorflow.python.client.session.Session object at 0x7efc5f720ef0>> if there is no print before exit()
print("All done")
exit (0)
#!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#from numpy import float64
import os
import numpy as np
import tensorflow as tf
import xml.etree.ElementTree as ET
import time
import imagej_tiffwriter
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end, flush=True)
TIME_LAST = t
def parseXmlConfig(conf_file, root_dir):
tree = ET.parse(conf_file)
root = tree.getroot()
directories = root.find('directories')
files = root.find('files')
parameters = {}
for p in root.find('parameters'):
parameters[p.tag]=eval(p.text.strip())
dirs={}
for p in root.find('directories'):
dirs[p.tag]=eval(p.text.strip())
if not os.path.isabs(dirs[p.tag]):
dirs[p.tag] = os.path.join(root_dir, dirs[p.tag])
files={}
for p in root.find('files'):
files[p.tag]=eval(p.text.strip())
# globals().update(parameters)
return parameters, dirs, files
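# Rough sketch of the expected config layout (illustrative only, the root tag name here is a
# placeholder; element text is parsed with eval(), so values must be Python literals, and the
# tags under <directories>/<files> follow the keys used by prepareFiles() below):
# <config>
#   <parameters> <SLOSS_CLIP>0.5</SLOSS_CLIP> ... </parameters>
#   <directories> <train_lvar>"some/dir"</train_lvar> ... </directories>
#   <files> <train_lvar>["file0", "file1"]</train_lvar> ... </files>
# </config>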
def prepareFiles(dirs, files, suffix):
#MAX_FILES_PER_GROUP
for i, path in enumerate(files['train_lvar']):
files['train_lvar'][i]=os.path.join(dirs['train_lvar'], path)
for i, path in enumerate(files['train_hvar']):
files['train_hvar'][i]=os.path.join(dirs['train_hvar'], path)
for i, path in enumerate(files['train_lvar1']):
files['train_lvar1'][i]=os.path.join(dirs['train_lvar1'], path)
for i, path in enumerate(files['train_hvar1']):
files['train_hvar1'][i]=os.path.join(dirs['train_hvar1'], path)
for i, path in enumerate(files['test_lvar']):
files['test_lvar'][i]=os.path.join(dirs['test_lvar'], path)
for i, path in enumerate(files['test_hvar']):
files['test_hvar'][i]=os.path.join(dirs['test_hvar'], path)
result_files=[]
for i, path in enumerate(files['images']):
result_files.append(os.path.join(dirs['result'], path+"_"+suffix+'.npy'))
files['result'] = result_files
files['train'] = [files['train_lvar'],files['train_hvar'], files['train_lvar1'], files['train_hvar1']]
# should be after result files
for i, path in enumerate(files['images']):
files['images'][i] = os.path.join(dirs['images'], path+'.tfrecords')
def readTFRewcordsEpoch(train_filename):
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
npy_dir_name = "npy"
dirname = os.path.dirname(train_filename)
npy_dir = os.path.join(dirname, npy_dir_name)
filebasename, file_extension = os.path.splitext(train_filename)
filebasename = os.path.basename(filebasename)
file_corr2d = os.path.join(npy_dir,filebasename + '_corr2d.npy')
file_target_disparity = os.path.join(npy_dir,filebasename + '_target_disparity.npy')
file_gt_ds = os.path.join(npy_dir,filebasename + '_gt_ds.npy')
if (os.path.exists(file_corr2d) and
os.path.exists(file_target_disparity) and
os.path.exists(file_gt_ds)):
corr2d= np.load (file_corr2d)
target_disparity = np.load(file_target_disparity)
gt_ds = np.load(file_gt_ds)
pass
else:
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append (np.array(example.features.feature['corr2d'].float_list.value, dtype=np.float32))
target_disparity_list.append (np.array(example.features.feature['target_disparity'].float_list.value, dtype=np.float32))
gt_ds_list.append (np.array(example.features.feature['gt_ds'].float_list.value, dtype= np.float32))
pass
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
try:
os.makedirs(os.path.dirname(file_corr2d))
except:
pass
np.save(file_corr2d, corr2d)
np.save(file_target_disparity, target_disparity)
np.save(file_gt_ds, gt_ds)
return corr2d, target_disparity, gt_ds
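# Note: readTFRewcordsEpoch() caches the parsed tfrecords file as three .npy arrays in an
# "npy" subdirectory next to the source file, so subsequent reads load the much faster
# numpy dumps instead of re-parsing the tfrecords.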
def getMoreFiles(fpaths,rslt, cluster_radius, hor_flip, tile_layers, tile_side):
for fpath in fpaths:
corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(fpath)
dataset = {"corr2d": corr2d,
"target_disparity": target_disparity,
"gt_ds": gt_ds}
"""
if FILE_TILE_SIDE > TILE_SIDE:
reduce_tile_size([dataset], TILE_LAYERS, TILE_SIDE)
"""
reformat_to_clusters([dataset], cluster_radius)
if hor_flip:
if np.random.randint(2):
print_time("Performing horizontal flip", end=" ")
flip_horizontal([dataset], cluster_radius, tile_layers, tile_side)
print_time("Done")
rslt.append(dataset)
#from http://warmspringwinds.github.io/tensorflow/tf-slim/2016/12/21/tfrecords-guide/
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'corr2d': tf.FixedLenFeature([FEATURES_PER_TILE],tf.float32), #string),
'target_disparity': tf.FixedLenFeature([1], tf.float32), #.string),
'gt_ds': tf.FixedLenFeature([2], tf.float32) #.string)
})
corr2d = features['corr2d'] # tf.decode_raw(features['corr2d'], tf.float32)
target_disparity = features['target_disparity'] # tf.decode_raw(features['target_disparity'], tf.float32)
gt_ds = tf.cast(features['gt_ds'], tf.float32) # tf.decode_raw(features['gt_ds'], tf.float32)
in_features = tf.concat([corr2d,target_disparity],0)
# still some nan-s in correlation data?
# in_features_clean = tf.where(tf.is_nan(in_features), tf.zeros_like(in_features), in_features)
# corr2d_out, target_disparity_out, gt_ds_out = tf.train.shuffle_batch( [in_features_clean, target_disparity, gt_ds],
corr2d_out, target_disparity_out, gt_ds_out = tf.train.shuffle_batch( [in_features, target_disparity, gt_ds],
batch_size=1000, # 2,
capacity=30,
num_threads=2,
min_after_dequeue=10)
return corr2d_out, target_disparity_out, gt_ds_out
#http://adventuresinmachinelearning.com/introduction-tensorflow-queuing/
def add_margins(npa,radius, val = np.nan):
npa_ext = np.empty((npa.shape[0]+2*radius, npa.shape[1]+2*radius, npa.shape[2]), dtype = npa.dtype)
npa_ext[radius:radius + npa.shape[0],radius:radius + npa.shape[1]] = npa
npa_ext[0:radius,:,:] = val
npa_ext[radius + npa.shape[0]:,:,:] = val
npa_ext[:,0:radius,:] = val
npa_ext[:, radius + npa.shape[1]:,:] = val
return npa_ext
def add_neibs(npa_ext,radius):
height = npa_ext.shape[0]-2*radius
width = npa_ext.shape[1]-2*radius
side = 2 * radius + 1
size = side * side
npa_neib = np.empty((height, width, side, side, npa_ext.shape[2]), dtype = npa_ext.dtype)
for dy in range (side):
for dx in range (side):
npa_neib[:,:,dy, dx,:]= npa_ext[dy:dy+height, dx:dx+width]
return npa_neib.reshape(height, width, -1)
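# Illustrative sketch (not from the original commit): for radius == 1 every tile row ends up
# with its full 3x3 neighborhood concatenated, with np.nan padding outside the image.
def _demo_add_neibs(radius=1):
    # 4x5 "image" with a single channel, tile values 0..19
    npa = np.arange(20, dtype=np.float32).reshape(4, 5, 1)
    neibs = add_neibs(add_margins(npa, radius, np.nan), radius)
    # neibs.shape == (4, 5, 9); e.g. neibs[1, 1] lists the 3x3 neighborhood of tile (1, 1)
    return neibs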
def extend_img_to_clusters(datasets_img,radius, width): # = 324):
side = 2 * radius + 1
size = side * side
if len(datasets_img) ==0:
return
num_tiles = datasets_img[0]['corr2d'].shape[0]
height = num_tiles // width
for rec in datasets_img:
if not rec is None:
rec['corr2d'] = add_neibs(add_margins(rec['corr2d'].reshape((height,width,-1)), radius, np.nan), radius).reshape((num_tiles,-1))
rec['target_disparity'] = add_neibs(add_margins(rec['target_disparity'].reshape((height,width,-1)), radius, np.nan), radius).reshape((num_tiles,-1))
rec['gt_ds'] = add_neibs(add_margins(rec['gt_ds'].reshape((height,width,-1)), radius, np.nan), radius).reshape((num_tiles,-1))
pass
def reformat_to_clusters(datasets_data, cluster_radius):
cluster_size = (2 * cluster_radius + 1) * (2 * cluster_radius + 1)
# Reformat input data
for rec in datasets_data:
rec['corr2d'] = rec['corr2d'].reshape( (rec['corr2d'].shape[0]//cluster_size, rec['corr2d'].shape[1] * cluster_size))
rec['target_disparity'] = rec['target_disparity'].reshape((rec['target_disparity'].shape[0]//cluster_size, rec['target_disparity'].shape[1] * cluster_size))
rec['gt_ds'] = rec['gt_ds'].reshape( (rec['gt_ds'].shape[0]//cluster_size, rec['gt_ds'].shape[1] * cluster_size))
def flip_horizontal(datasets_data, cluster_radius, tile_layers, tile_side):
cluster_side = 2 * cluster_radius + 1
cluster_size = cluster_side * cluster_side
"""
TILE_LAYERS = 4
TILE_SIDE = 9 # 7
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
"""
for rec in datasets_data:
corr2d = rec['corr2d'].reshape( (rec['corr2d'].shape[0], cluster_side, cluster_side, tile_layers, tile_side, tile_side))
target_disparity = rec['target_disparity'].reshape((rec['corr2d'].shape[0], cluster_side, cluster_side, -1))
gt_ds = rec['gt_ds'].reshape( (rec['corr2d'].shape[0], cluster_side, cluster_side, -1))
"""
Horizontal flip of tiles
"""
corr2d = corr2d[:,:,::-1,...]
target_disparity = target_disparity[:,:,::-1,...]
gt_ds = gt_ds[:,:,::-1,...]
corr2d[:,:,:,0,:,:] = corr2d[:,:,:,0,::-1,:] # flip vertical layer0 (hor)
corr2d[:,:,:,1,:,:] = corr2d[:,:,:,1,:,::-1] # flip horizontal layer1 (vert)
corr2d_2 = corr2d[:,:,:,3,::-1,:].copy() # flip vertical layer3 (diago)
corr2d[:,:,:,3,:,:] = corr2d[:,:,:,2,::-1,:] # flip vertical layer2 (diago)
corr2d[:,:,:,2,:,:] = corr2d_2
rec['corr2d'] = corr2d.reshape((corr2d.shape[0],-1))
rec['target_disparity'] = target_disparity.reshape((target_disparity.shape[0],-1))
rec['gt_ds'] = gt_ds.reshape((gt_ds.shape[0],-1))
def replace_nan(datasets_data, cluster_radius):
cluster_size = (2 * cluster_radius + 1) * (2 * cluster_radius + 1)
# Reformat input data
for rec in datasets_data:
if not rec is None:
np.nan_to_num(rec['corr2d'], copy = False)
np.nan_to_num(rec['target_disparity'], copy = False)
np.nan_to_num(rec['gt_ds'], copy = False)
def permute_to_swaps(perm):
pairs = []
for i in range(len(perm)):
w = np.where(perm == i)[0][0]
if w != i:
pairs.append([i,w])
perm[w] = perm[i]
perm[i] = i
return pairs
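# Illustrative sketch (not from the original commit): permute_to_swaps() turns a permutation
# into a sequence of pairwise swaps (and mutates its argument into the identity); applying
# the returned swaps in order is what shuffle_in_place() below does to the dataset slices.
def _demo_permute_to_swaps():
    order = np.random.permutation(5)
    swaps = permute_to_swaps(order.copy())   # list of [i, w] index pairs
    check = order.copy()
    for i, w in swaps:
        check[i], check[w] = check[w], check[i]
    assert (check == np.arange(5)).all()     # the swaps sort the permutation to identity
    return order, swaps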
def shuffle_in_place(datasets_data, indx, period):
swaps = permute_to_swaps(np.random.permutation(len(datasets_data)))
num_entries = datasets_data[0]['corr2d'].shape[0] // period
for swp in swaps:
ds0 = datasets_data[swp[0]]
ds1 = datasets_data[swp[1]]
tmp = ds0['corr2d'][indx::period].copy()
ds0['corr2d'][indx::period] = ds1['corr2d'][indx::period]
ds1['corr2d'][indx::period] = tmp
tmp = ds0['target_disparity'][indx::period].copy()
ds0['target_disparity'][indx::period] = ds1['target_disparity'][indx::period]
ds1['target_disparity'][indx::period] = tmp
tmp = ds0['gt_ds'][indx::period].copy()
ds0['gt_ds'][indx::period] = ds1['gt_ds'][indx::period]
ds1['gt_ds'][indx::period] = tmp
def shuffle_chunks_in_place(datasets_data, tiles_groups_per_chunk):
"""
Improve shuffling by preserving indices inside batches (0 <->0, ... 39 <->39 for 40 tile group batches)
"""
num_files = len(datasets_data)
#chunks_per_file = datasets_data[0]['target_disparity']
for nf, ds in enumerate(datasets_data):
groups_per_file = ds['corr2d'].shape[0]
chunks_per_file = groups_per_file//tiles_groups_per_chunk
permut = np.random.permutation(chunks_per_file)
ds['corr2d'] = ds['corr2d']. reshape((chunks_per_file,-1))[permut].reshape((groups_per_file,-1))
ds['target_disparity'] = ds['target_disparity'].reshape((chunks_per_file,-1))[permut].reshape((groups_per_file,-1))
ds['gt_ds'] = ds['gt_ds']. reshape((chunks_per_file,-1))[permut].reshape((groups_per_file,-1))
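# Mechanics: each dataset is reshaped to (chunks_per_file, -1) so that every
# tiles_groups_per_chunk consecutive tile-group rows form one chunk, whole chunks are
# permuted with the same random permutation for corr2d / target_disparity / gt_ds,
# and the arrays are reshaped back.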
def _setFileSlot(train_next, files, max_files_per_group):
train_next['files'] = files
train_next['slots'] = min(train_next['files'], max_files_per_group)
def _nextFileSlot(train_next):
train_next['file'] = (train_next['file'] + 1) % train_next['files']
train_next['slot'] = (train_next['slot'] + 1) % train_next['slots']
def replaceNextDataset(datasets_data, new_dataset, train_next, nset,period):
replaceDataset(datasets_data, new_dataset, nset, period, findx = train_next['slot'])
# _nextFileSlot(train_next[nset])
def replaceDataset(datasets_data, new_dataset, nset, period, findx):
"""
Replace one file in the dataset
"""
datasets_data[findx]['corr2d'] [nset::period] = new_dataset['corr2d']
datasets_data[findx]['target_disparity'][nset::period] = new_dataset['target_disparity']
datasets_data[findx]['gt_ds'] [nset::period] = new_dataset['gt_ds']
def zip_lvar_hvar(datasets_all_data, del_src = True):
# cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
# Reformat input data
num_sets_to_combine = len(datasets_all_data)
datasets_data = []
if num_sets_to_combine:
for nrec in range(len(datasets_all_data[0])):
recs = [[] for _ in range(num_sets_to_combine)]
for nset, datasets in enumerate(datasets_all_data):
recs[nset] = datasets[nrec]
rec = {'corr2d': np.empty((recs[0]['corr2d'].shape[0]*num_sets_to_combine, recs[0]['corr2d'].shape[1]),dtype=np.float32),
'target_disparity': np.empty((recs[0]['target_disparity'].shape[0]*num_sets_to_combine,recs[0]['target_disparity'].shape[1]),dtype=np.float32),
'gt_ds': np.empty((recs[0]['gt_ds'].shape[0]*num_sets_to_combine, recs[0]['gt_ds'].shape[1]),dtype=np.float32)}
for nset, reci in enumerate(recs):
rec['corr2d'] [nset::num_sets_to_combine] = recs[nset]['corr2d']
rec['target_disparity'][nset::num_sets_to_combine] = recs[nset]['target_disparity']
rec['gt_ds'] [nset::num_sets_to_combine] = recs[nset]['gt_ds']
if del_src:
for nset in range(num_sets_to_combine):
datasets_all_data[nset][nrec] = None
datasets_data.append(rec)
return datasets_data
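# Illustrative sketch (not from the original commit): zip_lvar_hvar() interleaves the
# records of the N dataset groups row by row, so group k occupies rows k::N of the result.
def _demo_zip_lvar_hvar():
    mk = lambda v: {'corr2d': np.full((2, 3), v, np.float32),
                    'target_disparity': np.full((2, 1), v, np.float32),
                    'gt_ds': np.full((2, 2), v, np.float32)}
    zipped = zip_lvar_hvar([[mk(0.0)], [mk(1.0)]], del_src=False)
    # zipped[0]['corr2d'] has 4 rows whose values alternate 0, 1, 0, 1
    return zipped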
# list of dictionaries
def reduce_tile_size(datasets_data, num_tile_layers, reduced_tile_side):
if (not datasets_data is None) and (len (datasets_data) > 0):
tsz = (datasets_data[0]['corr2d'].shape[1])// num_tile_layers # 81 # list index out of range
tss = int(np.sqrt(tsz)+0.5)
offs = (tss - reduced_tile_side) // 2
for rec in datasets_data:
rec['corr2d'] = (rec['corr2d'].reshape((-1, num_tile_layers, tss, tss))
[..., offs:offs+reduced_tile_side, offs:offs+reduced_tile_side].
reshape(-1,num_tile_layers*reduced_tile_side*reduced_tile_side))
def initTrainTestData(
files,
cluster_radius,
max_files_per_group, # shuffling buffer for files
two_trains,
train_next):
datasets_train_lvar = []
datasets_train_hvar = []
datasets_train_lvar1 = []
datasets_train_hvar1 = []
datasets_train_all = [[],[],[],[]]
for n_train, f_train in enumerate(files['train']):
if len(f_train) and ((n_train<2) or two_trains):
_setFileSlot(train_next[n_train], len(f_train), max_files_per_group)
for i, fpath in enumerate(f_train):
if i >= max_files_per_group:
break
print_time("Importing train data "+(["low variance","high variance", "low variance1","high variance1"][n_train]) +" from "+fpath, end="")
corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(fpath)
datasets_train_all[n_train].append({"corr2d":corr2d,
"target_disparity":target_disparity,
"gt_ds":gt_ds})
_nextFileSlot(train_next[n_train])
print_time(" Done")
datasets_test_lvar = []
for fpath in files['test_lvar']:
print_time("Importing test data (low variance) from "+fpath, end="")
corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(fpath)
datasets_test_lvar.append({"corr2d":corr2d,
"target_disparity":target_disparity,
"gt_ds":gt_ds})
print_time(" Done")
datasets_test_hvar = []
for fpath in files['test_hvar']:
print_time("Importing test data (high variance) from "+fpath, end="")
corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(fpath)
datasets_test_hvar.append({"corr2d":corr2d,
"target_disparity":target_disparity,
"gt_ds":gt_ds})
print_time(" Done")
# Reformat to 1/9/25 tile clusters
for n_train, d_train in enumerate(datasets_train_all):
print_time("Reshaping train data ("+(["low variance","high variance", "low variance1","high variance1"][n_train])+") ", end="")
reformat_to_clusters(d_train, cluster_radius)
print_time(" Done")
print_time("Reshaping test data (low variance)", end="")
reformat_to_clusters(datasets_test_lvar, cluster_radius)
print_time(" Done")
print_time("Reshaping test data (high variance)", end="")
reformat_to_clusters(datasets_test_hvar, cluster_radius)
print_time(" Done")
pass
"""
datasets_train_lvar & datasets_train_hvar ( that will increase batch size and placeholders twice
test has to have even original, batches will not zip - just use two batches for one big one
"""
print_time("Zipping together datasets datasets_train_lvar and datasets_train_hvar", end="")
datasets_train = zip_lvar_hvar(datasets_train_all, del_src = True) # no shuffle, delete src
print_time(" Done")
datasets_test = []
for dataset_test_lvar in datasets_test_lvar:
datasets_test.append(dataset_test_lvar)
for dataset_test_hvar in datasets_test_hvar:
datasets_test.append(dataset_test_hvar)
return datasets_train, datasets_test, len(datasets_train_all) # 4
def readImageData(image_data,
files,
indx,
cluster_radius,
width,
replace_nans):
if image_data[indx] is None:
corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(files['images'][indx])
image_data[indx] = {
'corr2d': corr2d,
'target_disparity': target_disparity,
"gt_ds": gt_ds,
"gtruths": gt_ds.copy(),
"t_disps": target_disparity.reshape([-1,1]).copy()}
extend_img_to_clusters(
[image_data[indx]],
cluster_radius,
width)
if replace_nans:
replace_nan([image_data[indx]], cluster_radius)
return image_data[indx]
def initImageData(files,
max_imgs,
cluster_radius,
width,
replace_nans):
num_imgs = len(files['images'])
img_data = [None] * num_imgs
for nfile in range(min(num_imgs, max_imgs)):
print_time("Importing test image data from "+ files['images'][nfile], end="")
readImageData(img_data,files, nfile, cluster_radius, width, replace_nans)
print_time(" Done")
return img_data
def evaluateAllResults(result_files, absolute_disparity, cluster_radius):
for result_file in result_files:
try:
print_time("Reading resuts from "+result_file, end=" ")
eval_results(result_file, absolute_disparity, radius=cluster_radius)
except:
print_time(" - does not exist")
continue
print_time("Done")
print_time("Saving resuts to tiff", end=" ")
result_npy_to_tiff(result_file, absolute_disparity, fix_nan = True)
print_time("Done")
def result_npy_to_tiff(npy_path, absolute, fix_nan, insert_deltas=True):
"""
@param npy_path full path to the npy file with 4-layer data (242,324,4) - nn_disparity(offset), target_disparity, gt disparity, gt strength
data will be written as 4-layer tiff, extension '.npy' replaced with '.tiff'
@param absolute - True - the first layer contains absolute disparity, False - difference from target_disparity
@param fix_nan - replace nan in target_disparity with 0 to apply offset, target_disparity will still contain nan
"""
tiff_path = npy_path.replace('.npy','.tiff')
data = np.load(npy_path) #(324,242,4) [nn_disp, target_disp,gt_disp, gt_conf]
if not absolute:
if fix_nan:
data[...,0] += np.nan_to_num(data[...,1], copy=True)
else:
data[...,0] += data[...,1]
if insert_deltas:
data = np.concatenate([data[...,0:4],data[...,0:2],data[...,4:]], axis = 2)
data[...,4] -= data[...,2]
data[...,5] -= data[...,2]
np.nan_to_num(data[...,3], copy=False)
data[...,4] = np.select([data[...,3]==0.0, data[...,3]>0.0], [np.nan,data[...,4]])
data[...,5] = np.select([data[...,3]==0.0, data[...,3]>0.0], [np.nan,data[...,5]])
data = data.transpose(2,0,1)
print("Saving results to TIFF: "+tiff_path)
imagej_tiffwriter.save(tiff_path,data[...,np.newaxis])
def eval_results(rslt_path, absolute,
min_disp = -0.1, #minimal GT disparity
max_disp = 20.0, # maximal GT disparity
max_ofst_target = 1.0,
max_ofst_result = 1.0,
str_pow = 2.0,
radius = 0):
# for min_disparity, max_disparity, max_offset_target, max_offset_result, strength_pow in [
variants = [[ -0.1, 5.0, 0.5, 0.5, 1.0],
[ -0.1, 5.0, 0.5, 0.5, 2.0],
[ -0.1, 5.0, 0.2, 0.2, 1.0],
[ -0.1, 5.0, 0.2, 0.2, 2.0],
[ -0.1, 20.0, 0.5, 0.5, 1.0],
[ -0.1, 20.0, 0.5, 0.5, 2.0],
[ -0.1, 20.0, 0.2, 0.2, 1.0],
[ -0.1, 20.0, 0.2, 0.2, 2.0],
[ -0.1, 20.0, 1.0, 1.0, 1.0],
[min_disp, max_disp, max_ofst_target, max_ofst_result, str_pow]]
rslt = np.load(rslt_path)
not_nan = ~np.isnan(rslt[...,0])
not_nan &= ~np.isnan(rslt[...,1])
not_nan &= ~np.isnan(rslt[...,2])
not_nan &= ~np.isnan(rslt[...,3])
not_nan_ext = np.zeros((rslt.shape[0] + 2*radius,rslt.shape[1] + 2 * radius),dtype=np.bool)
not_nan_ext[radius:-radius,radius:-radius] = not_nan
for dy in range(2*radius+1):
for dx in range(2*radius+1):
not_nan_ext[dy:dy+not_nan.shape[0], dx:dx+not_nan.shape[1]] &= not_nan
not_nan = not_nan_ext[radius:-radius,radius:-radius]
if not absolute:
rslt[...,0] += rslt[...,1]
nn_disparity = np.nan_to_num(rslt[...,0], copy = False)
target_disparity = np.nan_to_num(rslt[...,1], copy = False)
gt_disparity = np.nan_to_num(rslt[...,2], copy = False)
gt_strength = np.nan_to_num(rslt[...,3], copy = False)
rslt = []
for min_disparity, max_disparity, max_offset_target, max_offset_result, strength_pow in variants:
good_tiles = not_nan.copy();
good_tiles &= (gt_disparity >= min_disparity)
good_tiles &= (gt_disparity <= max_disparity)
good_tiles &= (target_disparity != gt_disparity)
good_tiles &= (np.abs(target_disparity - gt_disparity) <= max_offset_target)
good_tiles &= (np.abs(target_disparity - nn_disparity) <= max_offset_result)
gt_w = gt_strength * good_tiles
gt_w = np.power(gt_w,strength_pow)
sw = gt_w.sum()
diff0 = target_disparity - gt_disparity
diff1 = nn_disparity - gt_disparity
diff0_2w = gt_w*diff0*diff0
diff1_2w = gt_w*diff1*diff1
rms0 = np.sqrt(diff0_2w.sum()/sw)
rms1 = np.sqrt(diff1_2w.sum()/sw)
print ("%7.3f<disp<%7.3f, offs_tgt<%5.2f, offs_rslt<%5.2f pwr=%05.3f, rms0=%7.4f, rms1=%7.4f (gain=%7.4f) num good tiles = %5d"%(
min_disparity, max_disparity, max_offset_target, max_offset_result, strength_pow, rms0, rms1, rms0/rms1, good_tiles.sum() ))
rslt.append([rms0,rms1])
return rslt
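# For each variant, rms0 is the strength-weighted RMS error of the heuristic target
# disparity vs. ground truth and rms1 the same for the network output, so gain = rms0/rms1
# above 1.0 means the network improved on the pre-NN disparity for that tile selection.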
def concentricSquares(radius):
side = 2 * radius + 1
return [[((i // side) >= var) and
((i // side) < (side - var)) and
((i % side) >= var) and
((i % side) < (side - var)) for i in range (side*side) ] for var in range(radius+1)]
@@ -10,6 +10,7 @@ import tensorflow as tf
def smoothLoss(out_batch, # [batch_size,(1..2)] tf_result
target_disparity_batch, # [batch_size] tf placeholder
gt_ds_batch_clust, # [batch_size,25,2] tf placeholder
clip, # limit punishment for cutting corners (disparity pix)
absolute_disparity = False, #when false there should be no activation on disparity output !
cluster_radius = 2):
with tf.name_scope("SmoothLoss"):
@@ -25,6 +26,7 @@ def smoothLoss(out_batch, # [batch_size,(1..2)] tf_result
for dx in [-1,0,1]:
if (dy != 0) or (dx != 0):
i8.append(center_tile_index+(dy*cluster_side)+dx)
tf_clip = tf.constant(clip, dtype=tf.float32, name = "clip")
tf_gt_ds_all = tf.reshape(gt_ds_batch_clust,[-1,cluster_size,gt_ds_batch_clust.shape[1]//cluster_size], name = "gt_ds_all")
tf_neibs8 = tf.gather(tf_gt_ds_all, indices = i8, axis = 1, name = "neibs8")
tf_gt_disparity8 = tf.reshape(tf_neibs8[:,:,0], [-1,8], name = "gt8_disparity") # (?,8)
@@ -42,7 +44,7 @@ def smoothLoss(out_batch, # [batch_size,(1..2)] tf_result
tf_gt_strength = tf.reshape(tf_gt_ds_all[:,center_tile_index,1], [-1], name = "gt_strength") # (?,)
tf_d0 = tf.abs(tf_gt_disparity - tf_avg_disparity, name = "tf_d0")
tf_d = tf.maximum(tf_d0, 0.001, name = "tf_d")
tf_d2 = tf.multiply(tf_d, tf_d, name = "tf_d2")
## tf_d2 = tf.multiply(tf_d, tf_d, name = "tf_d2")
tf_out = tf.reshape(out_batch[:,0],[-1], name = "tf_out")
if absolute_disparity:
@@ -52,12 +54,12 @@ def smoothLoss(out_batch, # [batch_size,(1..2)] tf_result
tf_offs = tf.subtract(tf_out_disparity, tf_avg_disparity, name = "offs")
tf_offs2 = tf.multiply(tf_offs, tf_offs, name = "offs2")
# tf_parab = tf.divide(tf_offs2, tf_d, name = "parab")
# tf_cost_nlim = tf.subtract(tf_d2, tf_offs2, name = "cost_nlim")
tf_offs2_d = tf.divide(tf_offs2, tf_d, name = "offs2_d")
tf_cost0 = tf.maximum(tf_d - tf_offs2_d, 0.0, name = "cost0")
tf_cost_nw = tf.minimum(tf_cost0, tf_clip, name = "cost_nw")
# tf_cost_nw = tf.maximum(tf_d - tf_parab, 0.0, name = "cost_nw")
tf_cost_nw = tf.maximum(tf_d2 - tf_offs2, 0.0, name = "cost_nw")
## tf_cost_nw = tf.maximum(tf_d2 - tf_offs2, 0.0, name = "cost_nw")
tf_cost_w = tf.multiply(tf_cost_nw, tf_gt_strength, name = "cost_w")
tf_sum_wc = tf.reduce_sum(tf_gt_strength, name = "sum_wc")
tf_sum_costw = tf.reduce_sum(tf_cost_w, name = "sum_costw")
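# As far as the visible hunks show, this change replaces the unclipped cost
# max(d^2 - offs^2, 0) with a clipped form min(max(d - offs^2/d, 0), clip), limiting how
# much a tile can be punished for "cutting corners". A rough numpy sketch of the new
# per-tile cost under that reading (illustrative only, not the library implementation):
import numpy as np
def smooth_cost_sketch(gt_disparity, avg_disparity, out_disparity, clip):
    d = np.maximum(np.abs(gt_disparity - avg_disparity), 0.001)   # tf_d
    offs = out_disparity - avg_disparity                          # tf_offs
    cost0 = np.maximum(d - offs * offs / d, 0.0)                  # tf_cost0
    return np.minimum(cost0, clip)                                # tf_cost_nw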