#!/usr/bin/env python3

__copyright__ = "Copyright 2018, Elphel, Inc."
__license__   = "GPL-3.0+"
__email__     = "andrey@elphel.com"


from PIL import Image

import os
import sys
import glob

import numpy as np
import itertools

import time

import matplotlib.pyplot as plt

import shutil

TIME_START = time.time()
TIME_LAST  = TIME_START
DEBUG_LEVEL= 1
DISP_BATCH_BINS =   20 # Number of batch disparity bins
STR_BATCH_BINS =    10 # Number of batch strength bins
FILES_PER_SCENE =    5 # number of random offset files for the scene to select from (0 - use all available)
#MIN_BATCH_CHOICES = 10 # minimal number of tiles in a file for each bin to select from
#MAX_BATCH_FILES =   10 #maximal number of files to use in a batch
MAX_EPOCH =        500
#LR =               1e-4 # learning rate
LR =               1e-3 # learning rate
USE_CONFIDENCE =     False
ABSOLUTE_DISPARITY = True # True # False
DEBUG_PLT_LOSS =     True
FEATURES_PER_TILE =  324
EPOCHS_TO_RUN =     10000 #0
RUN_TOT_AVG =       100 # last batches to average. Epoch is 307 training  batches
BATCH_SIZE =       1000 # Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
SHUFFLE_EPOCH =    True
NET_ARCH =           0 # overwrite with argv?
#DEBUG_PACK_TILES = True
SUFFIX=str(NET_ARCH)+ (["R","A"][ABSOLUTE_DISPARITY])
MAX_TRAIN_FILES_TFR = 6
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[38;5;214m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    BOLDWHITE = '\033[1;37m'
    UNDERLINE = '\033[4m'
def print_time(txt="",end="\n"):
    global TIME_LAST
    t = time.time()
    if txt:
        txt +=" "
    print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end, flush=True)
    TIME_LAST = t
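# Usage sketch (illustrative only, not executed here): print_time() prints its message
# followed by the time since the script started and since the previous print_time() call,
# e.g. print_time("Parsed file") might print "Parsed file at 12.3456s (+0.7890s)".
# The numbers above are made up; only the format comes from the code.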
#reading to memory (testing)
def readTFRewcordsEpoch(train_filename):
#    filenames = [train_filename]
#    dataset = tf.data.TFRecordDataset(filenames)
    if '.tfrecords' not in train_filename:
        train_filename += '.tfrecords'
    record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
    corr2d_list=[]
    target_disparity_list=[]
    gt_ds_list = []
    for string_record in record_iterator:
        example = tf.train.Example()
        example.ParseFromString(string_record)
        corr2d_list.append           (np.array(example.features.feature['corr2d'].float_list.value, dtype=np.float32))
#        target_disparity_list.append(np.array(example.features.feature['target_disparity'].float_list.value[0], dtype=np.float32))
        target_disparity_list.append (np.array(example.features.feature['target_disparity'].float_list.value, dtype=np.float32))
        gt_ds_list.append            (np.array(example.features.feature['gt_ds'].float_list.value, dtype= np.float32))
    corr2d=            np.array(corr2d_list)
    target_disparity = np.array(target_disparity_list)
    gt_ds =            np.array(gt_ds_list)
    return corr2d, target_disparity, gt_ds
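# Shapes returned by readTFRewcordsEpoch, as implied by the feature definitions below:
#   corr2d           - (num_tiles, 324) float32 2D-correlation features
#   target_disparity - (num_tiles, 1)   float32
#   gt_ds            - (num_tiles, 2)   float32 ground truth (disparity, strength)
# Note that tensorflow is imported further down in this file, so this function must only
# be called after that import has run (which is the case in the current flow).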

#from http://warmspringwinds.github.io/tensorflow/tf-slim/2016/12/21/tfrecords-guide/
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    features = tf.parse_single_example(
      serialized_example,
      # Defaults are not specified since both keys are required.
      features={
        'corr2d':           tf.FixedLenFeature([324],tf.float32), #string),
        'target_disparity': tf.FixedLenFeature([1],   tf.float32), #.string),
        'gt_ds':            tf.FixedLenFeature([2],  tf.float32)  #.string)
        })
    corr2d =           features['corr2d'] # tf.decode_raw(features['corr2d'], tf.float32)
    target_disparity = features['target_disparity'] # tf.decode_raw(features['target_disparity'], tf.float32)
    gt_ds =            tf.cast(features['gt_ds'], tf.float32) # tf.decode_raw(features['gt_ds'], tf.float32)
    in_features = tf.concat([corr2d,target_disparity],0)
    # still some nan-s in correlation data?
#    in_features_clean = tf.where(tf.is_nan(in_features), tf.zeros_like(in_features), in_features)
#    corr2d_out, target_disparity_out, gt_ds_out = tf.train.shuffle_batch( [in_features_clean, target_disparity, gt_ds],
    corr2d_out, target_disparity_out, gt_ds_out = tf.train.shuffle_batch( [in_features, target_disparity, gt_ds],
                                                 batch_size=1000, # 2,
                                                 capacity=30,
                                                 num_threads=2,
                                                 min_after_dequeue=10)
    return corr2d_out, target_disparity_out, gt_ds_out

#http://adventuresinmachinelearning.com/introduction-tensorflow-queuing/
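# Note: read_and_decode() above is the older queue/TFRecordReader-based input path from
# the guides linked above; the rest of this script uses the in-memory tf.data pipeline
# below instead, so read_and_decode() is kept for reference only and is never called.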

# Main code

# tfrecords' paths for training
try:
    train_filenameTFR =  sys.argv[1]
except IndexError:
    train_filenameTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data/train.tfrecords"

# if the path is a directory
if os.path.isdir(train_filenameTFR):
    train_filesTFR = glob.glob(train_filenameTFR+"/*train-*.tfrecords")
    train_filenameTFR = train_filesTFR[0]
else:
    train_filesTFR = [train_filenameTFR]

train_filesTFR.sort()
print("Train tfrecords: "+str(train_filesTFR))

# tfrecords' paths for testing
try:
    test_filenameTFR =  sys.argv[2]
except IndexError:
    test_filenameTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data/test.tfrecords"

# if the path is a directory
if os.path.isdir(test_filenameTFR):
    test_filesTFR = glob.glob(test_filenameTFR+"/test_*.tfrecords")
    test_filenameTFR = test_filesTFR[0]
else:
    test_filesTFR = [test_filenameTFR]

test_filesTFR.sort()
print("Test tfrecords: "+str(test_filesTFR))

# Now we are left with two sorted lists of tfrecords files - train and test
n_allowed_train_filesTFR = min(MAX_TRAIN_FILES_TFR,len(train_filesTFR))
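# MAX_TRAIN_FILES_TFR caps how many parsed train files are kept in memory at the same
# time; when more files exist on disk, the epoch loop below rotates them in one at a
# time using a background thread (see read_new_tfrecord_file and the epoch%10 block).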


import tensorflow as tf
import tensorflow.contrib.slim as slim

#print_time("Importing training data... ", end="")
print_time("Importing training data... ")

corr2d_trains = [None]*n_allowed_train_filesTFR
target_disparity_trains = [None]*n_allowed_train_filesTFR
gt_ds_trains = [None]*n_allowed_train_filesTFR

# Load maximum files from the list
for i in range(n_allowed_train_filesTFR):
    corr2d_trains[i], target_disparity_trains[i], gt_ds_trains[i] = readTFRewcordsEpoch(train_filesTFR[i])
    print_time("Parsed "+train_filesTFR[i])

corr2d_train = corr2d_trains[0]
target_disparity_train = target_disparity_trains[0]
gt_ds_train = gt_ds_trains[0]

print_time("  Done")

corr2d_train_placeholder =           tf.placeholder(corr2d_train.dtype,           (None,324)) # corr2d_train.shape)
target_disparity_train_placeholder = tf.placeholder(target_disparity_train.dtype, (None,1))  #target_disparity_train.shape)
gt_ds_train_placeholder =            tf.placeholder(gt_ds_train.dtype,            (None,2)) #gt_ds_train.shape)

dataset_train = tf.data.Dataset.from_tensor_slices({
    "corr2d":corr2d_train_placeholder,
    "target_disparity": target_disparity_train_placeholder,
    "gt_ds": gt_ds_train_placeholder})
dataset_train_size = len(corr2d_train)
print_time("dataset_train.output_types "+str(dataset_train.output_types)+", dataset_train.output_shapes "+str(dataset_train.output_shapes)+", number of elements="+str(dataset_train_size))

dataset_train = dataset_train.batch(BATCH_SIZE)
dataset_train = dataset_train.prefetch(BATCH_SIZE)

dataset_train_size //= BATCH_SIZE
print("dataset_train.output_types "+str(dataset_train.output_types)+", dataset_train.output_shapes "+str(dataset_train.output_shapes)+", number of elements="+str(dataset_train_size))
iterator_train = dataset_train.make_initializable_iterator()
next_element_train = iterator_train.get_next()
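# The initializable iterator is (re)fed from numpy arrays through the placeholders above;
# each epoch it is re-initialized with a different train file (and, for the test pass,
# with the test arrays), e.g.:
#   sess.run(iterator_train.initializer,
#            feed_dict={corr2d_train_placeholder: corr2d_trains[train_file_index], ...})
# where "..." stands for the other two placeholders, exactly as done in the loop below.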

print_time("Importing test data... ", end="")
corr2d_test, target_disparity_test, gt_ds_test = readTFRewcordsEpoch(test_filenameTFR)
print_time("  Done")
dataset_test_size = len(corr2d_test)
dataset_test_size //= BATCH_SIZE
"""
iterator_test =  dataset_test.make_initializable_iterator()
next_element_test =  iterator_test.get_next()
"""
#https://www.tensorflow.org/versions/r1.5/programmers_guide/datasets

result_dir = './attic/result_inmem4_'+     SUFFIX+'/'
checkpoint_dir = './attic/result_inmem4_'+ SUFFIX+'/'
save_freq = 500

def lrelu(x):
    return tf.maximum(x*0.2,x)
#    return tf.nn.relu(x)
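# lrelu() is a leaky ReLU with slope 0.2 for negative inputs:
#   lrelu(x) = x        for x >= 0
#   lrelu(x) = 0.2 * x  for x <  0
# which is what tf.maximum(x*0.2, x) computes; the commented-out line switches back to a
# plain ReLU.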

def network_fc_simple(input, arch = 0):

    global image_summary_op1

    layouts = {0:[0,   0,   0,   32,  20,  16],
               1:[0,   0,   0,  256, 128,  64],
               2:[0, 128,  32,   32,  32,  16],
               3:[0,   0,  40,   32,  20,  16]}
    layout = layouts[arch]
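    # Each layout lists the widths of up to six hidden fully connected layers; zero
    # entries are skipped. For the default arch 0 this builds 325 inputs (324 corr2d
    # values + target disparity) -> 32 -> 20 -> 16, followed by the 1- or 2-output
    # g_fc_out layer defined below.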
    last_indx = None
    fc = []
    for i, num_outs in enumerate (layout):
        if num_outs:
            if fc:
                inp = fc[-1]
            else:
                inp = input

            fc.append(slim.fully_connected(inp, num_outs, activation_fn=lrelu,scope='g_fc'+str(i)))
            
            #with tf.variable_scope('g_fc'+str(i)+'/fully_connected',reuse=tf.AUTO_REUSE):
            with tf.variable_scope('g_fc'+str(i),reuse=tf.AUTO_REUSE):
                
                w = tf.get_variable('weights',shape=[inp.shape[1],num_outs])
                #image = tf.get_variable('w_images',shape=[1, inp.shape[1],num_outs,1])
                if (i==3):
                    
                    # red border
                    grid = tf.constant([0.1,-0.1,-0.1],dtype=tf.float32,name="GRID")
                    #grid = tf.constant([255,100,100],dtype=tf.float32,name="GRID")
                    
                    # (325,32)
                    wimg_1 = w
                    # (32,325)
                    wimg_2 = tf.transpose(wimg_1,[1,0])
                    # (32,324)
                    wimg_3 = wimg_2[:,:-1]
                    
                    # res?
                    #wimg_res = tf.get_variable('wimg_res',shape=[32*(9+1),(9+1)*4, 3])
                    
                    # long list                    
                    tmp1 = []
                    for mi in range(32):
                        tmp2 = []
                        for mj in range(4):
                            
                            s_i = mj*81
                            e_i = (mj+1)*81
                            
                            tile = tf.reshape(wimg_3[mi,s_i:e_i],shape=(9,9))                            
                            tiles = tf.stack([tile]*3,axis=2)
                            
                            #gtiles1 = tf.concat([tiles, tf.reshape(9*[grid],shape=(1,9,3))],axis=0)
                            gtiles1 = tf.concat([tiles,  tf.expand_dims(9*[grid],0)],axis=0)
                            gtiles2 = tf.concat([gtiles1,tf.expand_dims(10*[grid],1)],axis=1)
                            tmp2.append(gtiles2)
                        
                        ts = tf.concat(tmp2,axis=1)
                        tmp1.append(ts)
                    
                    image_summary_op2 = tf.concat(tmp1,axis=0)
                                                
                    #image_summary_op1 = tf.assign(wimg_res,tf.zeros(shape=[32*(9+1),(9+1)*4, 3],dtype=tf.float32))
                    
                    #wimgo1 = tf.zeros(shape=[32*(9+1),(9+1)*4, 3],dtype=tf.float32)
                                                            
                    #tf.summary.image("wimg_res1",tf.reshape(wimg_res,[1,32*(9+1),(9+1)*4, 3]))
                    
                    #tf.summary.image("wimgo1",tf.reshape(wimgo1,[1,32*(9+1),(9+1)*4, 3]))
                    #tf.summary.image("wimgo2",tf.reshape(wimgo2,[1,32*(9+1),(9+1)*4, 3]))
                    #tf.summary.image("TILE",tf.reshape(gtiles2,[1,10,10,3]))
                    #tf.summary.image("STRIPE",tf.reshape(ts,[1,10,40,3]))
                    tf.summary.image("W8S",tf.reshape(image_summary_op2,[1,320,40,3]))
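                    # The "W8S" summary is a montage of the first trained layer's weights
                    # (sized for the default arch 0): each of the 32 units' 325 incoming
                    # weights, minus the target-disparity weight, is split into four 9x9
                    # correlation tiles, each tile gets a 1-pixel reddish grid border
                    # (10x10), and the tiles are stacked into a 320x40x3 image.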
                    
                    # borders
                    #for mi in range(0,wimg_res.shape[0],10):
                    #    for mj in range(wimg_res.shape[1]):
                    #        wimg_res[mi,mj].assign([255,255,255])
                    
                    #wimg_res[9::(9+1),:].assign([255,0,0])
                    #wimg_res[:,9::(9+1)].assign([255,0,0])
                    
                    #for mi in range(0,wimg_res.shape[0],10):
                    #    print(mi)
                        #wimg_res = tf.stack([wing_res,])
                    
                    #wimg_1  = tf.reshape(w,[1,inp.shape[1],num_outs,1])
                    #wimg_1t = tf.transpose(wimg_1,[0,2,1,3])
                    
                    # w = w[a,b]
                    # wt = w[b,a]
                    # for i in range(b):
                    #     tmp =  
                    
                    
                    #tf.summary.image("wimg_1",wimg_1)
                    #tf.summary.image("wimg_1t",wimg_1t)
                    #tf.summary.image("wimg_res1",tf.reshape(wimg_res,[1,32*(9+1),(9+1)*4, 3]))
                
                b = tf.get_variable('biases',shape=[num_outs])
                tf.summary.histogram("weights",w)
                tf.summary.histogram("biases",b)
    """
#  fc1  = slim.fully_connected(input, 256, activation_fn=lrelu,scope='g_fc1')
#  fc2  = slim.fully_connected(fc1,   128, activation_fn=lrelu,scope='g_fc2')
    fc3  =     slim.fully_connected(input, 256, activation_fn=lrelu,scope='g_fc3')
    fc4  =     slim.fully_connected(fc3,   128, activation_fn=lrelu,scope='g_fc4')
    fc5  =     slim.fully_connected(fc4,    64, activation_fn=lrelu,scope='g_fc5')
    """
###  fc3  =     slim.fully_connected(input,    32, activation_fn=lrelu,scope='g_fc3')
###  fc4  =     slim.fully_connected(fc3,    20, activation_fn=lrelu,scope='g_fc4')
###  fc5  =     slim.fully_connected(fc4,    16, activation_fn=lrelu,scope='g_fc5')

    if USE_CONFIDENCE:
        fc_out  = slim.fully_connected(fc[-1],     2, activation_fn=lrelu,scope='g_fc_out')

        with tf.variable_scope('g_fc_out',reuse=tf.AUTO_REUSE):
            w = tf.get_variable('weights',shape=[fc[-1].shape[1],2])
            tf.summary.image("wimage",tf.reshape(w,[1,fc[-1].shape[1],2,1]))
            b = tf.get_variable('biases',shape=[2])
            tf.summary.histogram("weights",w)
            tf.summary.histogram("biases",b)

    else:
        fc_out  = slim.fully_connected(fc[-1],     1, activation_fn=None,scope='g_fc_out')

        with tf.variable_scope('g_fc_out',reuse=tf.AUTO_REUSE):
            w = tf.get_variable('weights',shape=[fc[-1].shape[1],1])
            tf.summary.image("wimage",tf.reshape(w,[1,fc[-1].shape[1],1,1]))
            b = tf.get_variable('biases',shape=[1])
            tf.summary.histogram("weights",w)
            tf.summary.histogram("biases",b)
        #If using residual disparity, split last layer into 2 or remove activation and add rectifier to confidence only
    return fc_out

def batchLoss(out_batch,                   # [batch_size,(1..2)] tf_result
              target_disparity_batch,      # [batch_size]        tf placeholder
              gt_ds_batch,                 # [batch_size,2]      tf placeholder
              absolute_disparity =     True, #when false there should be no activation on disparity output !
              use_confidence =         True,
              lambda_conf_avg =        0.01,
              lambda_conf_pwr =        0.1,
              conf_pwr =               2.0,
              gt_conf_offset =         0.08,
              gt_conf_pwr =            1.0,
              error2_offset =          0.0025, # (0.05^2)
              disp_wmin =              1.0,    # minimal disparity to apply weight boosting for small disparities
              disp_wmax =              8.0,    # maximal disparity to apply weight boosting for small disparities
              use_out =                False):  # use calculated disparity for disparity weight boosting (False - use target disparity)

    with tf.name_scope("BatchLoss"):
        """
        Here confidence should be after relU. Disparity - may be also if absolute, but no activation if output is residual disparity
        """
        tf_lambda_conf_avg = tf.constant(lambda_conf_avg, dtype=tf.float32, name="tf_lambda_conf_avg")
        tf_lambda_conf_pwr = tf.constant(lambda_conf_pwr, dtype=tf.float32, name="tf_lambda_conf_pwr")
        tf_conf_pwr =        tf.constant(conf_pwr,        dtype=tf.float32, name="tf_conf_pwr")
        tf_gt_conf_offset =  tf.constant(gt_conf_offset,  dtype=tf.float32, name="tf_gt_conf_offset")
        tf_gt_conf_pwr =     tf.constant(gt_conf_pwr,     dtype=tf.float32, name="tf_gt_conf_pwr")
        tf_num_tiles =       tf.shape(gt_ds_batch)[0]
        tf_0f =              tf.constant(0.0,             dtype=tf.float32, name="tf_0f")
        tf_1f =              tf.constant(1.0,             dtype=tf.float32, name="tf_1f")
        tf_maxw =            tf.constant(1.0,             dtype=tf.float32, name="tf_maxw")
        if gt_conf_pwr == 0:
            w = tf.ones((out_batch.shape[0]), dtype=tf.float32,name="w_ones")
        else:
    #        w_slice = tf.slice(gt_ds_batch,[0,1],[-1,1],              name = "w_gt_slice")
            w_slice = tf.reshape(gt_ds_batch[:,1],[-1],                     name = "w_gt_slice")

            w_sub =   tf.subtract      (w_slice, tf_gt_conf_offset,         name = "w_sub")
    #        w_clip =  tf.clip_by_value(w_sub, tf_0f,tf_maxw,              name = "w_clip")
            w_clip =  tf.maximum(w_sub, tf_0f,                              name = "w_clip")
            if gt_conf_pwr == 1.0:
                w = w_clip
            else:
                w=tf.pow(w_clip, tf_gt_conf_pwr, name = "w_pow")

        if use_confidence:
            tf_num_tilesf =      tf.cast(tf_num_tiles, dtype=tf.float32,     name="tf_num_tilesf")
    #        conf_slice =     tf.slice(out_batch,[0,1],[-1,1],                name = "conf_slice")
            conf_slice =     tf.reshape(out_batch[:,1],[-1],                 name = "conf_slice")
            conf_sum =       tf.reduce_sum(conf_slice,                       name = "conf_sum")
            conf_avg =       tf.divide(conf_sum, tf_num_tilesf,              name = "conf_avg")
            conf_avg1 =      tf.subtract(conf_avg, tf_1f,                    name = "conf_avg1")
            conf_avg2 =      tf.square(conf_avg1,                            name = "conf_avg2")
            cost2 =          tf.multiply (conf_avg2, tf_lambda_conf_avg,     name = "cost2")

            iconf_avg =      tf.divide(tf_1f, conf_avg,                      name = "iconf_avg")
            nconf =          tf.multiply (conf_slice, iconf_avg,             name = "nconf") #normalized confidence
            nconf_pwr =      tf.pow(nconf, conf_pwr,                         name = "nconf_pwr")
            nconf_pwr_sum =  tf.reduce_sum(nconf_pwr,                        name = "nconf_pwr_sum")
            nconf_pwr_offs = tf.subtract(nconf_pwr_sum, tf_1f,               name = "nconf_pwr_offs")
            cost3 =          tf.multiply (conf_avg2, nconf_pwr_offs,         name = "cost3")
            w_all =          tf.multiply (w, nconf,                          name = "w_all")
        else:
            w_all = w
#            cost2 = 0.0
#            cost3 = 0.0
        # normalize weights
        w_sum =              tf.reduce_sum(w_all,                            name = "w_sum")
        iw_sum =             tf.divide(tf_1f, w_sum,                         name = "iw_sum")
        w_norm =             tf.multiply (w_all, iw_sum,                     name = "w_norm")

    #    disp_slice =         tf.slice(out_batch,[0,0],[-1,1],                name = "disp_slice")
    #    d_gt_slice =         tf.slice(gt_ds_batch,[0,0],[-1,1],              name = "d_gt_slice")
        disp_slice =         tf.reshape(out_batch[:,0],[-1],                 name = "disp_slice")
        d_gt_slice =         tf.reshape(gt_ds_batch[:,0],[-1],               name = "d_gt_slice")

        """
        if absolute_disparity:
            out_diff =       tf.subtract(disp_slice, d_gt_slice,             name = "out_diff")
        else:
            td_flat =        tf.reshape(target_disparity_batch,[-1],         name = "td_flat")
            residual_disp =  tf.subtract(d_gt_slice, td_flat,                name = "residual_disp")
            out_diff =       tf.subtract(disp_slice, residual_disp,          name = "out_diff")
        """
        td_flat =        tf.reshape(target_disparity_batch,[-1],         name = "td_flat")
        if absolute_disparity:
            adisp =          disp_slice
        else:
#            td_flat =        tf.reshape(target_disparity_batch,[-1],         name = "td_flat")
            adisp =          tf.add(disp_slice, td_flat,                     name = "adisp")
        out_diff =           tf.subtract(adisp, d_gt_slice,                  name = "out_diff")


        out_diff2 =          tf.square(out_diff,                             name = "out_diff2")
        out_wdiff2 =         tf.multiply (out_diff2, w_norm,                 name = "out_wdiff2")

        cost1 =              tf.reduce_sum(out_wdiff2,                       name = "cost1")

        out_diff2_offset =   tf.subtract(out_diff2, error2_offset,           name = "out_diff2_offset")
        out_diff2_biased =   tf.maximum(out_diff2_offset, 0.0,               name = "out_diff2_biased")

        # calculate disparity-based weight boost
        if use_out:
            dispw =          tf.clip_by_value(adisp, disp_wmin, disp_wmax,   name = "dispw")
        else:
            dispw =          tf.clip_by_value(td_flat, disp_wmin, disp_wmax, name = "dispw")
        dispw_boost =        tf.divide(disp_wmax, dispw,                     name = "dispw_boost")
        dispw_comp =         tf.multiply (dispw_boost, w_norm,               name = "dispw_comp")
        dispw_sum =          tf.reduce_sum(dispw_comp,                       name = "dispw_sum")
        idispw_sum =         tf.divide(tf_1f, dispw_sum,                     name = "idispw_sum")
        dispw_norm =         tf.multiply (dispw_comp, idispw_sum,            name = "dispw_norm")

        out_diff2_wbiased =  tf.multiply(out_diff2_biased, dispw_norm,       name = "out_diff2_wbiased")
#        out_diff2_wbiased =  tf.multiply(out_diff2_biased, w_norm,       name = "out_diff2_wbiased")
        cost1b =             tf.reduce_sum(out_diff2_wbiased,                name = "cost1b")

        if use_confidence:
            cost12 =         tf.add(cost1b, cost2,                           name = "cost12")
            cost123 =        tf.add(cost12, cost3,                           name = "cost123")

            return cost123, disp_slice, d_gt_slice, out_diff,out_diff2, w_norm, out_wdiff2, cost1
        else:
            return cost1b,  disp_slice, d_gt_slice, out_diff,out_diff2, w_norm, out_wdiff2, cost1
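# batchLoss() returns, in order: the total cost (cost123 with confidence, otherwise
# cost1b), the predicted and ground-truth disparity slices, their difference and squared
# difference, the normalized per-tile weights, the weighted squared error and cost1 (the
# plain weighted squared-disparity error). cost1b additionally subtracts error2_offset
# from the squared error (clipped at 0) and boosts the weight of low-disparity tiles
# through the disp_wmin/disp_wmax clipping above.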


#corr2d325 = tf.concat([corr2d,target_disparity],0)
#corr2d325 = tf.concat([next_element_train['corr2d'],tf.reshape(next_element_train['target_disparity'],(-1,1))],1)
corr2d325 = tf.concat([next_element_train['corr2d'], next_element_train['target_disparity']],1)
#next_element_train

#    in_features = tf.concat([corr2d,target_disparity],0)

out =       network_fc_simple(input=corr2d325, arch = NET_ARCH)
#Try standard loss functions first
G_loss, _disp_slice, _d_gt_slice, _out_diff, _out_diff2, _w_norm, _out_wdiff2, _cost1 = batchLoss(out_batch =         out,        # [batch_size,(1..2)] tf_result
              target_disparity_batch=  next_element_train['target_disparity'], # target_disparity, ### target_d,   # [batch_size]        tf placeholder
              gt_ds_batch =            next_element_train['gt_ds'], # gt_ds, ### gt,         # [batch_size,2]      tf placeholder
              absolute_disparity =     ABSOLUTE_DISPARITY,
              use_confidence =         USE_CONFIDENCE, # True,
              lambda_conf_avg =        0.01,
              lambda_conf_pwr =        0.1,
              conf_pwr =               2.0,
              gt_conf_offset =         0.08,
              gt_conf_pwr =            2.0,
              error2_offset =          0.0025, # (0.05^2)
              disp_wmin =              1.0,    # minimal disparity to apply weight boosting for small disparities
              disp_wmax =              8.0,    # maximal disparity to apply weight boosting for small disparities
              use_out =                False)  # use calculated disparity for disparity weight boosting (False - use target disparity)

tf_ph_G_loss = tf.placeholder(tf.float32,shape=None,name='G_loss_avg')
tf_ph_sq_diff = tf.placeholder(tf.float32,shape=None,name='sq_diff_avg')
with tf.name_scope('sample'):
    tf.summary.scalar("G_loss",G_loss)
    tf.summary.scalar("sq_diff",_cost1)
with tf.name_scope('epoch_average'):
    tf.summary.scalar("G_loss_epoch",tf_ph_G_loss)
    tf.summary.scalar("sq_diff_epoch",tf_ph_sq_diff)
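# Two levels of scalar summaries: the 'sample' scope logs the per-batch G_loss and
# sq_diff of whichever batch gets summarized, while the 'epoch_average' scope logs the
# running averages fed back in through the tf_ph_* placeholders (computed with numpy at
# the end of each epoch in the loop below).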

t_vars=tf.trainable_variables()
lr=tf.placeholder(tf.float32)
#G_opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss)
G_opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(_cost1)
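# Note: as written the Adam step minimizes _cost1 (the confidence-weighted squared
# disparity error without the error2_offset / disparity-boost terms) rather than the
# full G_loss returned by batchLoss(); G_loss is still evaluated and logged. Restoring
# the commented-out line above makes G_loss the training objective again.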

saver=tf.train.Saver()

ROOT_PATH  = './attic/nn_ds_inmem4_graph'+SUFFIX+"/"
TRAIN_PATH = ROOT_PATH + 'train'
TEST_PATH  = ROOT_PATH + 'test'

# CLEAN OLD STUFF
shutil.rmtree(TRAIN_PATH, ignore_errors=True)
shutil.rmtree(TEST_PATH, ignore_errors=True)

# threading
from threading import Thread

thr_result = []

def read_new_tfrecord_file(filename,result):
    global thr_result
    a,b,c = readTFRewcordsEpoch(filename)
    #result = [a,b,c]
    result.append(a)
    result.append(b)
    result.append(c)
    print("Loaded new tfrecord file: "+str(filename))
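# Background loading: read_new_tfrecord_file() runs in a separate Thread so the next
# tfrecords file is parsed while training continues on the files already in memory.
# Results are handed back by appending (corr2d, target_disparity, gt_ds) to the shared
# thr_result list, which the epoch loop picks up after thr.join().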

train_record_index_counter = 0
train_file_index = 0

with tf.Session()  as sess:

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(TRAIN_PATH, sess.graph)
    test_writer  = tf.summary.FileWriter(TEST_PATH, sess.graph)
    loss_train_hist= np.empty(dataset_train_size, dtype=np.float32)
    loss_test_hist=  np.empty(dataset_test_size, dtype=np.float32)
    loss2_train_hist= np.empty(dataset_train_size, dtype=np.float32)
    loss2_test_hist=  np.empty(dataset_test_size, dtype=np.float32)
    train_avg = 0.0
    train2_avg = 0.0
    test_avg = 0.0
    test2_avg = 0.0

    for epoch in range(EPOCHS_TO_RUN):

        train_file_index = epoch%n_allowed_train_filesTFR
        print("train_file_index: "+str(train_file_index))

        if epoch%10==0:

            # if there are more tfrecords files on disk than we allow to keep in memory at once
            if (n_allowed_train_filesTFR<len(train_filesTFR)):
                # circular loading?
                tmp_train_index = (n_allowed_train_filesTFR+train_record_index_counter)%len(train_filesTFR)
                # wait for old thread
                if epoch!=0:
                    if thr.is_alive():
                        print_time("Waiting until tfrecord gets loaded")
                    thr.join()
                    # do replacement
                    ## remove the first
                    corr2d_trains.pop(0)
                    target_disparity_trains.pop(0)
                    gt_ds_trains.pop(0)
                    ## append
                    corr2d_trains.append(thr_result[0])
                    target_disparity_trains.append(thr_result[1])
                    gt_ds_trains.append(thr_result[2])

                print_time("Time to begin loading a new tfrecord file")
                # new thread
                thr_result = []
                thr = Thread(target=read_new_tfrecord_file, args=(train_filesTFR[tmp_train_index],thr_result))
                # start
                thr.start()

                train_record_index_counter += 1
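                # Sliding-window rotation: on the next pass through this block the oldest
                # in-memory file is popped and the freshly loaded one appended, so the
                # n_allowed_train_filesTFR-file window cycles through the whole
                # train_filesTFR list, one swap every 10 epochs.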

#       if SHUFFLE_EPOCH:
#        dataset_train = dataset_train.shuffle(buffer_size=10000)

        # RUN TRAIN SESSION

        sess.run(iterator_train.initializer, feed_dict={corr2d_train_placeholder: corr2d_trains[train_file_index],
                                                        target_disparity_train_placeholder: target_disparity_trains[train_file_index],
                                                        gt_ds_train_placeholder: gt_ds_trains[train_file_index]})
        for i in range(dataset_train_size):
            try:
                train_summary,_, G_loss_trained,  output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, corr2d325_out  = sess.run(
                    [   merged,
                        G_opt,
                        G_loss,
                        out,
                        _disp_slice,
                        _d_gt_slice,
                        _out_diff,
                        _out_diff2,
                        _w_norm,
                        _out_wdiff2,
                        _cost1,
                        corr2d325,
                    ],
                    feed_dict={lr:LR,tf_ph_G_loss:train_avg, tf_ph_sq_diff:train2_avg}) # previous value of *_avg

                # save all for now as a test
                #train_writer.add_summary(summary, i)
                #train_writer.add_summary(train_summary, i)
                loss_train_hist[i] =  G_loss_trained
                loss2_train_hist[i] = out_cost1
                                
            except tf.errors.OutOfRangeError:
                print("train done at step %d"%(i))
                break

        train_avg = np.average(loss_train_hist).astype(np.float32)
        train2_avg = np.average(loss2_train_hist).astype(np.float32)

        # RUN TEST SESSION

        sess.run(iterator_train.initializer, feed_dict={corr2d_train_placeholder: corr2d_test,
                                                        target_disparity_train_placeholder: target_disparity_test,
                                                        gt_ds_train_placeholder: gt_ds_test})
        for i in range(dataset_test_size):
            try:
                test_summary, G_loss_tested, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, corr2d325_out = sess.run(
                    [merged,
                     G_loss,
                     out,
                     _disp_slice,
                     _d_gt_slice,
                     _out_diff,
                     _out_diff2,
                     _w_norm,
                     _out_wdiff2,
                     _cost1,
                     corr2d325,
                     ],
                        feed_dict={lr:LR,tf_ph_G_loss:test_avg, tf_ph_sq_diff:test2_avg})  # previous value of *_avg
                loss_test_hist[i] =  G_loss_tested
                loss2_test_hist[i] = out_cost1
            except tf.errors.OutOfRangeError:
                print("test done at step %d"%(i))
                break

        test_avg =  np.average(loss_test_hist).astype(np.float32)
        test2_avg = np.average(loss2_test_hist).astype(np.float32)
        
        # they include image summaries as well
        train_writer.add_summary(train_summary, epoch)
        test_writer.add_summary(test_summary, epoch)

        print_time("%d:%d -> %f %f (%f %f)"%(epoch,i,train_avg, test_avg,train2_avg, test2_avg))

    # Close writers
    train_writer.close()
    test_writer.close()
#reports error: Exception ignored in: <bound method BaseSession.__del__ of <tensorflow.python.client.session.Session object at 0x7efc5f720ef0>> if there is no print before exit()

print("All done")
exit (0)