Commit d7fc8ac4 authored by Andrey Filippov

Modifying costs, feeding datasets to placeholders instead of constants

parent 47ad3d9f
This diff is collapsed.
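The dataset-loading changes themselves sit in the collapsed diffs above and are not shown on this page. As context for the commit title, the TF 1.x pattern of feeding a dataset through placeholders (so the numpy arrays are supplied when the iterator is initialized instead of being baked into the graph as constants) looks roughly like the sketch below. All names, shapes and sizes here are hypothetical, not taken from the commit.

import numpy as np
import tensorflow as tf

# Placeholders stand in for the training arrays; nothing is embedded in the graph.
corr2d_ph = tf.placeholder(tf.float32, [None, 325])
gt_ds_ph  = tf.placeholder(tf.float32, [None, 2])

dataset_train  = tf.data.Dataset.from_tensor_slices((corr2d_ph, gt_ds_ph)).batch(1000)
iterator_train = dataset_train.make_initializable_iterator()
next_element   = iterator_train.get_next()

with tf.Session() as sess:
    # Stand-in data; the real code would load tile data from files.
    corr2d_np = np.random.rand(5000, 325).astype(np.float32)
    gt_ds_np  = np.random.rand(5000, 2).astype(np.float32)
    # The arrays are fed once, when the iterator is (re)initialized for an epoch.
    sess.run(iterator_train.initializer,
             feed_dict={corr2d_ph: corr2d_np, gt_ds_ph: gt_ds_np})
    while True:
        try:
            batch = sess.run(next_element)
        except tf.errors.OutOfRangeError:
            break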
@@ -30,7 +30,7 @@ FILES_PER_SCENE = 5 # number of random offset files for the scene to select f
 #MIN_BATCH_CHOICES = 10 # minimal number of tiles in a file for each bin to select from
 #MAX_BATCH_FILES = 10 #maximal number of files to use in a batch
 MAX_EPOCH = 500
-LR = 1e-3 # learning rate
+LR = 1e-4 # learning rate
 USE_CONFIDENCE = False
 ABSOLUTE_DISPARITY = False
 DEBUG_PLT_LOSS = True
@@ -167,16 +167,21 @@ checkpoint_dir = './attic/result_inmem/'
 save_freq = 500
 def lrelu(x):
-    return tf.maximum(x*0.2,x)
+    return tf.maximum(x*0.5,x)
 #    return tf.nn.relu(x)
 def network(input):
-#   fc1  = slim.fully_connected(input, 512, activation_fn=lrelu,scope='g_fc1')
-#   fc2  = slim.fully_connected(fc1,   512, activation_fn=lrelu,scope='g_fc2')
-    fc3  = slim.fully_connected(input, 256, activation_fn=lrelu,scope='g_fc3')
-    fc4  = slim.fully_connected(fc3,   128, activation_fn=lrelu,scope='g_fc4')
-    fc5  = slim.fully_connected(fc4,    64, activation_fn=lrelu,scope='g_fc5')
+    fc1  = slim.fully_connected(input, 256, activation_fn=lrelu,scope='g_fc1')
+    fc2  = slim.fully_connected(fc1,   128, activation_fn=lrelu,scope='g_fc2')
+##  fc3  = slim.fully_connected(input, 256, activation_fn=lrelu,scope='g_fc3')
+##  fc4  = slim.fully_connected(fc3,   128, activation_fn=lrelu,scope='g_fc4')
+##  fc5  = slim.fully_connected(fc4,    64, activation_fn=lrelu,scope='g_fc5')
+    fc3  = slim.fully_connected(fc2,    64, activation_fn=lrelu,scope='g_fc3')
+    fc4  = slim.fully_connected(fc3,    20, activation_fn=lrelu,scope='g_fc4')
+    fc5  = slim.fully_connected(fc4,    16, activation_fn=lrelu,scope='g_fc5')
     if USE_CONFIDENCE:
         fc6  = slim.fully_connected(fc5, 2, activation_fn=lrelu,scope='g_fc6')
     else:
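The hunk above replaces the former three-layer stack (256-128-64) with a five-layer one (256-128-64-20-16) and raises the leaky-ReLU negative slope from 0.2 to 0.5. For reference, a self-contained sketch of the resulting network; the else branch of USE_CONFIDENCE is cut off in this hunk, so the single-output layer below is an assumption, and turning the flag into a function argument is done only for the sketch.

import tensorflow as tf
import tensorflow.contrib.slim as slim   # TF 1.x

def lrelu(x):
    # leaky ReLU; negative slope 0.5 after this commit (0.2 before)
    return tf.maximum(x * 0.5, x)

def network(input, use_confidence=False):
    fc1 = slim.fully_connected(input, 256, activation_fn=lrelu, scope='g_fc1')
    fc2 = slim.fully_connected(fc1,   128, activation_fn=lrelu, scope='g_fc2')
    fc3 = slim.fully_connected(fc2,    64, activation_fn=lrelu, scope='g_fc3')
    fc4 = slim.fully_connected(fc3,    20, activation_fn=lrelu, scope='g_fc4')
    fc5 = slim.fully_connected(fc4,    16, activation_fn=lrelu, scope='g_fc5')
    # two outputs (disparity + confidence) with the flag set, otherwise one (assumed)
    return slim.fully_connected(fc5, 2 if use_confidence else 1,
                                activation_fn=lrelu, scope='g_fc6')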
@@ -318,8 +323,8 @@ with tf.Session() as sess:
     for epoch in range(EPOCHS_TO_RUN):
-        if SHUFFLE_EPOCH:
-            dataset_train = dataset_train.shuffle(buffer_size=10000)
+#        if SHUFFLE_EPOCH:
+        dataset_train = dataset_train.shuffle(buffer_size=10000)
         sess.run(iterator_train.initializer)
         i=0
@@ -331,32 +336,61 @@ with tf.Session() as sess:
         # Train run
         if i<START_TEST:
-            try:
-#                _, G_current, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, corr2d325_out, target_disparity_out, gt_ds_out = sess.run(
-                train_summary,_, G_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, corr2d325_out = sess.run(
-                [   merged,
-                    G_opt,
-                    G_loss,
-                    out,
-                    _disp_slice,
-                    _d_gt_slice,
-                    _out_diff,
-                    _out_diff2,
-                    _w_norm,
-                    _out_wdiff2,
-                    _cost1,
-                    corr2d325,
-#                    target_disparity,
-#                    gt_ds
-                ],
-                feed_dict={lr:LR})
-                # save all for now as a test
-                #train_writer.add_summary(summary, i)
-                #train_writer.add_summary(train_summary, i)
-            except tf.errors.OutOfRangeError:
-                break
+            if (epoch <50) or (epoch > 100) :
+                try:
+#                    _, G_current, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, corr2d325_out, target_disparity_out, gt_ds_out = sess.run(
+                    train_summary,_, G_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, corr2d325_out = sess.run(
+                    [   merged,
+                        G_opt,
+                        G_loss,
+                        out,
+                        _disp_slice,
+                        _d_gt_slice,
+                        _out_diff,
+                        _out_diff2,
+                        _w_norm,
+                        _out_wdiff2,
+                        _cost1,
+                        corr2d325,
+#                        target_disparity,
+#                        gt_ds
+                    ],
+                    feed_dict={lr:LR})
+                    # save all for now as a test
+                    #train_writer.add_summary(summary, i)
+                    #train_writer.add_summary(train_summary, i)
+                except tf.errors.OutOfRangeError:
+                    break
+            else:
+                try:
+#                    _, G_current, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, corr2d325_out, target_disparity_out, gt_ds_out = sess.run(
+                    train_summary, G_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, corr2d325_out = sess.run(
+                    [   merged,
+#                        G_opt,
+                        G_loss,
+                        out,
+                        _disp_slice,
+                        _d_gt_slice,
+                        _out_diff,
+                        _out_diff2,
+                        _w_norm,
+                        _out_wdiff2,
+                        _cost1,
+                        corr2d325,
+#                        target_disparity,
+#                        gt_ds
+                    ],
+                    feed_dict={lr:LR})
+                    # save all for now as a test
+                    #train_writer.add_summary(summary, i)
+                    #train_writer.add_summary(train_summary, i)
+                except tf.errors.OutOfRangeError:
+                    break
         # Test run
         else:
 ...
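The last hunk gates the optimizer by epoch: for epochs 50 through 100 (inclusive) the fetch list drops G_opt, so the loss and summaries are still evaluated on the training batches but no weight update is applied. A toy, self-contained illustration of that pattern; all names below are stand-ins, not from the file.

import numpy as np
import tensorflow as tf   # TF 1.x

x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])
w = tf.Variable(tf.zeros([1, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))
opt = tf.train.AdamOptimizer(1e-4).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    xs = np.random.rand(100, 1).astype(np.float32)
    ys = 2.0 * xs
    for epoch in range(200):
        fetches = [loss]
        if (epoch < 50) or (epoch > 100):
            fetches.append(opt)   # gradient step only outside epochs 50..100
        vals = sess.run(fetches, feed_dict={x: xs, y: ys})  # loss is always computed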