Commit a1c3b782 authored by Oleg Dzhimiev's avatar Oleg Dzhimiev

TensorBoard graph tests

parent e56bda2f
......@@ -23,6 +23,8 @@ import itertools
import time
import matplotlib.pyplot as plt
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
......@@ -45,6 +47,10 @@ VALUES_LAYER_NAME = 'other'
LAYERS_OF_INTEREST = ['diagm-pair', 'diago-pair', 'hor-pairs', 'vert-pairs']
RADIUS = 1
DEBUG_PLT_LOSS = False
# If False, tiles will not be packed or rescaled
DEBUG_PACK_TILES = True
try:
src = sys.argv[1]
except IndexError:
......@@ -214,40 +220,62 @@ def lrelu(x):
def network(input):
fc1 = slim.fully_connected(input,101,activation_fn=lrelu,scope='g_fc1')
fc2 = slim.fully_connected(fc1, 101,activation_fn=lrelu,scope='g_fc2')
fc3 = slim.fully_connected(fc2, 101,activation_fn=lrelu,scope='g_fc3')
fc4 = slim.fully_connected(fc3, 101,activation_fn=lrelu,scope='g_fc4')
fc5 = slim.fully_connected(fc4, 2,activation_fn=lrelu,scope='g_fc5')
fc1 = slim.fully_connected(input,2048,activation_fn=lrelu,scope='g_fc1')
fc2 = slim.fully_connected(fc1, 1024,activation_fn=lrelu,scope='g_fc2')
fc3 = slim.fully_connected(fc2, 512,activation_fn=lrelu,scope='g_fc3')
fc4 = slim.fully_connected(fc3, 8,activation_fn=lrelu,scope='g_fc4')
fc5 = slim.fully_connected(fc4, 4,activation_fn=lrelu,scope='g_fc5')
fc6 = slim.fully_connected(fc5, 2,activation_fn=lrelu,scope='g_fc6')
return fc5
return fc6
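# The widened network funnels 2048 -> 1024 -> 512 -> 8 -> 4 -> 2; the two
# outputs are consumed below as (disparity, confidence), i.e. out[:,0] and out[:,1].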
sess = tf.Session()
in_tile = tf.placeholder(tf.float32,[None,101])
gt = tf.placeholder(tf.float32,[None,2])
#losses = tf.get_variable("losses", [None])
#update_operation = tf.assign(losses,tf.concat([losses,G_loss]))
#mean_loss = tf.reduce_mean(losses)
#tf.summary.scalar('gt_value', gt[0])
#tf.summary.scalar('gt_confidence', gt[1])
#tf.summary.scalar('gt_value',gt[0,0])
#cf_cutoff = tf.constant(tf.float32,[None,1])
out = network(in_tile)
#tf.summary.scalar('out_value', out[0,0])
#tf.summary.scalar('out_confidence', out[1])
# minimum confidence cutoff for the ground-truth weighting
cf_cutoff = 0.173303
cf_w = tf.pow(tf.maximum(gt[:,1]-cf_cutoff,0.0),1)
cf_wsum = tf.reduce_sum(cf_w)
cf_w_norm = cf_w/cf_wsum
#cf_wsum = tf.reduce_sum(cf_w[~tf.is_nan(cf_w)])
#cf_w_norm = cf_w/cf_wsum
cf_w_norm = tf.nn.softmax(cf_w)
#out_cf = out[:,1]
G_loss = tf.reduce_mean(tf.abs(out[:,0]-cf_w_norm*gt[:,0]))
G_loss = tf.reduce_mean(tf.abs(tf.nn.softmax(out[:,1])*out[:,0]-cf_w_norm*gt[:,0]))
tf.summary.scalar('loss', G_loss)
tf.summary.scalar('prediction', out[0,0])
tf.summary.scalar('ground_truth', gt[0,0]) # summary names must not contain spaces
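# Illustrative sketch of the confidence weighting above on dummy NumPy data
# (assumption: a toy 3-tile batch, not part of the graph) - confidences are
# clipped at cf_cutoff, then softmax-normalized so the weights sum to 1:
import numpy as np
_demo_conf = np.array([0.10, 0.30, 0.90], dtype=np.float32) # hypothetical gt[:,1]
_demo_w = np.maximum(_demo_conf - cf_cutoff, 0.0)           # same clipping as cf_w
_demo_w_norm = np.exp(_demo_w)/np.exp(_demo_w).sum()        # softmax, as cf_w_norm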
t_vars=tf.trainable_variables()
lr=tf.placeholder(tf.float32)
G_opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss,var_list=[var for var in t_vars if var.name.startswith('g_')])
saver=tf.train.Saver()
# merge all tf.summary ops defined above into a single op for sess.run()
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(result_dir + '/train', sess.graph)
test_writer = tf.summary.FileWriter(result_dir + '/test')
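# The FileWriters stream summary events (plus the graph) to disk; the logs
# can then be inspected with, e.g.: tensorboard --logdir <result_dir>/train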
sess.run(tf.global_variables_initializer())
ckpt=tf.train.get_checkpoint_state(checkpoint_dir)
......@@ -263,17 +291,37 @@ for folder in allfolders:
g_loss = np.zeros((packed_tiles.shape[0]*packed_tiles.shape[1],1))
learning_rate = 1e-4
recorded_loss = []
recorded_mean_loss = []
recorded_gt_d = []
recorded_gt_c = []
recorded_pr_d = []
recorded_pr_c = []
LR = 1e-4
print(bcolors.HEADER+"Last Epoch = "+str(lastepoch)+bcolors.ENDC)
if DEBUG_PLT_LOSS:
plt.ion() # interactive mode so the loss plot updates without blocking
plt.figure(1, figsize=(4,12))
# RUN
for epoch in range(lastepoch,1):
#for epoch in range(lastepoch,4001):
if os.path.isdir("result/%04d"%epoch):
continue
cnt=0
if epoch > 2000:
learning_rate = 1e-5
LR = 1e-5
for ind in np.random.permutation(packed_tiles.shape[0]*packed_tiles.shape[1]):
......@@ -313,18 +361,73 @@ for epoch in range(lastepoch,1):
#print(bcolors.WARNING+"Found NaN, skipping iteration for tile "+str(i)+","+str(j)+bcolors.ENDC)
pass
else:
_,G_current,output = sess.run([G_opt,G_loss,out],feed_dict={in_tile:input_patch,gt:gt_patch,lr:learning_rate})
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
_,G_current,output,summary = sess.run([G_opt,G_loss,out,merged],feed_dict={in_tile:input_patch,gt:gt_patch,lr:LR},options=run_options,run_metadata=run_metadata)
#_,G_current,output = sess.run([G_opt,G_loss,out],feed_dict={in_tile:input_patch,gt:gt_patch,lr:LR})
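# FULL_TRACE makes this step record per-op timing and memory statistics;
# the collected run_metadata is handed to train_writer.add_run_metadata()
# below so TensorBoard can display the step trace alongside the graph.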
g_loss[ind]=G_current
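# g_loss is zero-initialized, so np.where(g_loss) below averages only over
# tiles already visited this epoch.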
mean_loss = np.mean(g_loss[np.where(g_loss)])
if DEBUG_PLT_LOSS:
recorded_loss.append(G_current)
recorded_mean_loss.append(mean_loss)
recorded_pr_d.append(output[0,0])
recorded_pr_c.append(output[0,1])
recorded_gt_d.append(gt_patch[0,0])
recorded_gt_c.append(gt_patch[0,1])
plt.clf()
print("%d %d Loss=%.3f CurrentLoss=%.3f Time=%.3f"%(epoch,cnt,np.mean(g_loss[np.where(g_loss)]),G_current,time.time()-st))
plt.subplot(311)
plt.plot(recorded_loss, label='loss')
plt.plot(recorded_mean_loss, label='mean loss', color='red')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title("Loss=%.5f, Mean Loss=%.5f"%(G_current,mean_loss), fontdict={'size': 20, 'color': 'red'})
#plt.text(0.5, 0.5, 'Loss=%.5f' % G_current, fontdict={'size': 20, 'color': 'red'})
plt.subplot(312)
plt.xlabel('Iteration')
plt.ylabel('Disparities')
plt.plot(recorded_gt_d, label='gt_d',color='green')
plt.plot(recorded_pr_d, label='pr_d',color='red')
plt.legend(loc='best',ncol=1)
plt.subplot(313)
plt.xlabel('Iteration')
plt.ylabel('Confidences')
plt.plot(recorded_gt_c, label='gt_c',color='green')
plt.plot(recorded_pr_c, label='pr_c',color='red')
plt.legend(loc='best',ncol=1)
plt.pause(0.001)
else:
print("%d %d Loss=%.3f CurrentLoss=%.3f Time=%.3f"%(epoch,cnt,mean_loss,G_current,time.time()-st))
train_writer.add_run_metadata(run_metadata, 'step%d' % cnt)
#test_writer.add_summary(summary,cnt)
train_writer.add_summary(summary, cnt)
if epoch%save_freq==0:
if not os.path.isdir(result_dir + '%04d'%epoch):
os.makedirs(result_dir + '%04d'%epoch)
saver.save(sess, checkpoint_dir + 'model.ckpt')
train_writer.close()
test_writer.close()
print_time()
print(bcolors.OKGREEN+"time: "+str(time.time())+bcolors.ENDC)
plt.ioff()
plt.show()
......@@ -68,13 +68,14 @@ def lrelu(x):
def network(input):
fc1 = slim.fully_connected(input,101,activation_fn=lrelu,scope='g_fc1')
fc2 = slim.fully_connected(fc1, 101,activation_fn=lrelu,scope='g_fc2')
fc3 = slim.fully_connected(fc2, 101,activation_fn=lrelu,scope='g_fc3')
fc4 = slim.fully_connected(fc3, 101,activation_fn=lrelu,scope='g_fc4')
fc5 = slim.fully_connected(fc4, 2,activation_fn=lrelu,scope='g_fc5')
fc1 = slim.fully_connected(input,2048,activation_fn=lrelu,scope='g_fc1')
fc2 = slim.fully_connected(fc1, 1024,activation_fn=lrelu,scope='g_fc2')
fc3 = slim.fully_connected(fc2, 512,activation_fn=lrelu,scope='g_fc3')
fc4 = slim.fully_connected(fc3, 8,activation_fn=lrelu,scope='g_fc4')
fc5 = slim.fully_connected(fc4, 4,activation_fn=lrelu,scope='g_fc5')
fc6 = slim.fully_connected(fc5, 2,activation_fn=lrelu,scope='g_fc6')
return fc5
return fc6
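# NOTE: this inference-side copy of network() must match the training
# definition exactly (layer sizes and 'g_fc*' scopes), otherwise the saved
# checkpoint variables will not restore.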
sess = tf.Session()
......@@ -147,6 +148,9 @@ for item in tlist:
packed_tiles = np.array([[pile.pack_tile(tiles[i,j],ptab) for j in range(tiles.shape[1])] for i in range(tiles.shape[0])])
packed_tiles = np.dstack((packed_tiles,values[:,:,0]))
print(packed_tiles.shape)
print("ENDDD!")
# flatten
packed_tiles_flat = packed_tiles.reshape(-1, packed_tiles.shape[-1])
values_flat = values.reshape(-1, values.shape[-1])
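# reshape(-1, shape[-1]) collapses the (rows, cols) tile grid into a batch
# axis, turning an (H, W, C) array into (H*W, C) rows - one packed tile per
# row, matching the network's [None, C] input placeholder.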
......@@ -160,6 +164,25 @@ for item in tlist:
print("Output shape: "+str(output.shape))
output_image = np.reshape(output,(tiles.shape[0],tiles.shape[1],-1))
print(output_image.shape)
import imagej_tiffwriter
# TIFF layers: 1 = prediction, 2 = ground truth, 3 = difference (1 - 2)
im1 = output_image[:,:,0]
im2 = values[:,:,1]
im3 = im1-im2
tif = np.dstack((im1,im2,im3))
imagej_tiffwriter.save('prediction_results.tiff',tif)
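# The saved stack can be opened in ImageJ as three slices over the tile
# grid: prediction, ground truth, and their difference.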
sys.exit(0)
# print per-tile predictions
for i in range(output.shape[0]):
p = output[i,0]
......