Commit fc63a9dd authored by Oleg Dzhimiev

still testing

parent 937842a1
@@ -47,7 +47,7 @@ VALUES_LAYER_NAME = 'other'
LAYERS_OF_INTEREST = ['diagm-pair', 'diago-pair', 'hor-pairs', 'vert-pairs']
RADIUS = 1
DEBUG_PLT_LOSS = False
DEBUG_PLT_LOSS = True
# If false - will not pack or rescale
DEBUG_PACK_TILES = True
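
A minimal sketch (not the repository's code) of how a `DEBUG_PLT_LOSS`-style flag, flipped on in this commit, can gate an interactive loss plot without touching the training loop itself; the helper name and matplotlib usage are assumptions for illustration.

```python
import matplotlib.pyplot as plt

DEBUG_PLT_LOSS = True  # assumed toggle, mirrors the flag changed above

loss_history = []

def record_loss(value):
    """Append a loss value and refresh the plot only when debugging is on."""
    loss_history.append(value)
    if DEBUG_PLT_LOSS:
        plt.clf()
        plt.plot(loss_history)
        plt.xlabel('step')
        plt.ylabel('loss')
        plt.pause(0.001)  # non-blocking refresh so training keeps running
```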
@@ -75,6 +75,9 @@ if not IS_TEST:
print("\n".join(tlist))
print("Found "+str(len(tlist))+" preprocessed tiff files:")
print_time()
pass
''' WARNING, assuming:
- timestamps and part of names match
- layer order and names are identical
@@ -148,7 +151,6 @@ if not IS_TEST:
# might not need it because going to loop through anyway
packed_tiles = np.array([[pile.pack_tile(tiles[i,j],ptab) for j in range(tiles.shape[1])] for i in range(tiles.shape[0])])
packed_tiles = np.dstack((packed_tiles,values[:,:,0]))
print("Packed (81x4 -> 1x(25*4+1)) tiled input shape: "+str(packed_tiles.shape))
@@ -260,8 +262,9 @@ cf_w_norm = tf.nn.softmax(cf_w)
#out_cf = out[:,1]
#G_loss = tf.reduce_mean(tf.abs(tf.nn.softmax(out[:,1])*out[:,0]-cf_w_norm*gt[:,0]))
G_loss = tf.reduce_mean(tf.squared_difference(out[:,0], gt[:,0]))
#G_loss = tf.reduce_mean(tf.squared_difference(out[:,0], gt[:,0]))
#G_loss = tf.reduce_mean(tf.abs(out[:,0]-gt[:,0]))
G_loss = tf.losses.mean_squared_error(gt[:,0],out[:,0],cf_w)
tf.summary.scalar('loss', G_loss)
tf.summary.scalar('prediction', out[0,0])
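
Numerical sketch (NumPy, assumed data) of what the new loss adds over the old one: the previous `G_loss` averaged squared errors equally, while `tf.losses.mean_squared_error` with `cf_w` as weights emphasizes tiles in proportion to their confidence weight.

```python
import numpy as np

gt   = np.array([1.0, 2.0, 3.0, 4.0])   # assumed ground-truth disparities
out  = np.array([1.1, 1.8, 3.5, 4.0])   # assumed network predictions
cf_w = np.array([1.0, 1.0, 0.1, 0.0])   # assumed confidence weights

unweighted = np.mean((out - gt) ** 2)
# TF1's default loss reduction sums the weighted errors and divides by the
# number of non-zero weights (see tf.losses.Reduction for the exact behaviour).
weighted = np.sum(cf_w * (out - gt) ** 2) / np.count_nonzero(cf_w)

print(unweighted, weighted)  # the low-confidence tile barely affects `weighted`
```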
@@ -303,7 +306,7 @@ recorded_gt_c = []
recorded_pr_d = []
recorded_pr_c = []
LR = 1e-4
LR = 1e-5
print(bcolors.HEADER+"Last Epoch = "+str(lastepoch)+bcolors.ENDC)
@@ -316,14 +319,37 @@ if DEBUG_PLT_LOSS:
# RUN
# epoch is one image
for epoch in range(lastepoch,lastepoch+len(tlist)):
print(bcolors.HEADER+"Epoch #"+str(epoch)+bcolors.ENDC)
for epoch in range(lastepoch,1):
#for epoch in range(lastepoch,4001):
if os.path.isdir("result/%04d"%epoch):
continue
cnt=0
tlist_index = epoch - lastepoch
print(bcolors.OKGREEN+"Processing "+tlist[tlist_index]+bcolors.ENDC)
tmp_tiff = ijt.imagej_tiff(tlist[tlist_index])
tmp_tiles = tmp_tiff.getstack(labels,shape_as_tiles=True)
tmp_vals = tmp_tiff.getvalues(label=VALUES_LAYER_NAME)
# Parse packing table
# packing table name
ptab_name = "tile_packing_table.xml"
ptab = pile.PackingTable(ptab_name,LAYERS_OF_INTEREST).lut
# might not need it because going to loop through anyway
packed_tiles = np.array([[pile.pack_tile(tmp_tiles[i,j],ptab) for j in range(tmp_tiles.shape[1])] for i in range(tmp_tiles.shape[0])])
packed_tiles = np.dstack((packed_tiles,tmp_vals[:,:,0]))
#if epoch > 2000:
# LR = 1e-5
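
Sketch of one way to make the commented-out `if epoch > 2000: LR = 1e-5` drop (and the LR change above) take effect without rebuilding the graph: feed the learning rate through a placeholder, assuming the TF1 API used elsewhere in this script; the `lr_ph` name and the optimizer wiring are illustrative, not the repository's code.

```python
import tensorflow as tf

lr_ph = tf.placeholder(tf.float32, shape=[], name='learning_rate')
# G_loss is assumed to be the scalar loss defined earlier in the script:
# train_op = tf.train.AdamOptimizer(lr_ph).minimize(G_loss)

LR = 1e-4
# inside the epoch loop:
# if epoch > 2000:
#     LR = 1e-5
# sess.run(train_op, feed_dict={in_tile: packed_tiles_flat, gt: ..., lr_ph: LR})
```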
@@ -429,10 +455,9 @@ for epoch in range(lastepoch,1):
else:
print("%d %d Loss=%.3f CurrentLoss=%.3f Time=%.3f"%(epoch,cnt,mean_loss,G_current,time.time()-st))
train_writer.add_run_metadata(run_metadata, 'step%d' % cnt)
#train_writer.add_run_metadata(run_metadata, 'step%d' % cnt)
#test_writer.add_summary(summary,cnt)
train_writer.add_summary(summary, cnt)
#train_writer.add_summary(summary, cnt)
if epoch%save_freq==0:
if not os.path.isdir(result_dir + '%04d'%epoch):
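
For context on the `add_summary` / `add_run_metadata` calls toggled off above, a small self-contained TF1 sketch of the usual summary-writing pattern; the scalar, feed values, and log directory are assumptions for illustration only.

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[])
loss = tf.square(x)
tf.summary.scalar('loss', loss)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/tf_summaries', sess.graph)
    for step, v in enumerate([3.0, 2.0, 1.0]):
        summary = sess.run(merged, feed_dict={x: v})
        writer.add_summary(summary, step)   # same call the diff comments in/out
    writer.close()
```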
@@ -151,24 +151,49 @@ for item in tlist:
print(packed_tiles.shape)
print("ENDDD!")
# NO
# flatten
packed_tiles_flat = packed_tiles.reshape(-1, packed_tiles.shape[-1])
values_flat = values.reshape(-1, values.shape[-1])
#packed_tiles_flat = packed_tiles.reshape(-1, packed_tiles.shape[-1])
#values_flat = values.reshape(-1, values.shape[-1])
print("Packed (81x4 -> 1x(25*4+1)) tiled input shape: "+str(packed_tiles_flat.shape))
print("Values shape "+str(values_flat.shape))
print_time()
#print("Packed (81x4 -> 1x(25*4+1)) tiled input shape: "+str(packed_tiles_flat.shape))
#print("Values shape "+str(values_flat.shape))
#print_time()
# do line by line?!
output_image = np.empty((packed_tiles.shape[0],packed_tiles.shape[1],2))
print("Output shape = "+str(output_image.shape))
for i in range(packed_tiles.shape[0]):
# now run prediction
packed_tiles_flat = packed_tiles[i]
values_flat = values[i]
output = sess.run(out,feed_dict={in_tile:packed_tiles_flat})
output_image[i] = output
print("Output shape: "+str(output.shape))
# so, let's print
for j in range(output.shape[0]):
p = output[j,0]
pc = output[j,1]
fv = values_flat[j,0]
gt = values_flat[j,1]
cf = values_flat[j,2]
vstring = "["+"{0:.2f}".format(fv)+", "+"{0:.2f}".format(gt)+", "+"{0:.2f}".format(cf)+"]"
pstring = "["+"{0:.2f}".format(p)+", "+"{0:.2f}".format(pc)+"]"
if not np.isnan(p):
outstring = "i,j: "+str(i)+" "+str(j)+" Values: "+vstring+" Prediction: "+pstring
if abs(cf)<0.5:
print(outstring)
#pass
else:
print(bcolors.WARNING+outstring+bcolors.ENDC)
output_image = np.reshape(output,(tiles.shape[0],tiles.shape[1],-1))
print(output_image.shape)
sess.close()
import imagej_tiffwriter
# 1 prediction
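
Sketch of the row-by-row inference pattern this hunk switches to (assumed names `sess`, `out`, `in_tile`): feed one image row of packed tiles per `sess.run()` call instead of flattening the whole image, then stack the rows back into an output image.

```python
import numpy as np

def predict_by_rows(sess, out, in_tile, packed_tiles):
    rows = []
    for i in range(packed_tiles.shape[0]):
        # packed_tiles[i] has shape (width, features); one run per image row
        rows.append(sess.run(out, feed_dict={in_tile: packed_tiles[i]}))
    return np.stack(rows)            # (height, width, n_outputs)
```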
@@ -183,25 +208,7 @@ for item in tlist:
imagej_tiffwriter.save('prediction_results.tiff',tif)
sys.exit(0)
# so, let's print
for i in range(output.shape[0]):
p = output[i,0]
pc = output[i,1]
fv = values_flat[i,0]
gt = values_flat[i,1]
cf = values_flat[i,2]
vstring = "["+"{0:.2f}".format(fv)+", "+"{0:.2f}".format(gt)+", "+"{0:.2f}".format(cf)+"]"
pstring = "["+"{0:.2f}".format(p)+", "+"{0:.2f}".format(pc)+"]"
if not np.isnan(p):
outstring = "i: "+str(i)+" Values: "+vstring+" Prediction: "+pstring
if cf<0.5:
print(outstring)
else:
print(bcolors.WARNING+outstring+bcolors.ENDC)
#sys.exit(0)
#else:
# print("i: "+str(i)+" NaNs")