Commit 17e64c60 authored by Andrey Filippov

Merge branch 'master' of git@git.elphel.com:Elphel/python3-imagej-tiff.git

parents 94636179 6f404273
@@ -50,10 +50,12 @@ BATCH_SIZE = 2*1080//9 # == 120 Each batch of tiles has balanced D/S tiles
 SHUFFLE_EPOCH = True
 NET_ARCH1 = 0 #0 # 4 # 3 # overwrite with argv?
 NET_ARCH2 = 0 # 0 # 3 # overwrite with argv?
+SYM8_SUB = False # True # enforce that inputs from the 2d correlation have symmetrical ones (groups of 8)
 ONLY_TILE = None # 4 # 0 # remove all but center tile data; put None here for normal operation
 ZIP_LHVAR = True # combine _lvar and _hvar as odd/even elements
 #DEBUG_PACK_TILES = True
+WLOSS_LAMBDA = 0.001 # 5.0 # 1.0 # fraction of the W_loss (input layers weight non-uniformity) added to G_loss
 SUFFIX=str(NET_ARCH1)+'-'+str(NET_ARCH2)+ (["R","A"][ABSOLUTE_DISPARITY])
 # CLUSTER_RADIUS should match input data
 CLUSTER_RADIUS = 1 # 1 - 3x3, 2 - 5x5 tiles
@@ -250,10 +252,10 @@ files_train_hvar = ["/home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_rand2/
"/home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_rand2/train007_R1_GT_1.5.tfrecords", "/home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_rand2/train007_R1_GT_1.5.tfrecords",
] ]
files_train_lvar = ["/home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_rand2/train000_R1_LE_1.5.tfrecords", #files_train_lvar = ["/home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_rand2/train000_R1_LE_1.5.tfrecords",
] # ]
files_train_hvar = ["/home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_rand2/train000_R1_GT_1.5.tfrecords", #files_train_hvar = ["/home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_rand2/train000_R1_GT_1.5.tfrecords",
] #]
#files_train_hvar = [] #files_train_hvar = []
#file_test_lvar= "/home/eyesis/x3d_data/data_sets/tf_data_3x3a/train000_R1_LE_1.5.tfrecords" # "/home/eyesis/x3d_data/data_sets/train-000_R1_LE_1.5.tfrecords" #file_test_lvar= "/home/eyesis/x3d_data/data_sets/tf_data_3x3a/train000_R1_LE_1.5.tfrecords" # "/home/eyesis/x3d_data/data_sets/train-000_R1_LE_1.5.tfrecords"
file_test_lvar= "/home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_rand2/testTEST_R1_LE_1.5.tfrecords" file_test_lvar= "/home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_rand2/testTEST_R1_LE_1.5.tfrecords"
@@ -445,6 +447,7 @@ def network_summary_w_b(scope, in_shape, out_shape, layout, index, network_scope
     global test_op
+    # the scope is known
     with tf.variable_scope(scope,reuse=tf.AUTO_REUSE):
         # histograms
         w = tf.get_variable('weights',shape=[in_shape,out_shape])
@@ -462,8 +465,13 @@ def network_summary_w_b(scope, in_shape, out_shape, layout, index, network_scope
         # red - the values will be automapped to 0-255 range
         # grid = tf.stack([tf.reduce_max(w),tf.reduce_min(w),tf.reduce_min(w)])
         # yellow - the values will be automapped to 0-255 range
-        grid_y = tf.stack([tf.reduce_max(w),tf.reduce_max(w),tf.reduce_max(w)/2])
-        grid_r = tf.stack([tf.reduce_max(w),tf.reduce_min(w),tf.reduce_min(w)])
+        #grid_y = tf.stack([tf.reduce_max(w),tf.reduce_max(w),tf.reduce_max(w)/2])
+        # black
+        grid_y = tf.stack([tf.reduce_min(w),tf.reduce_min(w),tf.reduce_min(w)])
+        #grid_r = tf.stack([tf.reduce_max(w),tf.reduce_min(w),tf.reduce_min(w)])
+        # white
+        grid_r = tf.stack([tf.reduce_max(w),tf.reduce_max(w),tf.reduce_max(w)])
         wt = tf.transpose(w,[1,0])
         wt = wt[:,:-1]
@@ -521,8 +529,12 @@ def network_summary_w_b(scope, in_shape, out_shape, layout, index, network_scope
         # red - the values will be automapped to 0-255 range
         # grid = tf.stack([tf.reduce_max(w),tf.reduce_min(w),tf.reduce_min(w)])
         # yellow - the values will be automapped to 0-255 range
-        grid_y = tf.stack([tf.reduce_max(w),tf.reduce_max(w),tf.reduce_max(w)/2])
-        grid_r = tf.stack([tf.reduce_max(w),tf.reduce_min(w),tf.reduce_min(w)])
+        # black
+        grid_y = tf.stack([tf.reduce_min(w),tf.reduce_min(w),tf.reduce_min(w)])
+        #grid_r = tf.stack([tf.reduce_max(w),tf.reduce_min(w),tf.reduce_min(w)])
+        # white
+        grid_r = tf.stack([tf.reduce_max(w),tf.reduce_max(w),tf.reduce_max(w)])
         wt = tf.transpose(w,[1,0])
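Per the comments in the hunks above, the image summary values get automapped to the 0-255 range, so a grid row stacked from three channel minimums renders black and one from three maximums renders white. A small NumPy sketch of that min-max automapping idea (the to8 helper is hypothetical, not from this repo):

import numpy as np
w = np.array([-0.3, 0.1, 0.7])                        # example weight values
lo, hi = w.min(), w.max()
to8 = lambda v: np.uint8(255 * (v - lo) / (hi - lo))  # min-max automap to 0-255
print(to8(np.array([lo, lo, lo])))                    # [0 0 0]       - black grid line
print(to8(np.array([hi, hi, hi])))                    # [255 255 255] - white grid line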
@@ -592,22 +604,48 @@ def network_summary_w_b(scope, in_shape, out_shape, layout, index, network_scope
-def network_sub(input, layout, reuse):
+def network_sub(input, layout, reuse, sym8 = False):
     last_indx = None;
     fc = []
+    inp_weights = []
     for i, num_outs in enumerate (layout):
         if num_outs:
             if fc:
                 inp = fc[-1]
-                fc.append(slim.fully_connected(inp, num_outs, activation_fn=lrelu, scope='g_fc_sub'+str(i), reuse = reuse))
             else:
                 inp = input
-                fc.append(slim.fully_connected(inp, num_outs, activation_fn=lrelu, scope='g_fc_sub'+str(i), reuse = reuse))
-            if not reuse:
-                network_summary_w_b('g_fc_sub'+str(i), inp.shape[1], num_outs, layout, i, 'sub')
-    return fc[-1]
+            if sym8:
+                inp8 = sym_inputs8(inp)
+                num_non_sum = num_outs % len(inp8) # if the number of first layer outputs is not a multiple of 8
+                num_sym8    = num_outs // len(inp8) # number of symmetrical groups
+                fc_sym = []
+                for j in range (len(inp8)): # == 8
+                    reuse_this = reuse | (j > 0)
+                    scp = 'g_fc_sub'+str(i)
+                    fc_sym.append(slim.fully_connected(inp8[j], num_sym8, activation_fn=lrelu, scope= scp, reuse = reuse_this))
+                    if not reuse_this:
+                        with tf.variable_scope(scp,reuse=True) : # tf.AUTO_REUSE):
+                            inp_weights.append(tf.get_variable('weights')) # ,shape=[inp.shape[1],num_outs]))
+                        network_summary_w_b(scp, inp.shape[1], num_outs, layout, i, 'sub')
+                if num_non_sum > 0:
+                    reuse_this = reuse
+                    scp = 'g_fc_sub'+str(i)+"r"
+                    fc_sym.append(slim.fully_connected(inp, num_non_sum, activation_fn=lrelu, scope=scp, reuse = reuse_this))
+                    if not reuse_this:
+                        with tf.variable_scope(scp,reuse=True) : # tf.AUTO_REUSE):
+                            inp_weights.append(tf.get_variable('weights')) # ,shape=[inp.shape[1],num_outs]))
+                        network_summary_w_b(scp, inp.shape[1], num_outs, layout, i, 'sub')
+                fc.append(tf.concat(fc_sym, 1, name='sym_input_layer'))
+            else:
+                scp = 'g_fc_sub'+str(i)
+                fc.append(slim.fully_connected(inp, num_outs, activation_fn=lrelu, scope= scp, reuse = reuse))
+                if not reuse:
+                    with tf.variable_scope(scp, reuse=True) : # tf.AUTO_REUSE):
+                        inp_weights.append(tf.get_variable('weights')) # ,shape=[inp.shape[1],num_outs]))
+                    network_summary_w_b(scp, inp.shape[1], num_outs, layout, i, 'sub')
+    return fc[-1], inp_weights
 def network_inter(input, layout):
     last_indx = None;
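sym_inputs8() used in network_sub above is defined elsewhere in this file and is not part of this diff. Below is a minimal NumPy sketch of the 8-fold symmetry grouping (4 rotations of each 9x9 correlation tile, each with an optional mirror) it presumably implements; the name sym_inputs8_np and the exact transform order are assumptions:

import numpy as np

def sym_inputs8_np(tile_flat, side=9):
    # Return the 8 dihedral transforms of a flattened side x side tile,
    # each flattened back to 1d. Feeding all 8 views through one shared
    # fully-connected scope enforces 8-fold symmetry of the learned weights.
    tile = np.asarray(tile_flat).reshape(side, side)
    views = []
    for k in range(4):                          # 0/90/180/270 degree rotations
        r = np.rot90(tile, k)
        views.append(r.reshape(-1))             # rotation only
        views.append(np.fliplr(r).reshape(-1))  # rotation + mirror
    return views                                # len(views) == 8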
@@ -630,22 +668,31 @@ def network_inter(input, layout):
     #If using residual disparity, split last layer into 2 or remove activation and add rectifier to confidence only
     return fc_out
-def network_siam(input, # now [?:9,325]
+def network_siam(input, # now [?,9,325]-> [?,25,325]
                  layout1,
                  layout2,
-                 only_tile=None): # just for debugging - feed only data from the center sub-network
+                 sym8 = False,
+                 only_tile = None): # just for debugging - feed only data from the center sub-network
     with tf.name_scope("Siam_net"):
+        inp_weights = []
         num_legs = input.shape[1] # == 9
         inter_list = []
         reuse = False
         for i in range (num_legs):
             if (only_tile is None) or (i == only_tile):
-                inter_list.append(network_sub(input[:,i,:],
-                                  layout= layout1,
-                                  reuse= reuse))
+#                inter_list.append(network_sub(input[:,i,:],
+#                                  layout= layout1,
+#                                  reuse= reuse,
+#                                  sym8 = sym8))
+                ns, ns_weights = network_sub(input[:,i,:],
+                                  layout= layout1,
+                                  reuse= reuse,
+                                  sym8 = sym8)
+                inter_list.append(ns)
+                inp_weights += ns_weights
                 reuse = True
         inter_tensor = tf.concat(inter_list, 1, name='inter_tensor')
-        return network_inter (inter_tensor, layout2)
+        return network_inter (inter_tensor, layout2), inp_weights
 #corr2d9x325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[None,cluster_size,FEATURES_PER_TILE]) , tf.reshape(next_element_tt['target_disparity'], [None,cluster_size, 1])],2)
 def debug_gt_variance(
@@ -774,6 +821,36 @@ def batchLoss(out_batch, # [batch_size,(1..2)] tf_result
     else:
         return cost1b, disp_slice, d_gt_slice, out_diff,out_diff2, w_norm, out_wdiff2, cost1
+def weightsLoss(inp_weights): # inp_weights - input layer weights, [325, num_outs]
+#                 weights_lambdas): # single lambda or same length as inp_weights.shape[1]
+    """
+    Enforcing 'smooth' weights for the input 2d correlation tiles
+    @return mean squared difference for each weight and average of 8 neighbors divided by mean squared weights
+    """
+    weight_ortho = 1.0
+    weight_diag  = 0.7
+    sw = 4.0 * (weight_ortho + weight_diag)
+    weight_ortho /= sw
+    weight_diag  /= sw
+#    w_neib = tf.const([[weight_diag, weight_ortho, weight_diag],
+#                       [weight_ortho, -1.0, weight_ortho],
+#                       [weight_diag, weight_ortho, weight_diag]])
+    with tf.name_scope("WeightsLoss"):
+        # Adding 1 tile border
+        tf_inp       = tf.reshape(inp_weights[:TILE_LAYERS * TILE_SIZE,:], [TILE_LAYERS, FILE_TILE_SIDE, FILE_TILE_SIDE, inp_weights.shape[1]], name = "tf_inp")
+        tf_inp_ext_h = tf.concat([tf_inp [:, :, :1, :], tf_inp, tf_inp [:, :, -1:, :]], axis = 2, name = "tf_inp_ext_h")
+        tf_inp_ext   = tf.concat([tf_inp_ext_h [:, :1, :, :], tf_inp_ext_h, tf_inp_ext_h[:, -1:, :, :]], axis = 1, name = "tf_inp_ext")
+        s_ortho = tf_inp_ext[:,1:-1,:-2,:] + tf_inp_ext[:,1:-1, 2:,:] + tf_inp_ext[:, :-2,1:-1,:] + tf_inp_ext[:,2:, 1:-1, :]
+        s_corn  = tf_inp_ext[:, :-2,:-2,:] + tf_inp_ext[:, :-2, 2:,:] + tf_inp_ext[:,2:,  :-2,:] + tf_inp_ext[:,2: , 2:, :]
+        w_diff  = tf.subtract(tf_inp, s_ortho * weight_ortho + s_corn * weight_diag, name = "w_diff")
+        w_diff2 = tf.multiply(w_diff, w_diff, name = "w_diff2")
+        w_var   = tf.reduce_mean(w_diff2, name = "w_var")
+        w2_mean = tf.reduce_mean(inp_weights * inp_weights, name = "w2_mean")
+        w_rel   = tf.divide(w_var, w2_mean, name = "w_rel")
+    return w_rel # scalar, cost for weights non-smoothness in 2d
 #In GPU - reformat inputs
 ##corr2d325 = tf.concat([next_element_tt['corr2d'], next_element_tt['target_disparity']],1)
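A NumPy sanity check of the weightsLoss() computation above (a sketch: TILE_LAYERS = 4 and FILE_TILE_SIDE = 9 are assumed from the 4*81+1 = 325 input layout, and edge padding stands in for the tf.concat border replication):

import numpy as np

def weights_smoothness_np(w, layers=4, side=9):
    # mean squared difference between each weight and the weighted average
    # of its 8 neighbors (edges replicated), over the mean squared weight
    wo, wd = 1.0, 0.7
    s = 4.0 * (wo + wd)
    wo, wd = wo / s, wd / s
    t = w[:layers * side * side, :].reshape(layers, side, side, -1)
    p = np.pad(t, ((0, 0), (1, 1), (1, 1), (0, 0)), mode='edge')
    s_ortho = p[:, 1:-1, :-2] + p[:, 1:-1, 2:] + p[:, :-2, 1:-1] + p[:, 2:, 1:-1]
    s_corn  = p[:, :-2, :-2] + p[:, :-2, 2:] + p[:, 2:, :-2] + p[:, 2:, 2:]
    diff = t - (s_ortho * wo + s_corn * wd)
    return (diff * diff).mean() / (w * w).mean()

print(weights_smoothness_np(np.ones((325, 32))))        # 0.0 - perfectly smooth weights
print(weights_smoothness_np(np.random.randn(325, 32)))  # ~1.1 - uncorrelated noisy weights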
@@ -781,6 +858,9 @@ def batchLoss(out_batch, # [batch_size,(1..2)] tf_result
 #Should have shape (?,9,325)
 corr2d9x325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE]) , tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1])],2)
+corr2d_Nx325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE], name="coor2d_cluster"),
+                          tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1], name="targdisp_cluster")], axis=2, name = "corr2d_Nx325")
 #corr2d9x324 = tf.reshape( next_element_tt['corr2d'], [-1, cluster_size, FEATURES_PER_TILE], name = 'corr2d9x324')
 #td9x1 = tf.reshape(next_element_tt['target_disparity'], [-1, cluster_size, 1], name = 'td9x1')
 #corr2d9x325 = tf.concat([corr2d9x324 , td9x1],2, name = 'corr2d9x325')
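corr2d_Nx325 repeats the corr2d9x325 reshape-and-concat with named tensors; a quick NumPy check of the shape layout (cluster_size = 9 and FEATURES_PER_TILE = 324 are assumed from the 9x325 comments above):

import numpy as np
corr2d = np.zeros((7, 9 * 324))                  # a batch of 7 clusters, 9 tiles each
target_disparity = np.zeros((7, 9))
a = corr2d.reshape(-1, 9, 324)                   # [-1, cluster_size, FEATURES_PER_TILE]
b = target_disparity.reshape(-1, 9, 1)
print(np.concatenate([a, b], axis=2).shape)      # -> (7, 9, 325)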
@@ -789,9 +869,10 @@ corr2d9x325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,F
 # in_features = tf.concat([corr2d,target_disparity],0)
 #out = network_fc_simple(input=corr2d325, arch = NET_ARCH1)
-out = network_siam(input=corr2d9x325,
+out, inp_weights = network_siam(input=corr2d_Nx325,
                    layout1 = NN_LAYOUT1,
                    layout2 = NN_LAYOUT2,
+                   sym8 = SYM8_SUB,
                    only_tile = ONLY_TILE) #Remove/put None for normal operation
 # w_slice = tf.reshape(gt_ds_batch[:,1],[-1], name = "w_gt_slice")
@@ -809,11 +890,18 @@ G_loss, _disp_slice, _d_gt_slice, _out_diff, _out_diff2, _w_norm, _out_wdiff2, _
                  conf_pwr = 2.0,
                  gt_conf_offset = 0.08,
                  gt_conf_pwr = 2.0,
-                 error2_offset = 0.0025, # (0.05^2)
+                 error2_offset = 0, # 0.0025, # (0.05^2)
                  disp_wmin = 1.0, # minimal disparity to apply weight boosting for small disparities
                  disp_wmax = 8.0, # maximal disparity to apply weight boosting for small disparities
                  use_out = False) # use calculated disparity for disparity weight boosting (False - use target disparity)
+if WLOSS_LAMBDA > 0.0:
+    W_loss = weightsLoss(inp_weights[0]) # inp_weights - list of tensors, currently - just [0]
+    GW_loss = tf.add(G_loss, WLOSS_LAMBDA * W_loss, name = "GW_loss")
+else:
+    GW_loss = G_loss
+    W_loss = tf.constant(0.0)
 #debug
 GT_variance = debug_gt_variance(indx = 0, # This tile index (0..8)
                                 center_indx = 4, # center tile index
@@ -834,7 +922,9 @@ with tf.name_scope('epoch_average'):
 t_vars=tf.trainable_variables()
 lr=tf.placeholder(tf.float32)
-G_opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss)
+#G_opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss)
+G_opt=tf.train.AdamOptimizer(learning_rate=lr).minimize(GW_loss)
 saver=tf.train.Saver()
@@ -855,18 +945,18 @@ with tf.Session() as sess:
     merged = tf.summary.merge_all()
-    vis_placeholder = tf.placeholder(tf.float32, [1,32,325,3])
-    some_image2 = tf.summary.image('custom_test', vis_placeholder)
+    # display weights, part 1 begin
+    import numpy_visualize_weights as npw
     l1 = NN_LAYOUT1.index(next(filter(lambda x: x!=0, NN_LAYOUT1)))
     l2 = NN_LAYOUT2.index(next(filter(lambda x: x!=0, NN_LAYOUT2)))
-    with tf.variable_scope('g_fc_sub'+str(l1),reuse=tf.AUTO_REUSE):
-        w = tf.get_variable('weights',shape=[325,32])
-        wd = w[...,tf.newaxis]
-        wds = tf.stack([wd]*3,axis=0)
-    #print(wd.shape)
-    #some_image = tf.summary.image("tfsi_test",wds.eval(),max_outputs=1)
-    some_image = tf.summary.image("tfsi_test",wds,max_outputs=1)
+    wimg1_placeholder = tf.placeholder(tf.float32, [1,160,80,3])
+    wimg1 = tf.summary.image('weights/sub_'+str(l1), wimg1_placeholder)
+    wimg2_placeholder = tf.placeholder(tf.float32, [1,120,60,3])
+    wimg2 = tf.summary.image('weights/inter_'+str(l2), wimg2_placeholder)
+    # display weights, part 1 end
     train_writer = tf.summary.FileWriter(TRAIN_PATH, sess.graph)
     test_writer = tf.summary.FileWriter(TEST_PATH, sess.graph)
@@ -899,8 +989,8 @@ with tf.Session() as sess:
         for i in range(dataset_train_size):
             try:
 #                train_summary,_, G_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, corr2d325_out = sess.run(
-                _, train_summary,_, G_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
-                    [ test_op, merged,
+                train_summary,_, G_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
+                    [ merged,
                       G_opt,
                       G_loss,
                       out,
@@ -970,23 +1060,27 @@ with tf.Session() as sess:
 #            _,_=sess.run([tf_ph_G_loss,tf_ph_sq_diff],feed_dict={tf_ph_G_loss:test_avg, tf_ph_sq_diff:test2_avg})
         #train_writer.add_summary(some_image.eval(), epoch)
+        # display weights, part 2 begin
         l1 = NN_LAYOUT1.index(next(filter(lambda x: x!=0, NN_LAYOUT1)))
         l2 = NN_LAYOUT2.index(next(filter(lambda x: x!=0, NN_LAYOUT2)))
-        with tf.variable_scope('g_fc_sub'+str(l1),reuse=tf.AUTO_REUSE):
-            w = tf.get_variable('weights',shape=[325,32])
-            wd = w[tf.newaxis,...]
-            wds = tf.stack([wd]*3,axis=-1)
-        timg_min = tf.reduce_min(w).eval()
-        timg_max = tf.reduce_max(w).eval()
-        timg = wds.eval()
-        timg[:,:,:,0] = timg_min
-        timg[:,:,:,1] = timg_min
-        timg = np.transpose(timg,(0,2,1,3))
-        train_writer.add_summary(some_image2.eval(feed_dict={vis_placeholder: timg}), epoch)
+        with tf.variable_scope('g_fc_sub'+str(l1),reuse=tf.AUTO_REUSE):
+            w = tf.get_variable('weights',shape=[325,NN_LAYOUT1[l1]])
+            w = tf.transpose(w,(1,0))
+            img1 = npw.tiles(npw.coldmap(w.eval(),zero_span=0.0002),(1,4,9,9),tiles_per_line=2,borders=True)
+            img1 = img1[np.newaxis,...]
+        train_writer.add_summary(wimg1.eval(feed_dict={wimg1_placeholder: img1}), epoch)
+        with tf.variable_scope('g_fc_inter'+str(l2),reuse=tf.AUTO_REUSE):
+            w = tf.get_variable('weights',shape=[144,NN_LAYOUT2[l2]])
+            w = tf.transpose(w,(1,0))
+            img2 = npw.tiles(npw.coldmap(w.eval(),zero_span=0.0002),(3,3,4,4),tiles_per_line=4,borders=True)
+            img2 = img2[np.newaxis,...]
+        train_writer.add_summary(wimg2.eval(feed_dict={wimg2_placeholder: img2}), epoch)
+        # display weights, part 2 end
         train_writer.add_summary(train_summary, epoch)
         test_writer.add_summary(test_summaries[0], epoch)
...
numpy_visualize_weights.py (new file)
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import math
# input: np.array(a,b) - 1 channel
# output: np.array(a,b,3) - 3 color channels
def coldmap(img,zero_span=0.2):
out = np.dstack(3*[img])
img_min = np.nanmin(img)
img_max = np.nanmax(img)
#print("min: "+str(img_min)+", max: "+str(img_max))
ch_r = out[...,0]
ch_g = out[...,1]
ch_b = out[...,2]
# blue for <0
ch_r[img<0] = 0
ch_g[img<0] = 0
ch_b[img<0] = -ch_b[img<0]
# red for >0
ch_r[img>0] = ch_b[img>0]
ch_g[img>0] = 0
ch_b[img>0] = 0
# green for 0
ch_r[img==0] = 0
ch_g[img==0] = img_max
ch_b[img==0] = 0
# green for zero vicinity
ch_r[abs(img)<zero_span/2] = 0
ch_g[abs(img)<zero_span/2] = img_max/2
ch_b[abs(img)<zero_span/2] = 0
return out
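# A quick check of coldmap's color convention (a sketch, not in the original file):
# negatives map to blue, positives to red, near-zero values to green.
def _coldmap_demo():
    demo = np.array([[-1.0, -0.5, 0.0, 0.5, 1.0]])
    rgb = coldmap(demo, zero_span=0.2)
    print(rgb[0, 0])  # [0. 0. 1.]  - blue for -1.0
    print(rgb[0, 2])  # [0. 0.5 0.] - green for 0.0 (img_max/2)
    print(rgb[0, 4])  # [1. 0. 0.]  - red for +1.0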
# input has to be pre-transposed
# the shapes just have to match
def tiles(img,shape,tiles_per_line=1,borders=True):
# shape is (n0,n1,n2,n3)
# n0*n1*n2*n3 = img.shape[1]
img_min = np.nanmin(img)
img_max = np.nanmax(img)
    #outer_color = [img_max,img_max,img_min]
    outer_color = [img_max,img_max,img_max]
    #inner_color = [img_max/4,img_max/4,img_min]
    inner_color = [img_min,img_min,img_min]
    #inner_color = [img_max,img_max,img_min]
group_h = shape[0]
group_w = shape[1]
group_size = group_h*group_w
tile_h = shape[2]
tile_w = shape[3]
tile_size = tile_h*tile_w
tpl = tiles_per_line
# main
tmp1 = []
for i in range(img.shape[0]):
if i%tpl==0:
tmp2 = []
tmp3 = []
for igh in range(group_h):
tmp4 = []
for igw in range(group_w):
si = (group_w*igh + igw + 0)*tile_size
ei = (group_w*igh + igw + 1)*tile_size
tile = img[i,si:ei]
tile = np.reshape(tile,(tile_h,tile_w,tile.shape[1]))
if borders:
if igw==group_w-1:
b_h_inner = [[inner_color]*(tile_w+0)]*( 1)
b_h_outer = [[outer_color]*(tile_w+0)]*( 1)
b_v_outer = [[outer_color]*( 1)]*(tile_h+1)
# outer hor
if igh==group_h-1:
tile = np.concatenate([tile,b_h_outer],axis=0)
# inner hor
else:
tile = np.concatenate([tile,b_h_inner],axis=0)
# outer vert
tile = np.concatenate([tile,b_v_outer],axis=1)
else:
b_v_inner = [[inner_color]*( 1)]*(tile_h+0)
b_h_inner = [[inner_color]*(tile_w+1)]*( 1)
b_h_outer = [[outer_color]*(tile_w+1)]*( 1)
# inner vert
tile = np.concatenate([tile,b_v_inner],axis=1)
# outer hor
if igh==group_h-1:
tile = np.concatenate([tile,b_h_outer],axis=0)
# inner hor
else:
tile = np.concatenate([tile,b_h_inner],axis=0)
tmp4.append(tile)
tmp3.append(np.concatenate(tmp4,axis=1))
tmp2.append(np.concatenate(tmp3,axis=0))
if i%tpl==(tpl-1):
tmp1.append(np.concatenate(tmp2,axis=1))
out = np.concatenate(tmp1,axis=0)
#out = img
return out
if __name__=="__main__":
#image = np.zeros((32,144))
image = np.random.rand(32,144)
rgb_img_0 = tiles(coldmap(image),(3,3,4,4),tiles_per_line=8,borders=True)
fig = plt.figure()
fig.suptitle("Test")
plt.imshow(rgb_img_0)
plt.show()
\ No newline at end of file