Elphel / python3-imagej-tiff
Commit a1c3b782, authored Jul 07, 2018 by Oleg Dzhimiev
parent e56bda2f

    tensorboard graphs tests

Showing 2 changed files, with 149 additions and 23 deletions:
    test_nn_feed.py    +120  -17
    test_nn_infer.py    +29   -6

test_nn_feed.py  (view file @ a1c3b782)
@@ -23,6 +23,8 @@ import itertools
 import time
+import matplotlib.pyplot as plt
 #http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
 class bcolors:
     HEADER = '\033[95m'
@@ -45,6 +47,10 @@ VALUES_LAYER_NAME = 'other'
 LAYERS_OF_INTEREST = ['diagm-pair', 'diago-pair', 'hor-pairs', 'vert-pairs']
 RADIUS = 1
+DEBUG_PLT_LOSS = False
+# If false - will not pack or rescal
+DEBUG_PACK_TILES = True
 try:
     src = sys.argv[1]
 except IndexError:
@@ -214,40 +220,62 @@ def lrelu(x):
 def network(input):
-    fc1 = slim.fully_connected(input, 101, activation_fn=lrelu, scope='g_fc1')
+    fc1 = slim.fully_connected(input, 2048, activation_fn=lrelu, scope='g_fc1')
-    fc2 = slim.fully_connected(fc1, 101, activation_fn=lrelu, scope='g_fc2')
+    fc2 = slim.fully_connected(fc1, 1024, activation_fn=lrelu, scope='g_fc2')
-    fc3 = slim.fully_connected(fc2, 101, activation_fn=lrelu, scope='g_fc3')
+    fc3 = slim.fully_connected(fc2, 512, activation_fn=lrelu, scope='g_fc3')
-    fc4 = slim.fully_connected(fc3, 101, activation_fn=lrelu, scope='g_fc4')
+    fc4 = slim.fully_connected(fc3, 8, activation_fn=lrelu, scope='g_fc4')
-    fc5 = slim.fully_connected(fc4, 2, activation_fn=lrelu, scope='g_fc5')
+    fc5 = slim.fully_connected(fc4, 4, activation_fn=lrelu, scope='g_fc5')
+    fc6 = slim.fully_connected(fc5, 2, activation_fn=lrelu, scope='g_fc6')
-    return fc5
+    return fc6

 sess = tf.Session()
 in_tile = tf.placeholder(tf.float32,[None,101])
 gt = tf.placeholder(tf.float32,[None,2])
+#losses = tf.get_variable("losses", [None])
+#update_operation = tf.assign(losses,tf.concat([losses,G_loss]))
+#mean_loss = tf.reduce_mean(losses)
+#tf.summary.scalar('gt_value', gt[0])
+#tf.summary.scalar('gt_confidence', gt[1])
+#tf.summary.scalar('gt_value',gt[0,0])
 #cf_cutoff = tf.constant(tf.float32,[None,1])
 out = network(in_tile)
+#tf.summary.scalar('out_value', out[0,0])
+#tf.summary.scalar('out_confidence', out[1])
 # min cutoff
 cf_cutoff = 0.173303
 cf_w = tf.pow(tf.maximum(gt[:,1]-cf_cutoff,0.0),1)
-cf_wsum = tf.reduce_sum(cf_w)
-cf_w_norm = cf_w/cf_wsum
+#cf_wsum = tf.reduce_sum(cf_w[~tf.is_nan(cf_w)])
+#cf_w_norm = cf_w/cf_wsum
+cf_w_norm = tf.nn.softmax(cf_w)
 #out_cf = out[:,1]
-G_loss = tf.reduce_mean(tf.abs(out[:,0]-cf_w_norm*gt[:,0]))
+G_loss = tf.reduce_mean(tf.abs(tf.nn.softmax(out[:,1])*out[:,0]-cf_w_norm*gt[:,0]))
+tf.summary.scalar('loss', G_loss)
+tf.summary.scalar('prediction', out[0,0])
+tf.summary.scalar('ground truth', gt[0,0])
 t_vars = tf.trainable_variables()
 lr = tf.placeholder(tf.float32)
 G_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss,var_list=[var for var in t_vars if var.name.startswith('g_')])
 saver = tf.train.Saver()
+# ?!!!!!
+merged = tf.summary.merge_all()
+train_writer = tf.summary.FileWriter(result_dir + '/train', sess.graph)
+test_writer = tf.summary.FileWriter(result_dir + '/test')
 sess.run(tf.global_variables_initializer())
 ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
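Editor's note: the summary ops and writers above are the core of this commit. Below is a minimal, self-contained sketch of the same TF 1.x TensorBoard pattern; the toy graph and the 'logs/train' directory are illustrative, not from the commit. One caveat: scalar tags containing spaces, like 'ground truth' above, are typically sanitized by TensorFlow (to 'ground_truth') before they show up in the UI.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 1])
w = tf.get_variable('g_w', shape=[1], initializer=tf.zeros_initializer())
loss = tf.reduce_mean(tf.abs(x * w))

tf.summary.scalar('loss', loss)     # register a scalar time series
merged = tf.summary.merge_all()     # one op that evaluates every registered summary

with tf.Session() as sess:
    # passing sess.graph also dumps the graph for the Graphs tab
    writer = tf.summary.FileWriter('logs/train', sess.graph)
    sess.run(tf.global_variables_initializer())
    for step in range(10):
        summary, _ = sess.run([merged, loss], feed_dict={x: [[1.0]]})
        writer.add_summary(summary, step)   # indexed by step for the Scalars tab
    writer.close()

# Inspect with: tensorboard --logdir logs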
@@ -263,17 +291,37 @@ for folder in allfolders:
 g_loss = np.zeros((packed_tiles.shape[0]*packed_tiles.shape[1],1))
-learning_rate = 1e-4
+recorded_loss = []
+recorded_mean_loss = []
+recorded_gt_d = []
+recorded_gt_c = []
+recorded_pr_d = []
+recorded_pr_c = []
+LR = 1e-4
 print(bcolors.HEADER+"Last Epoch = "+str(lastepoch)+bcolors.ENDC)
+if DEBUG_PLT_LOSS:
+    plt.ion()
+    # something about plotting
+    plt.figure(1, figsize=(4,12))
+    pass
+# RUN
 for epoch in range(lastepoch,1):
 #for epoch in range(lastepoch,4001):
     if os.path.isdir("result/%04d"%epoch):
         continue
     cnt = 0
     if epoch > 2000:
-        learning_rate = 1e-5
+        LR = 1e-5
     for ind in np.random.permutation(packed_tiles.shape[0]*packed_tiles.shape[1]):
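Editor's note: DEBUG_PLT_LOSS gates the live matplotlib view added further down; the recorded_* lists accumulate per-iteration values so the whole curve can be redrawn each step. A minimal sketch of that interactive-plot pattern, with synthetic loss values and an illustrative layout:

import matplotlib.pyplot as plt

recorded_loss = []
plt.ion()                                    # interactive mode: draw without blocking
plt.figure(1, figsize=(4, 4))
for step in range(20):
    recorded_loss.append(1.0 / (step + 1))   # stand-in for G_current
    plt.clf()                                # redraw the whole figure each iteration
    plt.plot(recorded_loss, label='loss')
    plt.xlabel('Iteration')
    plt.ylabel('Loss')
    plt.legend(loc='best', ncol=1)
    plt.pause(0.001)                         # flush the GUI event loop
plt.ioff()
plt.show()                                   # keep the final figure open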
@@ -313,18 +361,73 @@ for epoch in range(lastepoch,1):
             #print(bcolors.WARNING+"Found NaN, skipping iteration for tile "+str(i)+","+str(j)+bcolors.ENDC)
             pass
         else:
-            _,G_current,output = sess.run([G_opt,G_loss,out],feed_dict={in_tile:input_patch,gt:gt_patch,lr:learning_rate})
+            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
+            run_metadata = tf.RunMetadata()
+            _,G_current,output,summary = sess.run([G_opt,G_loss,out,merged],feed_dict={in_tile:input_patch,gt:gt_patch,lr:LR},options=run_options,run_metadata=run_metadata)
+            #_,G_current,output = sess.run([G_opt,G_loss,out],feed_dict={in_tile:input_patch,gt:gt_patch,lr:LR})
             g_loss[ind] = G_current
+            mean_loss = np.mean(g_loss[np.where(g_loss)])
+            if DEBUG_PLT_LOSS:
+                recorded_loss.append(G_current)
+                recorded_mean_loss.append(mean_loss)
+                recorded_pr_d.append(output[0,0])
+                recorded_pr_c.append(output[0,1])
+                recorded_gt_d.append(gt_patch[0,0])
+                recorded_gt_c.append(gt_patch[0,1])
+                plt.clf()
+                print("%d %d Loss=%.3f CurrentLoss=%.3f Time=%.3f"%(epoch,cnt,np.mean(g_loss[np.where(g_loss)]),G_current,time.time()-st))
+                plt.subplot(311)
+                plt.plot(recorded_loss, label='loss')
+                plt.plot(recorded_mean_loss, label='mean loss', color='red')
+                plt.xlabel('Iteration')
+                plt.ylabel('Loss')
+                plt.title("Loss=%.5f, Mean Loss=%.5f"%(G_current,mean_loss), fontdict={'size': 20, 'color': 'red'})
+                #plt.text(0.5, 0.5, 'Loss=%.5f' % G_current, fontdict={'size': 20, 'color': 'red'})
+                plt.subplot(312)
+                plt.xlabel('Iteration')
+                plt.ylabel('Disparities')
+                plt.plot(recorded_gt_d, label='gt_d', color='green')
+                plt.plot(recorded_pr_d, label='pr_d', color='red')
+                plt.legend(loc='best',ncol=1)
+                plt.subplot(313)
+                plt.xlabel('Iteration')
+                plt.ylabel('Confidences')
+                plt.plot(recorded_gt_c, label='gt_c', color='green')
+                plt.plot(recorded_pr_c, label='pr_c', color='red')
+                plt.legend(loc='best',ncol=1)
+                plt.pause(0.001)
+            else:
+                print("%d %d Loss=%.3f CurrentLoss=%.3f Time=%.3f"%(epoch,cnt,mean_loss,G_current,time.time()-st))
+            train_writer.add_run_metadata(run_metadata, 'step%d'%cnt)
+            #test_writer.add_summary(summary,cnt)
+            train_writer.add_summary(summary,cnt)
     if epoch % save_freq == 0:
         if not os.path.isdir(result_dir + '%04d'%epoch):
             os.makedirs(result_dir + '%04d'%epoch)
         saver.save(sess, checkpoint_dir + 'model.ckpt')
+train_writer.close()
+test_writer.close()
 print_time()
 print(bcolors.OKGREEN+"time: "+str(time.time())+bcolors.ENDC)
+plt.ioff()
+plt.show()
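Editor's note: the training step now runs with tf.RunOptions(trace_level=FULL_TRACE) and a tf.RunMetadata buffer, and files the result via train_writer.add_run_metadata, which lets TensorBoard overlay per-op compute time and memory on the graph. A minimal sketch of that wiring (toy graph; 'logs/train' is illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 1])
y = tf.reduce_sum(tf.square(x))

with tf.Session() as sess:
    writer = tf.summary.FileWriter('logs/train', sess.graph)
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()        # filled in by this run
    sess.run(y, feed_dict={x: [[1.0]]},
             options=run_options, run_metadata=run_metadata)
    # Tags must be unique per writer; the commit uses 'step%d' % cnt.
    writer.add_run_metadata(run_metadata, 'step0')
    writer.close()

Full tracing on every iteration is expensive; a common variant collects the trace only every Nth step.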
test_nn_infer.py  (view file @ a1c3b782)

@@ -68,13 +68,14 @@ def lrelu(x):
 def network(input):
-    fc1 = slim.fully_connected(input, 101, activation_fn=lrelu, scope='g_fc1')
+    fc1 = slim.fully_connected(input, 2048, activation_fn=lrelu, scope='g_fc1')
-    fc2 = slim.fully_connected(fc1, 101, activation_fn=lrelu, scope='g_fc2')
+    fc2 = slim.fully_connected(fc1, 1024, activation_fn=lrelu, scope='g_fc2')
-    fc3 = slim.fully_connected(fc2, 101, activation_fn=lrelu, scope='g_fc3')
+    fc3 = slim.fully_connected(fc2, 512, activation_fn=lrelu, scope='g_fc3')
-    fc4 = slim.fully_connected(fc3, 101, activation_fn=lrelu, scope='g_fc4')
+    fc4 = slim.fully_connected(fc3, 8, activation_fn=lrelu, scope='g_fc4')
-    fc5 = slim.fully_connected(fc4, 2, activation_fn=lrelu, scope='g_fc5')
+    fc5 = slim.fully_connected(fc4, 4, activation_fn=lrelu, scope='g_fc5')
+    fc6 = slim.fully_connected(fc5, 2, activation_fn=lrelu, scope='g_fc6')
-    return fc5
+    return fc6

 sess = tf.Session()
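Editor's note: this is the same fc1..fc6 rewrite as in test_nn_feed.py, keeping the two scripts graph compatible. For a sense of scale, a rough weight-and-bias count of the old and new stacks (dense layer parameters = in*out + out; input width 101 as in the scripts):

# Back-of-the-envelope parameter count for the old vs. new fully connected stacks.
def dense_params(dims):
    # sum of in_dim*out_dim weights plus out_dim biases per layer
    return sum(i * o + o for i, o in zip(dims[:-1], dims[1:]))

old = dense_params([101, 101, 101, 101, 101, 2])      # fc1..fc5 before this commit
new = dense_params([101, 2048, 1024, 512, 8, 4, 2])   # fc1..fc6 after this commit
print(old, new)   # 41412 vs 2836022: roughly 41k -> 2.8M parameters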
@@ -147,6 +148,9 @@ for item in tlist:
     packed_tiles = np.array([[pile.pack_tile(tiles[i,j],ptab) for j in range(tiles.shape[1])] for i in range(tiles.shape[0])])
     packed_tiles = np.dstack((packed_tiles,values[:,:,0]))
+    print(packed_tiles.shape)
+    print("ENDDD!")
     # flatten
     packed_tiles_flat = packed_tiles.reshape(-1,packed_tiles.shape[-1])
     values_flat = values.reshape(-1,values.shape[-1])
@@ -160,6 +164,25 @@ for item in tlist:
     print("Output shape: "+str(output.shape))
+    output_image = np.reshape(output,(tiles.shape[0],tiles.shape[1],-1))
+    print(output_image.shape)
+
+    import imagej_tiffwriter
+    # 1 prediction
+    # 2 ground truth
+    # difference 1 - 2
+    im1 = output_image[:,:,0]
+    im2 = values[:,:,1]
+    im3 = im1 - im2
+    tif = np.dstack((im1,im2,im3))
+    imagej_tiffwriter.save('prediction_results.tiff',tif)
+
+    sys.exit(0)
     # so, let's print
     for i in range(output.shape[0]):
         p = output[i,0]
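Editor's note: the additions above fold per-tile predictions back onto the tile grid and stack prediction, ground truth, and their difference as three TIFF slices (imagej_tiffwriter is the project's own module; its save(path, array) call is used exactly as in the diff). A small numpy sketch of the flatten/unflatten round trip, with illustrative shapes:

import numpy as np

tiles_h, tiles_w, feat = 10, 20, 103          # illustrative tile-grid shape
packed_tiles = np.zeros((tiles_h, tiles_w, feat))

# flatten the tile grid to one row per tile for the network ...
packed_tiles_flat = packed_tiles.reshape(-1, packed_tiles.shape[-1])   # (200, 103)
output = np.zeros((packed_tiles_flat.shape[0], 2))                     # per-tile (d, c)

# ... and fold predictions back onto the grid
output_image = np.reshape(output, (tiles_h, tiles_w, -1))              # (10, 20, 2)

values = np.zeros((tiles_h, tiles_w, 2))      # stand-in for the ground-truth layer
im1 = output_image[:, :, 0]                   # prediction
im2 = values[:, :, 1]                         # ground truth
im3 = im1 - im2                               # difference
tif = np.dstack((im1, im2, im3))              # three slices, as in the diff
# imagej_tiffwriter.save('prediction_results.tiff', tif)  # project-local writer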