Elphel / python3-imagej-tiff / Commits

Commit 07c7d46a
authored Aug 10, 2018 by Oleg Dzhimiev

display weights

parent a8911582

Showing 1 changed file with 160 additions and 79 deletions (+160 / -79)
nn_ds_inmem4_tmp.py  (+160 / -79)
 #!/usr/bin/env python3
 from numpy import float64
+from _stat import S_IEXEC
 __copyright__ = "Copyright 2018, Elphel, Inc."
 __license__   = "GPL-3.0+"
...
@@ -33,17 +34,17 @@ MAX_EPOCH = 500
 #LR = 1e-4 # learning rate
 LR = 1e-3 # learning rate
 USE_CONFIDENCE = False
-ABSOLUTE_DISPARITY = False # True # False
+ABSOLUTE_DISPARITY = True # True # False
 DEBUG_PLT_LOSS = True
 FEATURES_PER_TILE = 324
 EPOCHS_TO_RUN = 10000 #0
 RUN_TOT_AVG = 100 # last batches to average. Epoch is 307 training batches
 BATCH_SIZE = 1000 # Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
 SHUFFLE_EPOCH = True
-NET_ARCH = 3 # overwrite with argv?
+NET_ARCH = 0 # overwrite with argv?
 #DEBUG_PACK_TILES = True
 SUFFIX = str(NET_ARCH) + (["R","A"][ABSOLUTE_DISPARITY])
-MAX_TRAIN_FILES_TFR = 4
+MAX_TRAIN_FILES_TFR = 6
 #http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
 class bcolors:
     HEADER = '\033[95m'
...
@@ -211,6 +212,9 @@ def lrelu(x):
 #    return tf.nn.relu(x)
 def network_fc_simple(input, arch = 0):
+    global image_summary_op1
     layouts = {0:[0,   0,   0,  32,  20,  16],
                1:[0,   0,   0, 256, 128,  64],
                2:[0, 128,  32,  32,  32,  16],
...
@@ -226,9 +230,86 @@ def network_fc_simple(input, arch = 0):
                inp = input
            fc.append(slim.fully_connected(inp, num_outs, activation_fn = lrelu, scope = 'g_fc'+str(i)))
-           with tf.variable_scope('g_fc'+str(i)+'/fully_connected',reuse=tf.AUTO_REUSE):
+           #with tf.variable_scope('g_fc'+str(i)+'/fully_connected',reuse=tf.AUTO_REUSE):
+           with tf.variable_scope('g_fc'+str(i),reuse=tf.AUTO_REUSE):
               w = tf.get_variable('weights',shape=[inp.shape[1],num_outs])
-              b = tf.get_variable('weights',shape=[inp.shape[1],num_outs])
+              #image = tf.get_variable('w_images',shape=[1, inp.shape[1],num_outs,1])
+              if (i==3):
+                  # red border
+                  grid = tf.constant([255,100,100],dtype=tf.float32,name="GRID")
+                  # (325,32)
+                  wimg_1 = w
+                  # (32,325)
+                  wimg_2 = tf.transpose(wimg_1,[1,0])
+                  # (32,324)
+                  wimg_3 = wimg_2[:,:-1]
+                  # res?
+                  #wimg_res = tf.get_variable('wimg_res',shape=[32*(9+1),(9+1)*4, 3])
+                  # long list
+                  tmp1 = []
+                  for mi in range(32):
+                      tmp2 = []
+                      for mj in range(4):
+                          s_i = mj*81
+                          e_i = (mj+1)*81
+                          tile = tf.reshape(wimg_3[mi,s_i:e_i],shape=(9,9))
+                          tiles = tf.stack([tile]*3,axis=2)
+                          #gtiles1 = tf.concat([tiles, tf.reshape(9*[grid],shape=(1,9,3))],axis=0)
+                          gtiles1 = tf.concat([tiles,tf.expand_dims(9*[grid],0)],axis=0)
+                          gtiles2 = tf.concat([gtiles1,tf.expand_dims(10*[grid],1)],axis=1)
+                          tmp2.append(gtiles2)
+                      ts = tf.concat(tmp2,axis=2)
+                      tmp1.append(ts)
+                  image_summary_op2 = tf.concat(tmp1,axis=0)
+                  #image_summary_op1 = tf.assign(wimg_res,tf.zeros(shape=[32*(9+1),(9+1)*4, 3],dtype=tf.float32))
+                  #wimgo1 = tf.zeros(shape=[32*(9+1),(9+1)*4, 3],dtype=tf.float32)
+                  #tf.summary.image("wimg_res1",tf.reshape(wimg_res,[1,32*(9+1),(9+1)*4, 3]))
+                  #tf.summary.image("wimgo1",tf.reshape(wimgo1,[1,32*(9+1),(9+1)*4, 3]))
+                  #tf.summary.image("wimgo2",tf.reshape(wimgo2,[1,32*(9+1),(9+1)*4, 3]))
+                  tf.summary.image("SWEIGTS",tf.reshape(gtiles2,[1,10,10,3]))
+                  tf.summary.image("WEIGTS",tf.reshape(image_summary_op2,[1,320,40,3]))
+                  # borders
+                  #for mi in range(0,wimg_res.shape[0],10):
+                  #    for mj in range(wimg_res.shape[1]):
+                  #        wimg_res[mi,mj].assign([255,255,255])
+                  #wimg_res[9::(9+1),:].assign([255,0,0])
+                  #wimg_res[:,9::(9+1)].assign([255,0,0])
+                  #for mi in range(0,wimg_res.shape[0],10):
+                  #    print(mi)
+                  #wimg_res = tf.stack([wing_res,])
+                  #wimg_1 = tf.reshape(w,[1,inp.shape[1],num_outs,1])
+                  #wimg_1t = tf.transpose(wimg_1,[0,2,1,3])
+                  # w = w[a,b]
+                  # wt = w[b,a]
+                  # for i in range(b):
+                  #     tmp =
+                  #tf.summary.image("wimg_1",wimg_1)
+                  #tf.summary.image("wimg_1t",wimg_1t)
+                  #tf.summary.image("wimg_res1",tf.reshape(wimg_res,[1,32*(9+1),(9+1)*4, 3]))
+              b = tf.get_variable('biases',shape=[num_outs])
               tf.summary.histogram("weights",w)
               tf.summary.histogram("biases",b)
 """
...
@@ -247,7 +328,8 @@ def network_fc_simple(input, arch = 0):
         with tf.variable_scope('g_fc_out',reuse=tf.AUTO_REUSE):
             w = tf.get_variable('weights',shape=[fc[-1].shape[1],2])
-            b = tf.get_variable('biases',shape=[fc[-1].shape[1],2])
+            tf.summary.image("wimage",tf.reshape(w,[1,fc[-1].shape[1],2,1]))
+            b = tf.get_variable('biases',shape=[2])
             tf.summary.histogram("weights",w)
             tf.summary.histogram("biases",b)
...
@@ -256,6 +338,7 @@ def network_fc_simple(input, arch = 0):
         with tf.variable_scope('g_fc_out',reuse=tf.AUTO_REUSE):
             w = tf.get_variable('weights',shape=[fc[-1].shape[1],1])
+            tf.summary.image("wimage",tf.reshape(w,[1,fc[-1].shape[1],1,1]))
             b = tf.get_variable('biases',shape=[1])
             tf.summary.histogram("weights",w)
             tf.summary.histogram("biases",b)
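Both `wimage` summaries added here follow the same pattern: in the TensorFlow 1.x API used by this file, `tf.summary.image` expects a 4-D tensor of shape `[batch, height, width, channels]`, so the 2-D output-layer weights are reshaped into a single one-channel image before being logged next to the existing histograms. A minimal sketch of that pattern, with a made-up variable name and shape for illustration:

```python
import tensorflow as tf  # TensorFlow 1.x, matching the rest of the file

w_demo = tf.get_variable('w_demo', shape=[13, 1])                    # hypothetical [n, 1] weight column
tf.summary.image("wimage_demo", tf.reshape(w_demo, [1, 13, 1, 1]))   # [batch, height, width, channels]
merged = tf.summary.merge_all()                                      # collected together with the histograms
```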
...
@@ -504,6 +587,8 @@ with tf.Session() as sess:
 #    if SHUFFLE_EPOCH:
 #        dataset_train = dataset_train.shuffle(buffer_size=10000)
+        # RUN TRAIN SESSION
         sess.run(iterator_train.initializer, feed_dict={corr2d_train_placeholder:           corr2d_trains[train_file_index],
                                                         target_disparity_train_placeholder: target_disparity_trains[train_file_index],
                                                         gt_ds_train_placeholder:            gt_ds_trains[train_file_index]})
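The block under `# RUN TRAIN SESSION` relies on the TensorFlow 1.x initializable-iterator pattern: the training arrays are fed into the `tf.data` pipeline through placeholders each time `iterator_train.initializer` is run, so the same graph can be re-pointed at a different training file (or at the test arrays, as in the test hunk further down) without being rebuilt. A self-contained sketch of the pattern, with made-up placeholder names and shapes rather than the ones in the file:

```python
import numpy as np
import tensorflow as tf  # TensorFlow 1.x API

# Hypothetical stand-in for the corr2d placeholder used in the file.
corr2d_ph  = tf.placeholder(tf.float32, shape=[None, 324])
dataset    = tf.data.Dataset.from_tensor_slices(corr2d_ph).batch(1000)
iterator   = dataset.make_initializable_iterator()
next_batch = iterator.get_next()

with tf.Session() as sess:
    # Re-initializing the iterator binds it to whichever array should be consumed next.
    sess.run(iterator.initializer,
             feed_dict={corr2d_ph: np.zeros((5000, 324), dtype=np.float32)})
    while True:
        try:
            batch = sess.run(next_batch)   # shape (<=1000, 324)
        except tf.errors.OutOfRangeError:
            break                          # epoch finished, mirroring the loops in the file
```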
...
@@ -530,6 +615,7 @@ with tf.Session() as sess:
                 #train_writer.add_summary(train_summary, i)
                 loss_train_hist[i]  = G_loss_trained
                 loss2_train_hist[i] = out_cost1
             except tf.errors.OutOfRangeError:
                 print("train done at step %d"%(i))
                 break
...
@@ -537,10 +623,7 @@ with tf.Session() as sess:
         train_avg  = np.average(loss_train_hist).astype(np.float32)
         train2_avg = np.average(loss2_train_hist).astype(np.float32)
-        #_,_=sess.run([tf_ph_G_loss,tf_ph_sq_diff],feed_dict={tf_ph_G_loss:train_avg, tf_ph_sq_diff:train2_avg})
-        #tf_ph_G_loss  = tf.placeholder(tf.float32,shape=None,name='G_loss_avg')
-        #tf_ph_sq_diff = tf.placeholder(tf.float32,shape=None,name='sq_diff_avg')
+        # RUN TEST SESSION
         sess.run(iterator_train.initializer, feed_dict={corr2d_train_placeholder:           corr2d_test,
                                                         target_disparity_train_placeholder: target_disparity_test,
...
@@ -567,12 +650,10 @@ with tf.Session() as sess:
                 print("test done at step %d"%(i))
                 break
-        # print_time("%d:%d -> %f"%(epoch,i,G_current))
         test_avg  = np.average(loss_test_hist).astype(np.float32)
         test2_avg = np.average(loss2_test_hist).astype(np.float32)
-        # _,_=sess.run([tf_ph_G_loss,tf_ph_sq_diff],feed_dict={tf_ph_G_loss:test_avg, tf_ph_sq_diff:test2_avg})
+        # they include image summaries as well
         train_writer.add_summary(train_summary, epoch)
         test_writer.add_summary(test_summary, epoch)
...