Elphel / python3-imagej-tiff / Commits

Commit 0452a446, authored Jul 13, 2018 by Oleg Dzhimiev
1. packing type 2
2. epochs, batches changes

Parent: 45a473f0

Showing 3 changed files with 276 additions and 110 deletions (+276 / -110)
pack_tile.py      +134  -3
test_nn_feed.py   +120  -104
test_nn_infer.py  +22   -3
pack_tile.py (view file @ 0452a446)
@@ -6,7 +6,8 @@ __email__ = "oleg@elphel.com"
 import numpy as np
-# pack from 9x9x4 to 25x1
+# pack from 9x9x4 to 100x1
 # type 1, all the same
 def pack_tile_type1(tile):
     out = np.empty(100)
@@ -125,13 +126,143 @@ def pack_tile_type1(tile):
     return out

+# pack from 9x9x4 to 104x1
+# type 1, all the same
+def pack_tile_type2(tile):
+    out = np.empty(104)
+    # pack diagm-pair (not tested)
+    l = np.ravel(tile[:,:,0])
+    out[0]  = 1.0*l[0] + 1.0*l[1] + 1.0*l[2] + 1.0*l[9] + 1.0*l[10] + 1.0*l[18]
+    out[1]  = 1.0*l[3] + 1.0*l[11] + 1.0*l[19] + 1.0*l[27]
+    out[2]  = 1.0*l[4] + 1.0*l[12] + 1.0*l[20] + 1.0*l[28] + 1.0*l[36]
+    out[3]  = 1.0*l[5] + 1.0*l[6] + 1.0*l[14]
+    out[4]  = 1.0*l[7] + 1.0*l[15] + 1.0*l[23]
+    out[5]  = 1.0*l[8] + 1.0*l[16] + 1.0*l[24]
+    out[6]  = 1.0*l[13] + 1.0*l[21] + 1.0*l[29] + 1.0*l[37]
+    out[7]  = 1.0*l[17] + 1.0*l[25] + 1.0*l[33]
+    out[8]  = 1.0*l[22] + 1.0*l[30] + 1.0*l[38]
+    out[9]  = 1.0*l[26] + 1.0*l[34] + 1.0*l[35]
+    out[10] = 1.0*l[31]
+    out[11] = 1.0*l[32]
+    out[12] = 1.0*l[39]
+    out[13] = 1.0*l[40]
+    out[14] = 1.0*l[41]
+    out[15] = 1.0*l[42] + 1.0*l[50] + 1.0*l[58]
+    out[16] = 1.0*l[43] + 1.0*l[51] + 1.0*l[59] + 1.0*l[67]
+    out[17] = 1.0*l[44] + 1.0*l[52] + 1.0*l[60] + 1.0*l[68] + 1.0*l[76]
+    out[18] = 1.0*l[45] + 1.0*l[46] + 1.0*l[54]
+    out[19] = 1.0*l[47] + 1.0*l[55] + 1.0*l[63]
+    out[20] = 1.0*l[48]
+    out[21] = 1.0*l[49]
+    out[22] = 1.0*l[53] + 1.0*l[61] + 1.0*l[69] + 1.0*l[77]
+    out[23] = 1.0*l[56] + 1.0*l[64] + 1.0*l[72]
+    out[24] = 1.0*l[57] + 1.0*l[65] + 1.0*l[73]
+    out[25] = 1.0*l[62] + 1.0*l[70] + 1.0*l[71] + 1.0*l[78] + 1.0*l[79] + 1.0*l[80]
+    out[26] = 1.0*l[66] + 1.0*l[74] + 1.0*l[75]
+    # pack diago-pair (not tested)
+    l = np.ravel(tile[:,:,1])
+    out[27] = 1.0*l[0] + 1.0*l[10] + 1.0*l[20]
+    out[28] = 1.0*l[1] + 1.0*l[11] + 1.0*l[21]
+    out[29] = 1.0*l[2] + 1.0*l[3] + 1.0*l[12]
+    out[30] = 1.0*l[4] + 1.0*l[14] + 1.0*l[24] + 1.0*l[34] + 1.0*l[44]
+    out[31] = 1.0*l[5] + 1.0*l[15] + 1.0*l[25] + 1.0*l[35]
+    out[32] = 1.0*l[6] + 1.0*l[7] + 1.0*l[8] + 1.0*l[16] + 1.0*l[17] + 1.0*l[26]
+    out[33] = 1.0*l[9] + 1.0*l[19] + 1.0*l[29]
+    out[34] = 1.0*l[13] + 1.0*l[23] + 1.0*l[43]
+    out[35] = 1.0*l[18] + 1.0*l[27] + 1.0*l[28]
+    out[36] = 1.0*l[22] + 1.0*l[32] + 1.0*l[42]
+    out[37] = 1.0*l[30]
+    out[38] = 1.0*l[31]
+    out[39] = 1.0*l[36] + 1.0*l[46] + 1.0*l[56] + 1.0*l[66] + 1.0*l[76]
+    out[40] = 1.0*l[37] + 1.0*l[47] + 1.0*l[57] + 1.0*l[67]
+    out[41] = 1.0*l[38] + 1.0*l[48] + 1.0*l[58]
+    out[42] = 1.0*l[39]
+    out[43] = 1.0*l[40]
+    out[44] = 1.0*l[41]
+    out[45] = 1.0*l[45] + 1.0*l[55] + 1.0*l[65] + 1.0*l[75]
+    out[46] = 1.0*l[49]
+    out[47] = 1.0*l[50]
+    out[48] = 1.0*l[51] + 1.0*l[61] + 1.0*l[71]
+    out[49] = 1.0*l[52] + 1.0*l[53] + 1.0*l[62]
+    out[50] = 1.0*l[54] + 1.0*l[63] + 1.0*l[64] + 1.0*l[72] + 1.0*l[73] + 1.0*l[74]
+    out[51] = 1.0*l[59] + 1.0*l[69] + 1.0*l[79]
+    out[52] = 1.0*l[60] + 1.0*l[70] + 1.0*l[80]
+    out[53] = 1.0*l[68] + 1.0*l[77] + 1.0*l[78]
+    # pack hor-pairs
+    l = np.ravel(tile[:,:,2])
+    out[54] = 1.0*l[0] + 1.0*l[1] + 1.0*l[9] + 1.0*l[10] + 1.0*l[18] + 1.0*l[27] + 1.0*l[36] + 1.0*l[45] + 1.0*l[54] + 1.0*l[63] + 1.0*l[64] + 1.0*l[72] + 1.0*l[73]
+    out[55] = 1.0*l[2] + 1.0*l[11] + 1.0*l[20]
+    out[56] = 1.0*l[3] + 1.0*l[12] + 1.0*l[21]
+    out[57] = 1.0*l[4] + 1.0*l[13] + 1.0*l[22]
+    out[58] = 1.0*l[5] + 1.0*l[14] + 1.0*l[23]
+    out[59] = 1.0*l[6] + 1.0*l[15] + 1.0*l[24]
+    out[60] = 1.0*l[7] + 1.0*l[8] + 1.0*l[16] + 1.0*l[17] + 1.0*l[26] + 1.0*l[35] + 1.0*l[44] + 1.0*l[53] + 1.0*l[62] + 1.0*l[70] + 1.0*l[71] + 1.0*l[79] + 1.0*l[80]
+    out[61] = 1.0*l[19] + 1.0*l[28] + 1.0*l[37] + 1.0*l[46] + 1.0*l[55]
+    out[62] = 1.0*l[25] + 1.0*l[34] + 1.0*l[43] + 1.0*l[52] + 1.0*l[61]
+    out[63] = 1.0*l[29] + 1.0*l[38] + 1.0*l[47]
+    out[64] = 1.0*l[30]
+    out[65] = 1.0*l[31]
+    out[66] = 1.0*l[32]
+    out[67] = 1.0*l[33] + 1.0*l[42] + 1.0*l[51]
+    out[68] = 1.0*l[39]
+    out[69] = 1.0*l[40]
+    out[70] = 1.0*l[41]
+    out[71] = 1.0*l[48]
+    out[72] = 1.0*l[49]
+    out[73] = 1.0*l[50]
+    out[74] = 1.0*l[56] + 1.0*l[65] + 1.0*l[74]
+    out[75] = 1.0*l[57] + 1.0*l[66] + 1.0*l[75]
+    out[76] = 1.0*l[58] + 1.0*l[67] + 1.0*l[76]
+    out[77] = 1.0*l[59] + 1.0*l[68] + 1.0*l[77]
+    out[78] = 1.0*l[60] + 1.0*l[69] + 1.0*l[78]
+    # pack vert-pairs
+    l = np.ravel(tile[:,:,3])
+    out[79] = 1.0*l[0] + 1.0*l[1] + 1.0*l[2] + 1.0*l[3] + 1.0*l[4] + 1.0*l[5] + 1.0*l[6] + 1.0*l[7] + 1.0*l[8] + 1.0*l[9] + 1.0*l[10] + 1.0*l[11] + 1.0*l[16] + 1.0*l[17]
+    out[80] = 1.0*l[11] + 1.0*l[12] + 1.0*l[13] + 1.0*l[14] + 1.0*l[15]
+    out[81] = 1.0*l[18] + 1.0*l[19] + 1.0*l[20]
+    out[82] = 1.0*l[21] + 1.0*l[22] + 1.0*l[23]
+    out[83] = 1.0*l[24] + 1.0*l[25] + 1.0*l[26]
+    out[84] = 1.0*l[27] + 1.0*l[28] + 1.0*l[29]
+    out[85] = 1.0*l[30]
+    out[86] = 1.0*l[31]
+    out[87] = 1.0*l[32]
+    out[88] = 1.0*l[33] + 1.0*l[34] + 1.0*l[35]
+    out[89] = 1.0*l[36] + 1.0*l[37] + 1.0*l[38]
+    out[90] = 1.0*l[39]
+    out[91] = 1.0*l[40]
+    out[92] = 1.0*l[41]
+    out[93] = 1.0*l[42] + 1.0*l[43] + 1.0*l[44]
+    out[94] = 1.0*l[45] + 1.0*l[46] + 1.0*l[47]
+    out[95] = 1.0*l[48]
+    out[96] = 1.0*l[49]
+    out[97] = 1.0*l[50]
+    out[98] = 1.0*l[51] + 1.0*l[52] + 1.0*l[53]
+    out[99] = 1.0*l[54] + 1.0*l[55] + 1.0*l[56]
+    out[100] = 1.0*l[57] + 1.0*l[58] + 1.0*l[59]
+    out[101] = 1.0*l[60] + 1.0*l[61] + 1.0*l[62]
+    out[102] = 1.0*l[63] + 1.0*l[64] + 1.0*l[70] + 1.0*l[71] + 1.0*l[72] + 1.0*l[73] + 1.0*l[74] + 1.0*l[75] + 1.0*l[76] + 1.0*l[77] + 1.0*l[78] + 1.0*l[79] + 1.0*l[80]
+    out[103] = 1.0*l[65] + 1.0*l[66] + 1.0*l[67] + 1.0*l[68] + 1.0*l[69]
+    return out

 # pack single
 def pack_tile(tile):
     return pack_tile_type1(tile)

 # pack all tiles
-def pack(tiles):
-    output = np.array([[pack_tile(tiles[i,j]) for j in range(tiles.shape[1])] for i in range(tiles.shape[0])])
+def pack(tiles, ptype=1):
+    if ptype == 1:
+        pack_func = pack_tile_type1
+    elif ptype == 2:
+        pack_func = pack_tile_type2
+    output = np.array([[pack_func(tiles[i,j]) for j in range(tiles.shape[1])] for i in range(tiles.shape[0])])
     return output
...
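A minimal usage sketch of the new packer entry point (not part of the commit; it assumes pack_tile.py is importable, aliased here as pile the way the test scripts refer to it). Each 9x9x4 input tile packs to 100 values with the default ptype=1 and to 104 values with ptype=2.

import numpy as np
import pack_tile as pile   # assumed import; the test scripts call pile.pack(...)

tiles = np.zeros((2, 3, 9, 9, 4))   # dummy 2x3 grid of 9x9x4 tiles

packed1 = pile.pack(tiles)          # default ptype=1 -> shape (2, 3, 100)
packed2 = pile.pack(tiles, 2)       # ptype=2         -> shape (2, 3, 104)
print(packed1.shape, packed2.shape)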
test_nn_feed.py (view file @ 0452a446)
@@ -46,6 +46,7 @@ def print_time():
 VALUES_LAYER_NAME = 'other'
 LAYERS_OF_INTEREST = ['diagm-pair', 'diago-pair', 'hor-pairs', 'vert-pairs']
 RADIUS = 1
+TILE_PACKING_TYPE = 1
 DEBUG_PLT_LOSS = True
 # If false - will not pack or rescal
@@ -91,23 +92,27 @@ def lrelu(x):
 def network(input):
-    fc1 = slim.fully_connected(input, 512, activation_fn=lrelu, scope='g_fc1')
-    fc2 = slim.fully_connected(fc1, 2, activation_fn=lrelu, scope='g_fc2')
-    return fc2
-#    fc2 = slim.fully_connected(fc1, 1024,activation_fn=lrelu,scope='g_fc2')
-#    fc3 = slim.fully_connected(fc2, 512,activation_fn=lrelu,scope='g_fc3')
-#    fc4 = slim.fully_connected(fc3, 8,activation_fn=lrelu,scope='g_fc4')
-#    fc5 = slim.fully_connected(fc4, 4,activation_fn=lrelu,scope='g_fc5')
-#    fc6 = slim.fully_connected(fc5, 2,activation_fn=lrelu,scope='g_fc6')
-#    return fc6
+    fc1 = slim.fully_connected(input, 1024, activation_fn=lrelu, scope='g_fc1')
+#    fc2 = slim.fully_connected(fc1, 2,activation_fn=lrelu,scope='g_fc2')
+#    return fc2
+    fc2 = slim.fully_connected(fc1, 1024, activation_fn=lrelu, scope='g_fc2')
+    fc3 = slim.fully_connected(fc2, 512, activation_fn=lrelu, scope='g_fc3')
+    fc4 = slim.fully_connected(fc3, 8, activation_fn=lrelu, scope='g_fc4')
+    fc5 = slim.fully_connected(fc4, 4, activation_fn=lrelu, scope='g_fc5')
+    fc6 = slim.fully_connected(fc5, 2, activation_fn=lrelu, scope='g_fc6')
+    return fc6

 sess = tf.Session()

-in_tile = tf.placeholder(tf.float32, [None, 101])
+if TILE_PACKING_TYPE == 1:
+    in_tile = tf.placeholder(tf.float32, [None, 101])
+elif TILE_PACKING_TYPE == 2:
+    in_tile = tf.placeholder(tf.float32, [None, 105])
 gt = tf.placeholder(tf.float32, [None, 2])

 #losses = tf.get_variable("losses", [None])
@@ -135,23 +140,23 @@ cf_w_norm = tf.nn.softmax(cf_w)
 #G_loss = tf.reduce_mean(tf.abs(tf.nn.softmax(out[:,1])*out[:,0]-cf_w_norm*gt[:,0]))
 #G_loss = tf.reduce_mean(tf.squared_difference(out[:,0], gt[:,0]))
-#G_loss = tf.reduce_mean(tf.abs(out[:,0]-gt[:,0]))
-G_loss = tf.losses.mean_squared_error(gt[:,0],out[:,0],cf_w)
+G_loss = tf.reduce_mean(tf.abs(out[:,0]-gt[:,0]))
+#G_loss = tf.losses.mean_squared_error(gt[:,0],out[:,0],cf_w)

-tf.summary.scalar('loss', G_loss)
-tf.summary.scalar('prediction', out[0,0])
-tf.summary.scalar('ground truth', gt[0,0])
+#tf.summary.scalar('loss', G_loss)
+#tf.summary.scalar('prediction', out[0,0])
+#tf.summary.scalar('ground truth', gt[0,0])

 t_vars = tf.trainable_variables()
 lr = tf.placeholder(tf.float32)

-G_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss,var_list=[var for var in t_vars if var.name.startswith('g_')])
+G_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss)

 saver = tf.train.Saver()

 # ?!!!!!
-merged = tf.summary.merge_all()
-train_writer = tf.summary.FileWriter(result_dir + '/train', sess.graph)
-test_writer = tf.summary.FileWriter(result_dir + '/test')
+#merged = tf.summary.merge_all()
+#train_writer = tf.summary.FileWriter(result_dir + '/train', sess.graph)
+#test_writer = tf.summary.FileWriter(result_dir + '/test')

 sess.run(tf.global_variables_initializer())
 ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
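A note on the loss switch above (an editorial sketch, not part of the commit): the active loss becomes a plain mean absolute error on the disparity channel, while the previously active confidence-weighted MSE is commented out. In NumPy terms the two candidates look roughly like this (the TF weighted loss uses its default reduction, which divides by the number of non-zero weights):

import numpy as np

def mae_loss(out, gt):
    # active after this commit: mean |predicted disparity - ground-truth disparity|
    return np.mean(np.abs(out[:, 0] - gt[:, 0]))

def weighted_mse_loss(out, gt, cf_w):
    # commented out by this commit: approximate rendering of
    # tf.losses.mean_squared_error(gt[:,0], out[:,0], cf_w)
    sq = cf_w * (out[:, 0] - gt[:, 0]) ** 2
    return np.sum(sq) / max(np.count_nonzero(cf_w), 1)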
@@ -185,132 +190,143 @@ if DEBUG_PLT_LOSS:
     pass

-# RUN
-# epoch is one image
-for epoch in range(lastepoch,lastepoch+len(tlist)):
-    print(bcolors.HEADER+"Epoch #"+str(epoch)+bcolors.ENDC)
-    if os.path.isdir("result/%04d"%epoch):
-        continue
-    tlist_index = epoch - lastepoch
-    print(bcolors.OKGREEN+"Processing "+tlist[tlist_index]+bcolors.ENDC)
-    tmp_tiff  = ijt.imagej_tiff(tlist[tlist_index])
-    tmp_tiles = tmp_tiff.getstack(labels,shape_as_tiles=True)
-    tmp_vals  = tmp_tiff.getvalues(label=VALUES_LAYER_NAME)
-    # might not need it because going to loop through anyway
-    packed_tiles = pile.pack(tmp_tiles)
-    packed_tiles = np.dstack((packed_tiles,tmp_vals[:,:,0]))
-    #if epoch > 2000:
-    # LR = 1e-5
-    # so, here get the image, remove nans and run for 100x times
-    packed_tiles[np.isnan(packed_tiles)] = 0.0
-    tmp_vals[np.isnan(tmp_vals)] = 0.0
-    #packed_tiles = packed_tiles[::,::]
-    values = tmp_vals
-    input_patch = np.reshape(packed_tiles,(-1,101))
-    gt_patch = np.reshape(values[:,:,1:3],(-1,2))
-    g_loss = np.zeros(input_patch.shape[0])
-    for i in range(100):
-        print(bcolors.OKBLUE+"Iteration "+str(i)+bcolors.ENDC)
-        st = time.time()
-        skip_iteration = False
-        # if nan skip run!
-        if np.isnan(np.sum(gt_patch)):
-            print("GT has NaNs")
-            #skip_iteration = True
-        if np.isnan(np.sum(input_patch)):
-            print("Patch has NaNs")
-            #skip_iteration = True
-        if skip_iteration:
-            #print(bcolors.WARNING+"Found NaN, skipping iteration for tile "+str(i)+","+str(j)+bcolors.ENDC)
-            pass
-        else:
-            run_options  = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
-            run_metadata = tf.RunMetadata()
-            _,G_current,output,summary = sess.run([G_opt,G_loss,out,merged],feed_dict={in_tile:input_patch,gt:gt_patch,lr:LR},options=run_options,run_metadata=run_metadata)
-            #_,G_current,output = sess.run([G_opt,G_loss,out],feed_dict={in_tile:input_patch,gt:gt_patch,lr:LR})
-            g_loss[i] = G_current
-            mean_loss = np.mean(g_loss[np.where(g_loss)])
-            if DEBUG_PLT_LOSS:
-                recorded_loss.append(G_current)
-                recorded_mean_loss.append(mean_loss)
-                recorded_pr_d.append(output[0,0])
-                recorded_pr_c.append(output[0,1])
-                recorded_gt_d.append(gt_patch[0,0])
-                recorded_gt_c.append(gt_patch[0,1])
-                plt.clf()
-                plt.subplot(311)
-                plt.plot(recorded_loss, label='loss')
-                plt.plot(recorded_mean_loss, label='mean loss', color='red')
-                plt.xlabel('Iteration')
-                plt.ylabel('Loss')
-                plt.title("Loss=%.5f, Mean Loss=%.5f"%(G_current,mean_loss), fontdict={'size': 20, 'color': 'red'})
-                #plt.text(0.5, 0.5, 'Loss=%.5f' % G_current, fontdict={'size': 20, 'color': 'red'})
-                plt.subplot(312)
-                plt.xlabel('Iteration')
-                plt.ylabel('Disparities')
-                plt.plot(recorded_gt_d, label='gt_d', color='green')
-                plt.plot(recorded_pr_d, label='pr_d', color='red')
-                plt.legend(loc='best', ncol=1)
-                plt.subplot(313)
-                plt.xlabel('Iteration')
-                plt.ylabel('Confidences')
-                plt.plot(recorded_gt_c, label='gt_c', color='green')
-                plt.plot(recorded_pr_c, label='pr_c', color='red')
-                plt.legend(loc='best', ncol=1)
-                plt.pause(0.001)
-            else:
-                print("%d %d Loss=%.3f CurrentLoss=%.3f Time=%.3f"%(epoch,i,mean_loss,G_current,time.time()-st))
+training_tiles  = np.array([])
+training_values = np.array([])
+
+# get epoch train data
+for i in range(len(tlist)):
+    print(bcolors.OKGREEN+"Opening "+tlist[i]+bcolors.ENDC)
+    tmp_tiff  = ijt.imagej_tiff(tlist[i])
+    tmp_tiles = tmp_tiff.getstack(labels,shape_as_tiles=True)
+    tmp_vals  = tmp_tiff.getvalues(label=VALUES_LAYER_NAME)
+    # might not need it because going to loop through anyway
+    if TILE_PACKING_TYPE==1:
+        packed_tiles = pile.pack(tmp_tiles)
+    elif TILE_PACKING_TYPE==2:
+        packed_tiles = pile.pack(tmp_tiles,TILE_PACKING_TYPE)
+    packed_tiles = np.dstack((packed_tiles,tmp_vals[:,:,0]))
+    packed_tiles = np.reshape(packed_tiles,(-1,packed_tiles.shape[-1]))
+    values = np.reshape(tmp_vals[:,:,1:3],(-1,2))
+    packed_tiles_filtered = np.array([])
+    print("Unfiltered: "+str(packed_tiles.shape))
+    for j in range(packed_tiles.shape[0]):
+        skip_tile = False
+        if np.isnan(np.sum(packed_tiles[j])):
+            skip_tile = True
+        if np.isnan(np.sum(values[j])):
+            skip_tile = True
+        if not skip_tile:
+            if len(packed_tiles_filtered)==0:
+                packed_tiles_filtered = np.array([packed_tiles[j]])
+                values_filtered = np.array([values[j]])
+            else:
+                packed_tiles_filtered = np.append(packed_tiles_filtered,[packed_tiles[j]],axis=0)
+                values_filtered = np.append(values_filtered,[values[j]],axis=0)
+    print("NaN-filtered: "+str(packed_tiles_filtered.shape))
+    if i==0:
+        training_tiles  = packed_tiles_filtered
+        training_values = values_filtered
+    else:
+        training_tiles  = np.concatenate((training_tiles,packed_tiles_filtered),axis=0)
+        training_values = np.concatenate((training_values,values_filtered),axis=0)
+
+print("Training set shape: "+str(training_tiles.shape))
+
+# RUN
+# epoch is all available images
+# batch is a number of non-zero tiles
+g_loss = np.zeros(training_tiles.shape[0])
+
+#for epoch in range(lastepoch,lastepoch+len(tlist)):
+for epoch in range(lastepoch,500):
+    print(bcolors.HEADER+"Epoch #"+str(epoch)+bcolors.ENDC)
+    if os.path.isdir("result/%04d"%epoch):
+        continue
+    #if epoch > 2000:
+    # LR = 1e-5
+    # so, here get the image, remove nans and run for 100x times
+    #packed_tiles[np.isnan(packed_tiles)] = 0.0
+    #tmp_vals[np.isnan(tmp_vals)] = 0.0
+    input_patch = training_tiles
+    gt_patch    = training_values
+    st = time.time()
+    #run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
+    #run_metadata = tf.RunMetadata()
+    _,G_current,output = sess.run([G_opt,G_loss,out],feed_dict={in_tile:input_patch,gt:gt_patch,lr:LR})
+    #_,G_current,output,summary = sess.run([G_opt,G_loss,out,merged],feed_dict={in_tile:input_patch,gt:gt_patch,lr:LR},options=run_options,run_metadata=run_metadata)
+    g_loss[i] = G_current
+    mean_loss = np.mean(g_loss[np.where(g_loss)])
+    if DEBUG_PLT_LOSS:
+        recorded_loss.append(G_current)
+        recorded_mean_loss.append(mean_loss)
+        recorded_pr_d.append(output[0,0])
+        recorded_pr_c.append(output[0,1])
+        recorded_gt_d.append(gt_patch[0,0])
+        recorded_gt_c.append(gt_patch[0,1])
+        plt.clf()
+        plt.subplot(311)
+        plt.plot(recorded_loss, label='loss')
+        plt.plot(recorded_mean_loss, label='mean loss', color='red')
+        plt.xlabel('Iteration')
+        plt.ylabel('Loss')
+        plt.title("Loss=%.5f, Mean Loss=%.5f"%(G_current,mean_loss), fontdict={'size': 20, 'color': 'red'})
+        #plt.text(0.5, 0.5, 'Loss=%.5f' % G_current, fontdict={'size': 20, 'color': 'red'})
+        plt.subplot(312)
+        plt.xlabel('Iteration')
+        plt.ylabel('Disparities')
+        plt.plot(recorded_gt_d, label='gt_d', color='green')
+        plt.plot(recorded_pr_d, label='pr_d', color='red')
+        plt.legend(loc='best', ncol=1)
+        plt.subplot(313)
+        plt.xlabel('Iteration')
+        plt.ylabel('Confidences')
+        plt.plot(recorded_gt_c, label='gt_c', color='green')
+        plt.plot(recorded_pr_c, label='pr_c', color='red')
+        plt.legend(loc='best', ncol=1)
+        plt.pause(0.001)
+    else:
+        print("%d %d Loss=%.3f CurrentLoss=%.3f Time=%.3f"%(epoch,i,mean_loss,G_current,time.time()-st))
+    #train_writer.add_run_metadata(run_metadata, 'step%d' % cnt)
+    #test_writer.add_summary(summary,cnt)
+    #train_writer.add_summary(summary, cnt)

     if epoch % save_freq == 0:
         if not os.path.isdir(result_dir + '%04d'%epoch):
             os.makedirs(result_dir + '%04d'%epoch)
         saver.save(sess, checkpoint_dir + 'model.ckpt')

-train_writer.close()
-test_writer.close()
 print_time()
 print(bcolors.OKGREEN+"time: "+str(time.time())+bcolors.ENDC)
...
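A note on the NaN filter introduced above (an editorial sketch, not part of the commit): the new code grows the training arrays with np.append inside a Python loop over tiles; the same selection can be written as a single boolean-mask operation over the flattened tile and value arrays.

import numpy as np

def filter_nan_tiles(packed_tiles, values):
    # keep only rows where neither the packed tile nor its target values contain NaN
    good = ~(np.isnan(packed_tiles).any(axis=1) | np.isnan(values).any(axis=1))
    return packed_tiles[good], values[good]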
test_nn_infer.py (view file @ 0452a446)
@@ -44,6 +44,7 @@ def print_time():
 VALUES_LAYER_NAME = 'other'
 LAYERS_OF_INTEREST = ['diagm-pair', 'diago-pair', 'hor-pairs', 'vert-pairs']
 RADIUS = 1
+TILE_PACKING_TYPE = 1
 try:
     src = sys.argv[1]
@@ -83,7 +84,11 @@ def network(input):
 sess = tf.Session()

-in_tile = tf.placeholder(tf.float32, [None, 101])
+if TILE_PACKING_TYPE == 1:
+    in_tile = tf.placeholder(tf.float32, [None, 101])
+elif TILE_PACKING_TYPE == 2:
+    in_tile = tf.placeholder(tf.float32, [None, 105])
 gt = tf.placeholder(tf.float32, [None, 2])

 out = network(in_tile)
@@ -143,7 +148,11 @@ for item in tlist:
     # tiles and values
     # might not need it because going to loop through anyway
-    packed_tiles = pile.pack(tiles)
+    if TILE_PACKING_TYPE==1:
+        packed_tiles = pile.pack(tiles)
+    elif TILE_PACKING_TYPE==2:
+        packed_tiles = pile.pack(tiles,TILE_PACKING_TYPE)
     packed_tiles = np.dstack((packed_tiles,values[:,:,0]))

     print(packed_tiles.shape)
@@ -168,11 +177,12 @@ for item in tlist:
     packed_tiles_flat = packed_tiles[i]
     values_flat = values[i]

+    # whole row at once
     output = sess.run(out,feed_dict={in_tile:packed_tiles_flat})
     output_image[i] = output

     # so, let's print
-    for j in range(output.shape[0]):
+    for j in range(packed_tiles.shape[0]):
         p  = output[j,0]
         pc = output[j,1]
         fv = values_flat[j,0]
@@ -204,6 +214,15 @@ for item in tlist:
     tif = np.dstack((im1,im2,im3))

+    im3 = np.ravel(im3)
+    print(im3.shape)
+    im4 = im3[~np.isnan(im3)]
+    rms = np.sqrt(np.mean(np.square(im4)))
+    print("RMS = "+str(rms))
+
     imagej_tiffwriter.save('prediction_results.tiff',tif)

     #sys.exit(0)
...
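A note on the RMS check added above (an editorial sketch, not part of the commit): dropping NaN entries with a boolean mask before averaging gives the same value as using np.nanmean directly.

import numpy as np

def rms_ignoring_nan(im3):
    # same value as: im4 = im3[~np.isnan(im3)]; np.sqrt(np.mean(np.square(im4)))
    return np.sqrt(np.nanmean(np.square(im3)))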