Elphel / python3-imagej-tiff / Commits / d0bbd388

Commit d0bbd388, authored Aug 03, 2018 by Andrey Filippov

    Added scope

Parent: da662515
Showing 1 changed file with 70 additions and 69 deletions.

nn_ds_inmem.py (+70, -69)
nn_ds_inmem.py (view file @ d0bbd388)
@@ -194,6 +194,7 @@ def batchLoss(out_batch, # [batch_size,(1..2)] tf_result
              conf_pwr =       2.0,
              gt_conf_offset = 0.08,
              gt_conf_pwr =    1.0):
+   with tf.name_scope("BatchLoss"):
        """
        Here confidence should be after relU. Disparity - may be also if absolute, but no activation if output is residual disparity
        """
@@ -209,11 +210,11 @@ def batchLoss(out_batch, # [batch_size,(1..2)] tf_result
        if gt_conf_pwr == 0:
            w = tf.ones((out_batch.shape[0]), dtype=tf.float32, name="w_ones")
        else:
#           w_slice = tf.slice(gt_ds_batch,[0,1],[-1,1], name = "w_gt_slice")
            w_slice = tf.reshape(gt_ds_batch[:,1],[-1], name = "w_gt_slice")
            w_sub  = tf.subtract(w_slice, tf_gt_conf_offset, name = "w_sub")
#           w_clip = tf.clip_by_value(w_sub, tf_0f,tf_maxw, name = "w_clip")
            w_clip = tf.maximum(w_sub, tf_0f, name = "w_clip")
            if gt_conf_pwr == 1.0:
                w = w_clip
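Restated in NumPy (an illustrative sketch, not code from the repository), this hunk derives per-tile weights from the ground-truth confidence channel: shift by gt_conf_offset, clamp at zero, then raise to gt_conf_pwr. The general-exponent branch is outside the shown context, so it is assumed here from the parameter name:

    import numpy as np

    def gt_weights(gt_conf, gt_conf_offset=0.08, gt_conf_pwr=1.0):
        if gt_conf_pwr == 0:
            return np.ones_like(gt_conf)          # w_ones: all tiles weighted equally
        w_clip = np.maximum(gt_conf - gt_conf_offset, 0.0)  # w_sub, then w_clip
        if gt_conf_pwr == 1.0:
            return w_clip
        return w_clip ** gt_conf_pwr  # assumed general case, not visible in this hunk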
@@ -222,7 +223,7 @@ def batchLoss(out_batch, # [batch_size,(1..2)] tf_result
        if use_confidence:
            tf_num_tilesf = tf.cast(tf_num_tiles, dtype=tf.float32, name="tf_num_tilesf")
#           conf_slice = tf.slice(out_batch,[0,1],[-1,1], name = "conf_slice")
            conf_slice = tf.reshape(out_batch[:,1],[-1], name = "conf_slice")
            conf_sum   = tf.reduce_sum(conf_slice, name = "conf_sum")
            conf_avg   = tf.divide(conf_sum, tf_num_tilesf, name = "conf_avg")
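When use_confidence is set, the network's own confidence channel (column 1 of out_batch) enters the loss, and conf_avg is simply its mean over the tiles in the batch. A hypothetical NumPy illustration of that reading:

    import numpy as np

    out_batch = np.array([[0.5, 0.9],      # columns: [disparity, confidence]
                          [1.2, 0.3]], dtype=np.float32)
    conf_slice = out_batch[:, 1].reshape(-1)
    conf_avg = conf_slice.sum() / out_batch.shape[0]   # (0.9 + 0.3) / 2 = 0.6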
@@ -239,15 +240,15 @@ def batchLoss(out_batch, # [batch_size,(1..2)] tf_result
            w_all = tf.multiply(w, nconf, name = "w_all")
        else:
            w_all = w
            cost2 = 0.0
            cost3 = 0.0
        # normalize weights
        w_sum  = tf.reduce_sum(w_all, name = "w_sum")
        iw_sum = tf.divide(tf_1f, w_sum, name = "iw_sum")
        w_norm = tf.multiply(w_all, iw_sum, name = "w_norm")
#       disp_slice = tf.slice(out_batch,[0,0],[-1,1], name = "disp_slice")
#       d_gt_slice = tf.slice(gt_ds_batch,[0,0],[-1,1], name = "d_gt_slice")
        disp_slice = tf.reshape(out_batch[:,0],[-1], name = "disp_slice")
        d_gt_slice = tf.reshape(gt_ds_batch[:,0],[-1], name = "d_gt_slice")
        if absolute_disparity:
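The normalization in this hunk rescales w_all so the weights sum to one (tf_1f is evidently a float32 constant 1.0), turning the subsequent per-tile disparity error terms, not shown in this hunk, into a weighted average rather than a raw sum. A small numeric sketch, assuming that reading:

    import numpy as np

    w_all  = np.array([0.2, 0.0, 0.6], dtype=np.float32)
    iw_sum = 1.0 / w_all.sum()        # tf_1f / w_sum = 1 / 0.8
    w_norm = w_all * iw_sum           # [0.25, 0.0, 0.75], sums to 1.0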