Elphel / python3-imagej-tiff

Commit d0bbd388, authored Aug 03, 2018 by Andrey Filippov
Parent: da662515

    Added scope

Showing 1 changed file, with 70 additions and 69 deletions:

    nn_ds_inmem.py  (+70 / -69)
nn_ds_inmem.py

The change wraps the body of batchLoss() in a tf.name_scope("BatchLoss") block, re-indenting it one level, and comments out the cost2 = 0.0 / cost3 = 0.0 fallback assignments in the no-confidence branch (they are unused on that path, since the final return adds cost2 and cost3 only when use_confidence is set). The hunk after the change:

...
@@ -194,77 +194,78 @@ def batchLoss(out_batch, # [batch_size,(1..2)] tf_result
                  conf_pwr =       2.0,
                  gt_conf_offset = 0.08,
                  gt_conf_pwr =    1.0):
    with tf.name_scope("BatchLoss"):
        """
        Here confidence should be after relU. Disparity - may be also if absolute, but no activation if output is residual disparity
        """
        tf_lambda_conf_avg = tf.constant(lambda_conf_avg, dtype=tf.float32, name="tf_lambda_conf_avg")
        tf_lambda_conf_pwr = tf.constant(lambda_conf_pwr, dtype=tf.float32, name="tf_lambda_conf_pwr")
        tf_conf_pwr =        tf.constant(conf_pwr,        dtype=tf.float32, name="tf_conf_pwr")
        tf_gt_conf_offset =  tf.constant(gt_conf_offset,  dtype=tf.float32, name="tf_gt_conf_offset")
        tf_gt_conf_pwr =     tf.constant(gt_conf_pwr,     dtype=tf.float32, name="tf_gt_conf_pwr")
        tf_num_tiles =       tf.shape(gt_ds_batch)[0]
        tf_0f =              tf.constant(0.0,             dtype=tf.float32, name="tf_0f")
        tf_1f =              tf.constant(1.0,             dtype=tf.float32, name="tf_1f")
        tf_maxw =            tf.constant(1.0,             dtype=tf.float32, name="tf_maxw")
        if gt_conf_pwr == 0:
            w = tf.ones((out_batch.shape[0]), dtype=tf.float32, name="w_ones")
        else:
#           w_slice = tf.slice(gt_ds_batch,[0,1],[-1,1],             name = "w_gt_slice")
            w_slice = tf.reshape(gt_ds_batch[:,1],[-1],              name = "w_gt_slice")
            w_sub =   tf.subtract(w_slice, tf_gt_conf_offset,        name = "w_sub")
#           w_clip =  tf.clip_by_value(w_sub, tf_0f, tf_maxw,        name = "w_clip")
            w_clip =  tf.maximum(w_sub, tf_0f,                       name = "w_clip")
            if gt_conf_pwr == 1.0:
                w = w_clip
            else:
                w = tf.pow(w_clip, tf_gt_conf_pwr,                   name = "w")

        if use_confidence:
            tf_num_tilesf =  tf.cast(tf_num_tiles, dtype=tf.float32, name = "tf_num_tilesf")
#           conf_slice =     tf.slice(out_batch,[0,1],[-1,1],        name = "conf_slice")
            conf_slice =     tf.reshape(out_batch[:,1],[-1],         name = "conf_slice")
            conf_sum =       tf.reduce_sum(conf_slice,               name = "conf_sum")
            conf_avg =       tf.divide(conf_sum, tf_num_tilesf,      name = "conf_avg")
            conf_avg1 =      tf.subtract(conf_avg, tf_1f,            name = "conf_avg1")
            conf_avg2 =      tf.square(conf_avg1,                    name = "conf_avg2")
            cost2 =          tf.multiply(conf_avg2, tf_lambda_conf_avg, name = "cost2")
            iconf_avg =      tf.divide(tf_1f, conf_avg,              name = "iconf_avg")
            nconf =          tf.multiply(conf_slice, iconf_avg,      name = "nconf") #normalized confidence
            nconf_pwr =      tf.pow(nconf, conf_pwr,                 name = "nconf_pwr")
            nconf_pwr_sum =  tf.reduce_sum(nconf_pwr,                name = "nconf_pwr_sum")
            nconf_pwr_offs = tf.subtract(nconf_pwr_sum, tf_1f,       name = "nconf_pwr_offs")
            cost3 =          tf.multiply(conf_avg2, nconf_pwr_offs,  name = "cost3")
            w_all =          tf.multiply(w, nconf,                   name = "w_all")
        else:
            w_all = w
#           cost2 = 0.0
#           cost3 = 0.0
        # normalize weights
        w_sum =  tf.reduce_sum(w_all,                                name = "w_sum")
        iw_sum = tf.divide(tf_1f, w_sum,                             name = "iw_sum")
        w_norm = tf.multiply(w_all, iw_sum,                          name = "w_norm")

#       disp_slice = tf.slice(out_batch,[0,0],[-1,1],                name = "disp_slice")
#       d_gt_slice = tf.slice(gt_ds_batch,[0,0],[-1,1],              name = "d_gt_slice")
        disp_slice = tf.reshape(out_batch[:,0],[-1],                 name = "disp_slice")
        d_gt_slice = tf.reshape(gt_ds_batch[:,0],[-1],               name = "d_gt_slice")
        if absolute_disparity:
            out_diff = tf.subtract(disp_slice, d_gt_slice,           name = "out_diff")
        else:
            td_flat =       tf.reshape(target_disparity_batch,[-1],  name = "td_flat")
            residual_disp = tf.subtract(d_gt_slice, td_flat,         name = "residual_disp")
            out_diff =      tf.subtract(disp_slice, residual_disp,   name = "out_diff")
        out_diff2 =  tf.square(out_diff,                             name = "out_diff2")
        out_wdiff2 = tf.multiply(out_diff2, w_norm,                  name = "out_wdiff2")
        cost1 =      tf.reduce_sum(out_wdiff2,                       name = "cost1")
        if use_confidence:
            cost12 =  tf.add(cost1, cost2,                           name = "cost12")
            cost123 = tf.add(cost12, cost3,                          name = "cost123")
            return cost123, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, cost1
        else:
            return cost1,   disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, cost1
#corr2d325 = tf.concat([corr2d,target_disparity],0)
...
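The commit message, "Added scope", refers to TensorFlow name scoping: every op created inside the new with block gets a "BatchLoss/" prefix, so the whole loss subgraph collapses into a single expandable node in TensorBoard, and names like "cost1" cannot collide with identically named ops elsewhere in the graph. A minimal sketch of the mechanism, assuming TensorFlow 1.x as used by this 2018 code (not code from this repo):

import tensorflow as tf

# Ops created outside any scope keep their bare names.
a = tf.constant(1.0, name="tf_1f")        # op name: "tf_1f"

# Ops created inside the scope are prefixed, exactly as in batchLoss().
with tf.name_scope("BatchLoss"):
    b = tf.constant(1.0, name="tf_1f")    # op name: "BatchLoss/tf_1f" -- no collision with a
    c = tf.add(a, b, name="cost12")       # op name: "BatchLoss/cost12"

print(a.op.name, b.op.name, c.op.name)    # tf_1f BatchLoss/tf_1f BatchLoss/cost12

Note that tf.name_scope only affects op display names; unlike tf.variable_scope it has no effect on variable reuse, which is enough here because batchLoss() creates no variables.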
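Taken together, the hunk builds a confidence-weighted squared-disparity loss. In notation of my own choosing (none of these symbols appear in the repo), write d_i, c_i for the predicted disparity and confidence of tile i, d^{gt}_i, c^{gt}_i for the ground truth, t_i for the target disparity, and N for the number of tiles; the graph then computes

    w_i = \max\left(c^{gt}_i - \mathrm{gt\_conf\_offset},\, 0\right)^{\mathrm{gt\_conf\_pwr}}, \qquad
    \bar{c} = \frac{1}{N}\sum_i c_i,

    \mathrm{cost}_1 = \sum_i \tilde{w}_i \left(d_i - \hat{d}_i\right)^2, \qquad
    \tilde{w}_i = \frac{w_i\, c_i/\bar{c}}{\sum_j w_j\, c_j/\bar{c}}, \qquad
    \hat{d}_i = \begin{cases} d^{gt}_i & \text{absolute disparity} \\ d^{gt}_i - t_i & \text{residual disparity,} \end{cases}

    \mathrm{cost}_2 = \lambda_{\mathrm{conf\_avg}}\,(\bar{c}-1)^2, \qquad
    \mathrm{cost}_3 = (\bar{c}-1)^2 \left( \sum_i (c_i/\bar{c})^{\mathrm{conf\_pwr}} - 1 \right),

returning cost_1 + cost_2 + cost_3 when use_confidence is set, and cost_1 alone (with the weights built from w_i only) otherwise. cost_2 pulls the mean predicted confidence toward 1, and cost_3, gated by the same (\bar{c}-1)^2 factor, grows when confidence is spread unevenly across tiles.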
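For orientation, a hypothetical call site. Only conf_pwr, gt_conf_offset and gt_conf_pwr (with their defaults) are visible in the hunk header; every other parameter name below is inferred from how the body uses it, and all shapes and lambda values are illustrative assumptions:

import tensorflow as tf  # TensorFlow 1.x assumed

out = tf.placeholder(tf.float32, [100, 2], name="out_batch")         # net output: disparity, confidence
gt  = tf.placeholder(tf.float32, [100, 2], name="gt_ds_batch")       # gt disparity, gt confidence
td  = tf.placeholder(tf.float32, [100],    name="target_disparity")  # disparity shift fed to the net

(cost123, disp_slice, d_gt_slice, out_diff,
 out_diff2, w_norm, out_wdiff2, cost1) = batchLoss(
    out,                             # out_batch
    target_disparity_batch = td,
    gt_ds_batch            = gt,
    absolute_disparity     = False,  # train on residual disparity
    use_confidence         = True,   # enables the cost2/cost3 terms
    lambda_conf_avg        = 0.01,   # assumed value; default not visible in this hunk
    lambda_conf_pwr        = 0.1,    # assumed value; default not visible in this hunk
    conf_pwr               = 2.0,
    gt_conf_offset         = 0.08,
    gt_conf_pwr            = 1.0)

loss = cost123  # feed to an optimizer, e.g. tf.train.AdamOptimizer(...).minimize(loss)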