Elphel / python3-imagej-tiff

Commit 3519f5ec, authored Sep 05, 2018 by Andrey Filippov

    next versions, some cleanup

parent 51cc074c
Showing 5 changed files with 1270 additions and 103 deletions (+1270 -103):

    explore_data5.py       +1147  -0
    nn_ds_neibs16.py       +12    -12
    qcstereo_functions.py  +42    -26
    qcstereo_losses.py     +6     -6
    qcstereo_network.py    +63    -59
explore_data5.py (new file, 0 → 100644)
Diff collapsed (not shown): +1147 lines.
nn_ds_neibs16.py
 #!/usr/bin/env python3
-from numpy import float64
+##from numpy import float64
-from tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import npairs_loss
+##from tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import npairs_loss
-from debian.deb822 import PdiffIndex
+##from debian.deb822 import PdiffIndex
 __copyright__ = "Copyright 2018, Elphel, Inc."
 __license__ = "GPL-3.0+"
 __email__ = "andrey@elphel.com"
-from PIL import Image
+##from PIL import Image
 import os
 import sys
-import glob
+##import glob
 import numpy as np
-import itertools
+##import itertools
 import time
-import matplotlib.pyplot as plt
+##import matplotlib.pyplot as plt
 import shutil
 from threading import Thread
@@ -49,7 +49,7 @@ except IndexError:
 root_dir = os.path.dirname(conf_file)
 print("Configuration file: " + conf_file)
-parameters, dirs, files = qsf.parseXmlConfig(conf_file, root_dir)
+parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
 """
 Temporarily for backward compatibility
 """
@@ -221,7 +221,7 @@ if SPREAD_CONVERGENCE:
 else:
     outs, inp_weights = qcstereo_network.networks_siam(
-        input        = corr2d_Nx325,
+        input_tensor = corr2d_Nx325,
         input_global = None,
         layout1      = NN_LAYOUT1,
         layout2      = NN_LAYOUT2,
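(Note: the input → input_tensor keyword rename presumably tracks a matching parameter rename in qcstereo_network.networks_siam, whose diff is collapsed below; renaming the parameter also avoids shadowing Python's built-in input() inside that function.)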
@@ -247,7 +247,7 @@ G_losses[0], _disp_slice, _d_gt_slice, _out_diff, _out_diff2, _w_norm, _out_wdif
     absolute_disparity = ABSOLUTE_DISPARITY,
     use_confidence     = USE_CONFIDENCE, # True,
     lambda_conf_avg    = 0.01,
-    lambda_conf_pwr    = 0.1,
+##  lambda_conf_pwr    = 0.1,
     conf_pwr           = 2.0,
     gt_conf_offset     = 0.08,
     gt_conf_pwr        = 2.0,
@@ -268,7 +268,7 @@ for n in range (1,len(partials)):
     absolute_disparity = ABSOLUTE_DISPARITY,
     use_confidence     = USE_CONFIDENCE, # True,
     lambda_conf_avg    = 0.01,
-    lambda_conf_pwr    = 0.1,
+#   lambda_conf_pwr    = 0.1,
     conf_pwr           = 2.0,
     gt_conf_offset     = 0.08,
     gt_conf_pwr        = 2.0,
@@ -702,7 +702,7 @@ with tf.Session() as sess:
     if ntest > 0:
         image_data[ntest] = None
 # Close writers
 train_writer.close()
 test_writer.close()
 test_writer1.close()
qcstereo_functions.py
@@ -30,13 +30,14 @@ def print_time(txt="",end="\n"):
         txt += " "
     print(("%s" + bcolors.BOLDWHITE + "at %.4fs (+%.4fs)" + bcolors.ENDC) % (txt, t - TIME_START, t - TIME_LAST), end=end, flush=True)
     TIME_LAST = t

 def parseXmlConfig(conf_file, root_dir):
     tree = ET.parse(conf_file)
     root = tree.getroot()
     parameters = {}
     for p in root.find('parameters'):
         parameters[p.tag] = eval(p.text.strip())
-    globals
+#   globals
     dirs = {}
     for p in root.find('directories'):
         dirs[p.tag] = eval(p.text.strip())
@@ -46,7 +47,11 @@ def parseXmlConfig(conf_file, root_dir):
     for p in root.find('files'):
         files[p.tag] = eval(p.text.strip())
 #   globals().update(parameters)
-    return parameters, dirs, files
+    dbg_parameters = {}
+    for p in root.find('dbg_parameters'):
+        dbg_parameters[p.tag] = eval(p.text.strip())
+    return parameters, dirs, files, dbg_parameters
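For reference, parseXmlConfig() now parses an optional <dbg_parameters> section and returns four values; a minimal usage sketch matching the call-site change in nn_ds_neibs16.py (where qsf is that file's alias for this module):

    parameters, dirs, files, dbg_parameters = qsf.parseXmlConfig(conf_file, root_dir)
    # Callers that do not need the new debug section can discard it:
    parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)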
@@ -84,7 +89,8 @@ def readTFRewcordsEpoch(train_filename):
     npy_dir_name = "npy"
     dirname = os.path.dirname(train_filename)
     npy_dir = os.path.join(dirname, npy_dir_name)
-    filebasename, file_extension = os.path.splitext(train_filename)
+#   filebasename, file_extension = os.path.splitext(train_filename)
+    filebasename, _ = os.path.splitext(train_filename)
     filebasename = os.path.basename(filebasename)
     file_corr2d = os.path.join(npy_dir, filebasename + '_corr2d.npy')
     file_target_disparity = os.path.join(npy_dir, filebasename + '_target_disparity.npy')
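The hunk above derives the .npy cache paths from the TFRecords file name; a self-contained sketch of the same derivation (the example path is a placeholder, not from the commit):

    import os

    train_filename = '/data/train-000.tfrecords'   # placeholder path
    npy_dir = os.path.join(os.path.dirname(train_filename), 'npy')
    filebasename, _ = os.path.splitext(train_filename)
    filebasename = os.path.basename(filebasename)
    file_corr2d = os.path.join(npy_dir, filebasename + '_corr2d.npy')
    file_target_disparity = os.path.join(npy_dir, filebasename + '_target_disparity.npy')
    # file_corr2d == '/data/npy/train-000_corr2d.npy'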
@@ -179,7 +185,7 @@ def add_neibs(npa_ext,radius):
     height = npa_ext.shape[0] - 2 * radius
     width  = npa_ext.shape[1] - 2 * radius
     side   = 2 * radius + 1
-    size   = side * side
+#   size   = side * side
     npa_neib = np.empty((height, width, side, side, npa_ext.shape[2]), dtype=npa_ext.dtype)
     for dy in range(side):
         for dx in range(side):
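add_neibs() gathers, for every tile, its (2*radius+1)^2 neighborhood into the trailing axes; the loop body is elided in this view, so the following is a sketch under that assumption, consistent with the visible allocation and the reshape in the next hunk:

    import numpy as np

    def add_neibs_sketch(npa_ext, radius):
        # npa_ext is assumed padded by `radius` on each side of the first two axes
        height = npa_ext.shape[0] - 2 * radius
        width  = npa_ext.shape[1] - 2 * radius
        side   = 2 * radius + 1
        npa_neib = np.empty((height, width, side, side, npa_ext.shape[2]),
                            dtype=npa_ext.dtype)
        for dy in range(side):
            for dx in range(side):
                # assumed body (elided in the diff): copy the shifted window
                npa_neib[:, :, dy, dx, :] = npa_ext[dy:dy + height, dx:dx + width, :]
        return npa_neib.reshape(height, width, -1)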
@@ -187,8 +193,8 @@ def add_neibs(npa_ext,radius):
     return npa_neib.reshape(height, width, -1)

 def extend_img_to_clusters(datasets_img, radius, width): # = 324):
-    side = 2 * radius + 1
-    size = side * side
+#   side = 2 * radius + 1
+#   size = side * side
     if len(datasets_img) == 0:
         return
     num_tiles = datasets_img[0]['corr2d'].shape[0]
@@ -210,7 +216,7 @@ def reformat_to_clusters(datasets_data, cluster_radius):
 def flip_horizontal(datasets_data, cluster_radius, tile_layers, tile_side):
     cluster_side = 2 * cluster_radius + 1
-    cluster_size = cluster_side * cluster_side
+#   cluster_size = cluster_side * cluster_side
     """
 TILE_LAYERS = 4
 TILE_SIDE = 9 # 7
@@ -238,8 +244,8 @@ TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
         rec['target_disparity'] = target_disparity.reshape((target_disparity.shape[0], -1))
         rec['gt_ds'] = gt_ds.reshape((gt_ds.shape[0], -1))

-def replace_nan(datasets_data, cluster_radius):
-    cluster_size = (2 * cluster_radius + 1) * (2 * cluster_radius + 1)
+def replace_nan(datasets_data): # , cluster_radius):
+#   cluster_size = (2 * cluster_radius + 1) * (2 * cluster_radius + 1)
     # Reformat input data
     for rec in datasets_data:
         if not rec is None:
@@ -259,7 +265,7 @@ def permute_to_swaps(perm):
 def shuffle_in_place(datasets_data, indx, period):
     swaps = permute_to_swaps(np.random.permutation(len(datasets_data)))
-    num_entries = datasets_data[0]['corr2d'].shape[0] // period
+#   num_entries = datasets_data[0]['corr2d'].shape[0] // period
     for swp in swaps:
         ds0 = datasets_data[swp[0]]
         ds1 = datasets_data[swp[1]]
@@ -279,9 +285,10 @@ def shuffle_chunks_in_place(datasets_data, tiles_groups_per_chunk):
     """
     Improve shuffling by preserving indices inside batches (0 <-> 0, ... 39 <-> 39 for 40 tile group batches)
     """
-    num_files = len(datasets_data)
+#   num_files = len(datasets_data)
     #chunks_per_file = datasets_data[0]['target_disparity']
-    for nf, ds in enumerate(datasets_data):
+#   for nf, ds in enumerate(datasets_data):
+    for ds in datasets_data:
         groups_per_file = ds['corr2d'].shape[0]
         chunks_per_file = groups_per_file // tiles_groups_per_chunk
         permut = np.random.permutation(chunks_per_file)
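shuffle_chunks_in_place() permutes whole chunks so that positions inside each chunk (0 <-> 0, ..., 39 <-> 39 for 40-tile-group batches) survive the shuffle; a toy sketch of that idea on a plain array (names and shapes are illustrative only, not the project's data):

    import numpy as np

    def shuffle_chunks_sketch(arr, groups_per_chunk):
        chunks = arr.shape[0] // groups_per_chunk
        permut = np.random.permutation(chunks)
        view = arr[:chunks * groups_per_chunk].reshape(chunks, groups_per_chunk, -1)
        return view[permut].reshape(-1, arr.shape[1])

    data = np.arange(120, dtype=np.float32).reshape(30, 4)
    shuffled = shuffle_chunks_sketch(data, groups_per_chunk=10)  # rows move in blocks of 10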
@@ -327,7 +334,8 @@ def zip_lvar_hvar(datasets_all_data, del_src = True):
                'target_disparity': np.empty((recs[0]['target_disparity'].shape[0] * num_sets_to_combine, recs[0]['target_disparity'].shape[1]), dtype=np.float32),
                'gt_ds':            np.empty((recs[0]['gt_ds'].shape[0] * num_sets_to_combine, recs[0]['gt_ds'].shape[1]), dtype=np.float32)}
-        for nset, reci in enumerate(recs):
+#       for nset, reci in enumerate(recs):
+        for nset, _ in enumerate(recs):
            rec['corr2d'][nset::num_sets_to_combine]           = recs[nset]['corr2d']
            rec['target_disparity'][nset::num_sets_to_combine] = recs[nset]['target_disparity']
            rec['gt_ds'][nset::num_sets_to_combine]            = recs[nset]['gt_ds']
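The strided assignments above interleave same-shaped record arrays round-robin along axis 0; a self-contained toy example of the rec[key][nset::num_sets_to_combine] pattern:

    import numpy as np

    num_sets_to_combine = 2
    a = np.zeros((3, 4), dtype=np.float32)   # stand-in for recs[0]['corr2d']
    b = np.ones((3, 4), dtype=np.float32)    # stand-in for recs[1]['corr2d']
    merged = np.empty((a.shape[0] * num_sets_to_combine, a.shape[1]), dtype=np.float32)
    merged[0::num_sets_to_combine] = a       # rows 0, 2, 4 come from a
    merged[1::num_sets_to_combine] = b       # rows 1, 3, 5 come from b
    print(merged[:, 0])                      # [0. 1. 0. 1. 0. 1.]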
@@ -356,10 +364,10 @@ def initTrainTestData(
         max_files_per_group, # shuffling buffer for files
         two_trains,
         train_next):
-    datasets_train_lvar = []
-    datasets_train_hvar = []
-    datasets_train_lvar1 = []
-    datasets_train_hvar1 = []
+#   datasets_train_lvar = []
+#   datasets_train_hvar = []
+#   datasets_train_lvar1 = []
+#   datasets_train_hvar1 = []
     datasets_train_all = [[],[],[],[]]
     for n_train, f_train in enumerate(files['train']):
         if len(f_train) and ((n_train < 2) or two_trains):
@@ -445,7 +453,8 @@ def readImageData(image_data,
                               cluster_radius,
                               width)
         if replace_nans:
-            replace_nan([image_data[indx]], cluster_radius)
+#           replace_nan([image_data[indx]], cluster_radius)
+            replace_nan([image_data[indx]])
     return image_data[indx]
@@ -477,7 +486,7 @@ def evaluateAllResults(result_files, absolute_disparity, cluster_radius):

-def result_npy_to_tiff(npy_path, absolute, fix_nan, insert_deltas=True):
+def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True):
     """
     @param npy_path full path to the npy file with 4-layer data (242,324,4) - nn_disparity(offset), target_disparity, gt disparity, gt strength

@@ -485,10 +494,9 @@ def result_npy_to_tiff(npy_path, absolute, fix_nan, insert_deltas=True):
     @param absolute - True - the first layer contains absolute disparity, False - difference from target_disparity
     @param fix_nan - replace nan in target_disparity with 0 to apply offset, target_disparity will still contain nan
     """
-    tiff_path = npy_path.replace('.npy', '.tiff')
     data = np.load(npy_path) # (324,242,4) [nn_disp, target_disp, gt_disp, gt_conf]
     nn_out = 0
-    target_disparity = 1
+#   target_disparity = 1
     gt_disparity = 2
     gt_strength = 3
     if not absolute:
@@ -501,20 +509,28 @@ def result_npy_to_tiff(npy_path, absolute, fix_nan, insert_deltas=True):
         data = np.concatenate([data[...,0:4], data[...,0:2], data[...,0:2], data[...,4:]], axis=2)
         data[...,6] -= data[...,gt_disparity]
         data[...,7] -= data[...,gt_disparity]
-        for l in [4,5,6,7]:
+        for l in [2,4,5,6,7]:
             data[...,l] = np.select([data[...,gt_strength] == 0.0, data[...,gt_strength] > 0.0], [np.nan, data[...,l]])
         # All other layers - mask too
         for l in range(8, data.shape[2]):
             data[...,l] = np.select([data[...,gt_strength] == 0.0, data[...,gt_strength] > 0.0], [np.nan, data[...,l]])
+    return data
+#   data[...,4] = np.select([data[...,3]==0.0, data[...,3]>0.0], [np.nan,data[...,4]])
+#   data[...,5] = np.select([data[...,3]==0.0, data[...,3]>0.0], [np.nan,data[...,5]])
+
+def result_npy_to_tiff(npy_path, absolute, fix_nan, insert_deltas=True):
+    """
+    @param npy_path full path to the npy file with 4-layer data (242,324,4) - nn_disparity(offset), target_disparity, gt disparity, gt strength
+    data will be written as 4-layer tiff, extension '.npy' replaced with '.tiff'
+    @param absolute - True - the first layer contains absolute disparity, False - difference from target_disparity
+    @param fix_nan - replace nan in target_disparity with 0 to apply offset, target_disparity will still contain nan
+    """
+    data = result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas)
+    tiff_path = npy_path.replace('.npy', '.tiff')
     data = data.transpose(2,0,1)
     print("Saving results to TIFF: " + tiff_path)
     imagej_tiffwriter.save(tiff_path, data[...,np.newaxis])

 def eval_results(rslt_path, absolute,
                  min_disp = -0.1, # minimal GT disparity
                  max_disp = 20.0, # maximal GT disparity
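The refactor splits data preparation from TIFF writing: result_npy_prepare() builds the layered array and NaN-masks layers wherever gt_strength is zero (via np.select), while result_npy_to_tiff() now just calls it and saves. A hedged sketch of the resulting call patterns (the path is a placeholder, not from the commit):

    # In-memory use, e.g. numeric evaluation without writing a file:
    data = result_npy_prepare('results/model-out.npy', absolute=False, fix_nan=True)

    # Original one-call behavior, now delegating to result_npy_prepare():
    result_npy_to_tiff('results/model-out.npy', absolute=False, fix_nan=True)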
qcstereo_losses.py
@@ -4,7 +4,7 @@ __license__ = "GPL-3.0+"
 __email__ = "andrey@elphel.com"
 #from numpy import float64
-import numpy as np
+#import numpy as np
 import tensorflow as tf

 def smoothLoss(out_batch, # [batch_size,(1..2)] tf_result

@@ -76,7 +76,7 @@ def batchLoss(out_batch, # [batch_size,(1..2)] tf_result
               absolute_disparity = False, # when false there should be no activation on disparity output!
               use_confidence     = False,
               lambda_conf_avg    = 0.01,
-              lambda_conf_pwr    = 0.1,
+##            lambda_conf_pwr    = 0.1,
               conf_pwr           = 2.0,
               gt_conf_offset     = 0.08,
               gt_conf_pwr        = 1.0,
@@ -90,14 +90,14 @@ def batchLoss(out_batch, # [batch_size,(1..2)] tf_result
     Here confidence should be after ReLU. Disparity - may be also if absolute, but no activation if output is residual disparity
     """
     tf_lambda_conf_avg = tf.constant(lambda_conf_avg, dtype=tf.float32, name="tf_lambda_conf_avg")
-    tf_lambda_conf_pwr = tf.constant(lambda_conf_pwr, dtype=tf.float32, name="tf_lambda_conf_pwr")
+##  tf_lambda_conf_pwr = tf.constant(lambda_conf_pwr, dtype=tf.float32, name="tf_lambda_conf_pwr")
-    tf_conf_pwr        = tf.constant(conf_pwr,        dtype=tf.float32, name="tf_conf_pwr")
+##  tf_conf_pwr        = tf.constant(conf_pwr,        dtype=tf.float32, name="tf_conf_pwr")
     tf_gt_conf_offset  = tf.constant(gt_conf_offset,  dtype=tf.float32, name="tf_gt_conf_offset")
     tf_gt_conf_pwr     = tf.constant(gt_conf_pwr,     dtype=tf.float32, name="tf_gt_conf_pwr")
     tf_num_tiles       = tf.shape(gt_ds_batch)[0]
     tf_0f              = tf.constant(0.0, dtype=tf.float32, name="tf_0f")
     tf_1f              = tf.constant(1.0, dtype=tf.float32, name="tf_1f")
-    tf_maxw            = tf.constant(1.0, dtype=tf.float32, name="tf_maxw")
+##  tf_maxw            = tf.constant(1.0, dtype=tf.float32, name="tf_maxw")
     tf_disp_diff_cap2  = tf.constant(disp_diff_cap * disp_diff_cap, dtype=tf.float32, name="disp_diff_cap2")
     tf_disp_diff_slope = tf.constant(disp_diff_slope, dtype=tf.float32, name="disp_diff_slope")
@@ -197,7 +197,7 @@ def weightsLoss(inp_weights,
                 tile_side,
                 wborders_zero):
     # [batch_size,(1..2)] tf_result
     # weights_lambdas): # single lambda or same length as inp_weights.shape[1]
     """
     Enforcing 'smooth' weights for the input 2d correlation tiles
qcstereo_network.py
Diff collapsed (not shown): +63 -59.