Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
L
lwir-nn
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Elphel
lwir-nn
Commits
4b02b283
Commit
4b02b283
authored
Jul 29, 2019
by
Andrey Filippov
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
more versions, adding more images in the results plot
parent
466ed6b1
Changes
6
Show whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
668 additions
and
238 deletions
+668
-238
explore_data15.py
explore_data15.py
+14
-3
explore_data16.py
explore_data16.py
+20
-19
nn_ds_neibs31.py
nn_ds_neibs31.py
+1
-1
nn_eval_lwir.py
nn_eval_lwir.py
+140
-212
nn_eval_lwir_00.py
nn_eval_lwir_00.py
+490
-0
qcstereo_network.py
qcstereo_network.py
+3
-3
No files found.
explore_data15.py
View file @
4b02b283
...
@@ -1755,7 +1755,7 @@ if __name__ == "__main__":
...
@@ -1755,7 +1755,7 @@ if __name__ == "__main__":
test_corrs
=
[]
test_corrs
=
[]
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
'''
test_sets = [
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
...
@@ -1777,7 +1777,18 @@ if __name__ == "__main__":
...
@@ -1777,7 +1777,18 @@ if __name__ == "__main__":
"/data_ssd/lwir_sets/lwir_test3/1562390409_661607/v01/ml32", # lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test3/1562390409_661607/v01/ml32", # lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test3/1562390435_873048/v01/ml32", # 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test3/1562390435_873048/v01/ml32", # 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test3/1562390456_842237/v01/ml32", # near trees
"/data_ssd/lwir_sets/lwir_test3/1562390456_842237/v01/ml32", # near trees
"/data_ssd/lwir_sets/lwir_test3/1562390460_261151/v01/ml32"
]
# near trees, olga
"/data_ssd/lwir_sets/lwir_test3/1562390460_261151/v01/ml32", # near trees, olga
]
'''
test_sets
=
[
"/data_ssd/lwir_sets/lwir_test6/1562390317_693673/v01/ml32"
,
# andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390318_833313/v01/ml32"
,
# andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390326_354823/v01/ml32"
,
# andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390331_483132/v01/ml32"
,
# andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390333_192523/v01/ml32"
,
# lena
]
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
TEST_SAME_LENGTH_AS_TRAIN
=
False
# True # make test to have same number of entries as train ones
TEST_SAME_LENGTH_AS_TRAIN
=
False
# True # make test to have same number of entries as train ones
...
...
explore_data16.py
View file @
4b02b283
...
@@ -1657,14 +1657,15 @@ if __name__ == "__main__":
...
@@ -1657,14 +1657,15 @@ if __name__ == "__main__":
test_corrs
=
[]
test_corrs
=
[]
test_sets
=
[
test_sets
=
[
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32"
,
# andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32b"
,
# andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32"
,
# andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32b"
,
# andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32"
,
# andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32b"
,
# andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32"
,
# 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32b"
,
# 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390251_025390/v01/ml32"
,
# empty space
"/data_ssd/lwir_sets/lwir_test2/1562390257_977146/v01/ml32"
,
# first 3
"/data_ssd/lwir_sets/lwir_test6/1562390251_025390/v01/ml32b"
,
# empty space
"/data_ssd/lwir_sets/lwir_test2/1562390260_370347/v01/ml32"
,
# all 3
"/data_ssd/lwir_sets/lwir_test6/1562390257_977146/v01/ml32b"
,
# first 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32"
,
# all 3
"/data_ssd/lwir_sets/lwir_test6/1562390260_370347/v01/ml32b"
,
# all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32b"
,
# all 3
"/data_ssd/lwir_sets/lwir_test6/1562390317_693673/v01/ml32"
,
# andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390317_693673/v01/ml32"
,
# andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390318_833313/v01/ml32"
,
# andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390318_833313/v01/ml32"
,
# andrey + olga
...
@@ -1672,12 +1673,12 @@ if __name__ == "__main__":
...
@@ -1672,12 +1673,12 @@ if __name__ == "__main__":
"/data_ssd/lwir_sets/lwir_test6/1562390331_483132/v01/ml32"
,
# andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390331_483132/v01/ml32"
,
# andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390333_192523/v01/ml32"
,
# lena
"/data_ssd/lwir_sets/lwir_test6/1562390333_192523/v01/ml32"
,
# lena
"/data_ssd/lwir_sets/lwir_test
3/1562390402_254007/v01/ml32
"
,
# near moving car
"/data_ssd/lwir_sets/lwir_test
6/1562390402_254007/v01/ml32b
"
,
# near moving car
"/data_ssd/lwir_sets/lwir_test
3/1562390407_382326/v01/ml32
"
,
# near moving car
"/data_ssd/lwir_sets/lwir_test
6/1562390407_382326/v01/ml32b
"
,
# near moving car
"/data_ssd/lwir_sets/lwir_test
3/1562390409_661607/v01/ml32
"
,
# lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test
6/1562390409_661607/v01/ml32b
"
,
# lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test
3/1562390435_873048/v01/ml32
"
,
# 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test
6/1562390435_873048/v01/ml32b
"
,
# 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test
3/1562390456_842237/v01/ml32
"
,
# near trees
"/data_ssd/lwir_sets/lwir_test
6/1562390456_842237/v01/ml32b
"
,
# near trees
"/data_ssd/lwir_sets/lwir_test
3/1562390460_261151/v01/ml32
"
]
# near trees, olga
"/data_ssd/lwir_sets/lwir_test
6/1562390460_261151/v01/ml32b
"
]
# near trees, olga
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
TEST_SAME_LENGTH_AS_TRAIN
=
False
# True # make test to have same number of entries as train ones
TEST_SAME_LENGTH_AS_TRAIN
=
False
# True # make test to have same number of entries as train ones
...
...
nn_ds_neibs31.py
View file @
4b02b283
...
@@ -3,7 +3,7 @@ __copyright__ = "Copyright 2018-2019, Elphel, Inc."
...
@@ -3,7 +3,7 @@ __copyright__ = "Copyright 2018-2019, Elphel, Inc."
__license__
=
"GPL-3.0+"
__license__
=
"GPL-3.0+"
__email__
=
"andrey@elphel.com"
__email__
=
"andrey@elphel.com"
#python3 nn_ds_neibs31.py /data_ssd/lwir_sets/conf/qcstereo_lwir
05
.xml /data_ssd/lwir_sets/
#python3 nn_ds_neibs31.py /data_ssd/lwir_sets/conf/qcstereo_lwir
21
.xml /data_ssd/lwir_sets/
#tensorboard --logdir="nn_ds_neibs30_graph13-9RNSWLAM0.5SLAM0.1SCLP0.2_nG_nI_HF_CP0.3_S0.03" --port=7001
#tensorboard --logdir="nn_ds_neibs30_graph13-9RNSWLAM0.5SLAM0.1SCLP0.2_nG_nI_HF_CP0.3_S0.03" --port=7001
import
os
import
os
...
...
nn_eval_lwir.py
View file @
4b02b283
...
@@ -16,7 +16,7 @@ import sys
...
@@ -16,7 +16,7 @@ import sys
import
imagej_tiffwriter
import
imagej_tiffwriter
import
time
import
time
import
imagej_tiff
as
ijt
import
matplotlib.pyplot
as
plt
import
matplotlib.pyplot
as
plt
from
matplotlib.backends.backend_pdf
import
PdfPages
from
matplotlib.backends.backend_pdf
import
PdfPages
import
qcstereo_functions
as
qsf
import
qcstereo_functions
as
qsf
...
@@ -27,7 +27,8 @@ import numpy as np
...
@@ -27,7 +27,8 @@ import numpy as np
qsf
.
TIME_START
=
time
.
time
()
qsf
.
TIME_START
=
time
.
time
()
qsf
.
TIME_LAST
=
qsf
.
TIME_START
qsf
.
TIME_LAST
=
qsf
.
TIME_START
IMG_WIDTH
=
20
# 324 # tiles per image row Defined in config
#IMG_WIDTH = 20 # 324 # tiles per image row Defined in config
#IMG_HEIGHT = 15 # 324 # tiles per image row Defined in config
DEBUG_LEVEL
=
1
DEBUG_LEVEL
=
1
...
@@ -41,6 +42,12 @@ try:
...
@@ -41,6 +42,12 @@ try:
except
IndexError
:
except
IndexError
:
root_dir
=
os
.
path
.
dirname
(
conf_file
)
root_dir
=
os
.
path
.
dirname
(
conf_file
)
try
:
modes
=
[
sys
.
argv
[
3
]]
# train, infer
except
IndexError
:
modes
=
[
'train'
]
print
(
"Configuration file: "
+
conf_file
)
print
(
"Configuration file: "
+
conf_file
)
parameters
,
dirs
,
files
,
dbg_parameters
=
qsf
.
parseXmlConfig
(
conf_file
,
root_dir
)
parameters
,
dirs
,
files
,
dbg_parameters
=
qsf
.
parseXmlConfig
(
conf_file
,
root_dir
)
"""
"""
...
@@ -53,6 +60,8 @@ if not "SLOSS_CLIP" in parameters:
...
@@ -53,6 +60,8 @@ if not "SLOSS_CLIP" in parameters:
"""
"""
Defined in config file
Defined in config file
"""
"""
IMG_WIDTH
=
None
# 20 # 324 # tiles per image row Defined in config
IMG_HEIGHT
=
None
# 15 # 324 # tiles per image row Defined in config
TILE_SIDE
,
TILE_LAYERS
,
TWO_TRAINS
,
NET_ARCH1
,
NET_ARCH2
=
[
None
]
*
5
TILE_SIDE
,
TILE_LAYERS
,
TWO_TRAINS
,
NET_ARCH1
,
NET_ARCH2
=
[
None
]
*
5
ABSOLUTE_DISPARITY
,
SYM8_SUB
,
WLOSS_LAMBDA
,
SLOSS_LAMBDA
,
SLOSS_CLIP
=
[
None
]
*
5
ABSOLUTE_DISPARITY
,
SYM8_SUB
,
WLOSS_LAMBDA
,
SLOSS_LAMBDA
,
SLOSS_CLIP
=
[
None
]
*
5
SPREAD_CONVERGENCE
,
INTER_CONVERGENCE
,
HOR_FLIP
,
DISP_DIFF_CAP
,
DISP_DIFF_SLOPE
=
[
None
]
*
5
SPREAD_CONVERGENCE
,
INTER_CONVERGENCE
,
HOR_FLIP
,
DISP_DIFF_CAP
,
DISP_DIFF_SLOPE
=
[
None
]
*
5
...
@@ -111,14 +120,19 @@ qsf.prepareFiles(dirs, files, suffix = SUFFIX)
...
@@ -111,14 +120,19 @@ qsf.prepareFiles(dirs, files, suffix = SUFFIX)
CONF_MAX
=
0.7
CONF_MAX
=
0.7
ERR_AMPL
=
0.3
ERR_AMPL
=
0.
4
# 0.
3
TIGHT_TOP
=
0.95
TIGHT_TOP
=
0.95
TIGHT_HPAD
=
1.0
TIGHT_HPAD
=
1.0
TIGHT_WPAD
=
1.0
TIGHT_WPAD
=
1.0
FIGSIZE
=
[
8.5
,
11.0
]
FIGSIZE
=
[
8.5
,
11.0
]
WOI_COLOR
=
"red"
WOI_COLOR
=
"red"
X_COLOR
=
"grey"
X_NEIBS
=
False
TRANSPARENT
=
True
# for export
TRANSPARENT
=
True
# for export
#dbg_parameters
#dbg_parameters
def
get_fig_params
(
disparity_ranges
):
def
get_fig_params
(
disparity_ranges
):
fig_params
=
[]
fig_params
=
[]
...
@@ -138,7 +152,14 @@ def get_fig_params(disparity_ranges):
...
@@ -138,7 +152,14 @@ def get_fig_params(disparity_ranges):
return
fig_params
return
fig_params
#try:
#try:
fig_params
=
get_fig_params
(
dbg_parameters
[
'disparity_ranges'
])
#fig_params = get_fig_params(dbg_parameters['disparity_ranges'])
extra_path
=
os
.
path
.
join
(
root_dir
,
dbg_parameters
[
'extra'
])
eo_width
=
dbg_parameters
[
'eo_params'
][
'width'
]
eo_height
=
dbg_parameters
[
'eo_params'
][
'height'
]
eo_woi
=
dbg_parameters
[
'eo_params'
][
'woi'
]
# (x,y,width, height)
eo_disparity_scale
=
1.0
/
dbg_parameters
[
'eo_params'
][
'disparity_scale'
]
# 14.2
image_sets
=
dbg_parameters
[
'extra_paths'
]
# list of dictionaries
pass
pass
...
@@ -162,10 +183,15 @@ index_gt = 2
...
@@ -162,10 +183,15 @@ index_gt = 2
index_gt_weight
=
3
index_gt_weight
=
3
index_heur_err
=
7
index_heur_err
=
7
index_nn_err
=
6
index_nn_err
=
6
index_mm
=
8
# max-min
index_fgbg_sngl
=
10
index_log
=
9
index_fgbg_neib
=
11
index_bad
=
10
index_num_neibs
=
11
index_mm
=
23
# 8 # max-min
index_log
=
24
# 9
index_bad
=
25
# 10
index_num_neibs
=
26
# 11
index_fgbg
=
[
index_fgbg_sngl
,
index_fgbg_neib
][
X_NEIBS
]
"""
"""
Debugging high 9-tile variations, removing error for all tiles with lower difference between max and min
Debugging high 9-tile variations, removing error for all tiles with lower difference between max and min
"""
"""
...
@@ -181,7 +207,21 @@ if not 'show' in FIGS_SAVESHOW:
...
@@ -181,7 +207,21 @@ if not 'show' in FIGS_SAVESHOW:
#for mode in ['train','infer']:
#for mode in ['train','infer']:
#for mode in ['infer']:
#for mode in ['infer']:
for
mode
in
[
'train'
]:
def
cross_out
(
plt
,
cross_out_mask
):
height
=
cross_out_mask
.
shape
[
0
]
width
=
cross_out_mask
.
shape
[
1
]
for
row
in
range
(
height
):
for
col
in
range
(
width
):
if
cross_out_mask
[
row
,
col
]:
xdata
=
[
col
-
0.3
,
col
+
0.3
]
ydata
=
[
row
-
0.3
,
row
+
0.3
]
plt
.
plot
(
xdata
,
ydata
,
color
=
X_COLOR
)
ydata
=
[
row
+
0.3
,
row
-
0.3
]
plt
.
plot
(
xdata
,
ydata
,
color
=
X_COLOR
)
for
mode
in
modes
:
# ['train']:
figs
=
[]
figs
=
[]
ffiles
=
[]
# no ext
ffiles
=
[]
# no ext
def
setlimsxy
(
lim_xy
):
def
setlimsxy
(
lim_xy
):
...
@@ -189,218 +229,106 @@ for mode in ['train']:
...
@@ -189,218 +229,106 @@ for mode in ['train']:
plt
.
xlim
(
min
(
lim_xy
[:
2
]),
max
(
lim_xy
[:
2
]))
plt
.
xlim
(
min
(
lim_xy
[:
2
]),
max
(
lim_xy
[:
2
]))
plt
.
ylim
(
max
(
lim_xy
[
2
:]),
min
(
lim_xy
[
2
:]))
plt
.
ylim
(
max
(
lim_xy
[
2
:]),
min
(
lim_xy
[
2
:]))
cumul_weights
=
None
cumul_weights
=
None
cmap_disp
=
plt
.
get_cmap
(
'viridis'
)
# ('cividis')
for
nfile
,
fpars
in
enumerate
(
fig_params
):
cmap_diff
=
plt
.
get_cmap
(
'coolwarm'
)
#('seismic') # ('viridis')
if
not
fpars
is
None
:
for
nfile
,
img_pars
in
enumerate
(
image_sets
):
if
not
img_pars
is
None
:
img_file
=
files
[
'result'
][
nfile
]
img_file
=
files
[
'result'
][
nfile
]
if
mode
==
'infer'
:
if
mode
==
'infer'
:
img_file
=
img_file
.
replace
(
'.npy'
,
'-infer.npy'
)
img_file
=
img_file
.
replace
(
'.npy'
,
'-infer.npy'
)
"""
print
(
"Processing image set: "
+
img_file
)
try:
# data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=True)
# data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
except:
print ("Image file does not exist:", img_file)
continue
"""
pass
data
,
labels
=
qsf
.
result_npy_prepare
(
img_file
,
ABSOLUTE_DISPARITY
,
fix_nan
=
True
,
insert_deltas
=
3
)
if
True
:
#TIFF_ONLY:
tiff_path
=
img_file
.
replace
(
'.npy'
,
'-test.tiff'
)
data
=
data
.
transpose
(
2
,
0
,
1
)
print
(
"Saving results to TIFF: "
+
tiff_path
)
imagej_tiffwriter
.
save
(
tiff_path
,
data
,
labels
=
labels
)
"""
Calculate histograms
"""
err_heur2
=
data
[
index_heur_err
]
*
data
[
index_heur_err
]
err_nn2
=
data
[
index_nn_err
]
*
data
[
index_nn_err
]
diff_log2
=
data
[
index_log
]
*
data
[
index_log
]
weights
=
(
(
data
[
index_gt
]
<
max_disp
)
&
(
err_heur2
<
max_target_err2
)
&
(
data
[
index_bad
]
<
max_bad
)
&
(
data
[
index_gt_weight
]
>=
min_strength
)
&
(
data
[
index_num_neibs
]
>=
min_neibs
)
&
#max_log_to_mm = 0.5 # difference between center average and center should be under this fraction of max-min (0 - disables feature)
(
data
[
index_log
]
<
max_log_to_mm
*
np
.
sqrt
(
data
[
index_mm
])
)
)
.
astype
(
data
.
dtype
)
# 0.0/1.1
#max_disp
#max_target_err
if
use_gt_weights
:
weights
*=
data
[
index_gt_weight
]
mm
=
data
[
index_mm
]
weh
=
np
.
nan_to_num
(
weights
*
err_heur2
)
wen
=
np
.
nan_to_num
(
weights
*
err_nn2
)
wel
=
np
.
nan_to_num
(
weights
*
diff_log2
)
hist_weights
,
bin_vals
=
np
.
histogram
(
a
=
mm
,
bins
=
num_bins
,
range
=
(
0.0
,
max_diff
),
weights
=
weights
,
density
=
False
)
hist_err_heur2
,
_
=
np
.
histogram
(
a
=
mm
,
bins
=
num_bins
,
range
=
(
0.0
,
max_diff
),
weights
=
weh
,
density
=
False
)
hist_err_nn2
,
_
=
np
.
histogram
(
a
=
mm
,
bins
=
num_bins
,
range
=
(
0.0
,
max_diff
),
weights
=
wen
,
density
=
False
)
hist_diff_log2
,
_
=
np
.
histogram
(
a
=
mm
,
bins
=
num_bins
,
range
=
(
0.0
,
max_diff
),
weights
=
wel
,
density
=
False
)
if
cumul_weights
is
None
:
cumul_weights
=
hist_weights
cumul_err_heur2
=
hist_err_heur2
cumul_err_nn2
=
hist_err_nn2
cumul_diff_log2
=
hist_diff_log2
else
:
cumul_weights
+=
hist_weights
cumul_err_heur2
+=
hist_err_heur2
cumul_err_nn2
+=
hist_err_nn2
cumul_diff_log2
+=
hist_diff_log2
hist_err_heur2
=
np
.
nan_to_num
(
hist_err_heur2
/
hist_weights
)
hist_err_nn2
=
np
.
nan_to_num
(
hist_err_nn2
/
hist_weights
)
hist_gain2
=
np
.
nan_to_num
(
hist_err_heur2
/
hist_err_nn2
)
hist_gain
=
np
.
sqrt
(
hist_gain2
)
hist_diff_log2
=
np
.
nan_to_num
(
hist_diff_log2
/
hist_weights
)
print
(
"hist_err_heur2"
,
end
=
" "
)
print
(
np
.
sqrt
(
hist_err_heur2
))
print
(
"hist_err_nn2"
,
end
=
" "
)
print
(
np
.
sqrt
(
hist_err_nn2
))
print
(
"hist_gain"
,
end
=
" "
)
print
(
hist_gain
)
print
(
"hist_diff_log2"
,
end
=
" "
)
print
(
np
.
sqrt
(
hist_diff_log2
))
if
min_diff
>
0.0
:
pass
good
=
(
mm
>
min_diff
)
.
astype
(
mm
.
dtype
)
good
/=
good
# good - 1, bad - nan
data
[
index_heur_err
]
*=
good
data
[
index_nn_err
]
*=
good
data
=
data
.
transpose
(
1
,
2
,
0
)
if
TIFF_ONLY
:
data
,
labels
=
qsf
.
result_npy_prepare
(
img_file
,
ABSOLUTE_DISPARITY
,
fix_nan
=
True
,
insert_deltas
=
3
)
continue
cross_out_mask
=
data
[
...
,
index_fgbg
]
<
0.5
#data.shape = (15,20,27)
# for subindex, rng in enumerate(fpars['ranges']):
lim_val
=
img_pars
[
'range'
]
# rng['lim_val']
lim_val
[
0
]
-=
ERR_AMPL
lim_xy
=
[
-
0.5
,
IMG_WIDTH
-
0.5
,
-
0.5
,
IMG_HEIGHT
-
0.5
]
# rng['lim_xy']
for
subindex
,
rng
in
enumerate
(
fpars
[
'ranges'
]):
#start new image page
lim_val
=
rng
[
'lim_val'
]
lim_xy
=
rng
[
'lim_xy'
]
fig
=
plt
.
figure
(
figsize
=
FIGSIZE
)
fig
=
plt
.
figure
(
figsize
=
FIGSIZE
)
fig
.
canvas
.
set_window_title
(
fpars
[
'name'
])
fig
.
canvas
.
set_window_title
(
img_pars
[
'title'
])
fig
.
suptitle
(
fpars
[
'name'
])
fig
.
suptitle
(
img_pars
[
'title'
])
# Create EO DSI image
# load tiff image
img_ds_main
=
ijt
.
imagej_tiff
(
os
.
path
.
join
(
extra_path
,
img_pars
[
'dsi_path'
]
))
ds_main
=
img_ds_main
.
image
[
...
,
img_pars
[
'dsi_slice'
]]
*
eo_disparity_scale
ds_main
=
np
.
maximum
(
ds_main
,
lim_val
[
0
])
ds_main
=
np
.
minimum
(
ds_main
,
lim_val
[
1
])
ax_conf
=
plt
.
subplot
(
322
)
ax_conf
.
set_title
(
"Hi-res camera disparity map"
)
plt
.
imshow
(
ds_main
,
vmin
=
lim_val
[
0
],
vmax
=
lim_val
[
1
],
cmap
=
cmap_disp
)
setlimsxy
([
-
0.5
,
eo_width
-
0.5
,
-
0.5
,
eo_height
-
0.5
])
if
not
eo_woi
is
None
:
pass
# show frame
xdata
=
[
eo_woi
[
'x'
],
eo_woi
[
'x'
]
+
eo_woi
[
'width'
],
eo_woi
[
'x'
]
+
eo_woi
[
'width'
],
eo_woi
[
'x'
],
eo_woi
[
'x'
]]
ydata
=
[
eo_woi
[
'y'
],
eo_woi
[
'y'
],
eo_woi
[
'y'
]
+
eo_woi
[
'height'
],
eo_woi
[
'y'
]
+
eo_woi
[
'height'
],
eo_woi
[
'y'
]]
plt
.
plot
(
xdata
,
ydata
,
color
=
WOI_COLOR
)
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
'''
# Ground truth confidence - to be replaced
ax_conf=plt.subplot(322)
ax_conf=plt.subplot(322)
ax_conf.set_title("Ground truth confidence")
ax_conf.set_title("Ground truth confidence")
# fig.suptitle("Groud truth confidence")
plt.imshow(data[...,qsf.GT_CONF], vmin=0, vmax=CONF_MAX, cmap='gray')
plt.imshow(data[...,qsf.GT_CONF], vmin=0, vmax=CONF_MAX, cmap='gray')
if not lim_xy is None:
if not lim_xy is None:
pass # show frame
pass # show frame
xdata=[min(lim_xy[:2]),max(lim_xy[:2]),max(lim_xy[:2]),min(lim_xy[:2]),min(lim_xy[:2])]
xdata=[min(lim_xy[:2]),max(lim_xy[:2]),max(lim_xy[:2]),min(lim_xy[:2]),min(lim_xy[:2])]
ydata=[min(lim_xy[2:]),min(lim_xy[2:]),max(lim_xy[2:]),max(lim_xy[2:]),min(lim_xy[2:])]
ydata=[min(lim_xy[2:]),min(lim_xy[2:]),max(lim_xy[2:]),max(lim_xy[2:]),min(lim_xy[2:])]
plt.plot(xdata,ydata,color=WOI_COLOR)
plt.plot(xdata,ydata,color=WOI_COLOR)
# setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
plt.colorbar(orientation='vertical') # location='bottom')
'''
ax_gtd
=
plt
.
subplot
(
321
)
ax_gtd
=
plt
.
subplot
(
321
)
ax_gtd
.
set_title
(
"Ground truth disparity map"
)
ax_gtd
.
set_title
(
"Ground truth disparity map"
)
plt
.
imshow
(
data
[
...
,
qsf
.
GT_DISP
],
vmin
=
lim_val
[
0
],
vmax
=
lim_val
[
1
]
)
plt
.
imshow
(
data
[
...
,
qsf
.
GT_DISP
],
vmin
=
lim_val
[
0
],
vmax
=
lim_val
[
1
],
cmap
=
cmap_disp
)
setlimsxy
(
lim_xy
)
setlimsxy
(
lim_xy
)
cross_out
(
plt
,
cross_out_mask
)
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
ax_hed
=
plt
.
subplot
(
323
)
ax_hed
=
plt
.
subplot
(
323
)
ax_hed
.
set_title
(
"Heuristic disparity map"
)
ax_hed
.
set_title
(
"Heuristic disparity map"
)
plt
.
imshow
(
data
[
...
,
qsf
.
HEUR_NAN
],
vmin
=
lim_val
[
0
],
vmax
=
lim_val
[
1
]
)
plt
.
imshow
(
data
[
...
,
qsf
.
HEUR_NAN
],
vmin
=
lim_val
[
0
],
vmax
=
lim_val
[
1
],
cmap
=
cmap_disp
)
setlimsxy
(
lim_xy
)
setlimsxy
(
lim_xy
)
cross_out
(
plt
,
cross_out_mask
)
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
ax_nnd
=
plt
.
subplot
(
325
)
ax_nnd
=
plt
.
subplot
(
325
)
ax_nnd
.
set_title
(
"Network disparity output"
)
ax_nnd
.
set_title
(
"Network disparity output"
)
plt
.
imshow
(
data
[
...
,
qsf
.
NN_NAN
],
vmin
=
lim_val
[
0
],
vmax
=
lim_val
[
1
]
)
plt
.
imshow
(
data
[
...
,
qsf
.
NN_NAN
],
vmin
=
lim_val
[
0
],
vmax
=
lim_val
[
1
],
cmap
=
cmap_disp
)
setlimsxy
(
lim_xy
)
setlimsxy
(
lim_xy
)
cross_out
(
plt
,
cross_out_mask
)
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
ax_hee
=
plt
.
subplot
(
324
)
ax_hee
=
plt
.
subplot
(
324
)
ax_hee
.
set_title
(
"Heuristic disparity error"
)
ax_hee
.
set_title
(
"Heuristic disparity error"
)
plt
.
imshow
(
data
[
...
,
qsf
.
HEUR_DIFF
],
vmin
=-
ERR_AMPL
,
vmax
=
ERR_AMPL
)
cross_out
(
plt
,
cross_out_mask
)
plt
.
imshow
(
data
[
...
,
qsf
.
HEUR_DIFF
],
vmin
=-
ERR_AMPL
,
vmax
=
ERR_AMPL
,
cmap
=
cmap_diff
)
setlimsxy
(
lim_xy
)
setlimsxy
(
lim_xy
)
cross_out
(
plt
,
cross_out_mask
)
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
ax_nne
=
plt
.
subplot
(
326
)
ax_nne
=
plt
.
subplot
(
326
)
ax_nne
.
set_title
(
"Network disparity error"
)
ax_nne
.
set_title
(
"Network disparity error"
)
plt
.
imshow
(
data
[
...
,
qsf
.
NN_DIFF
],
vmin
=-
ERR_AMPL
,
vmax
=
ERR_AMPL
)
plt
.
imshow
(
data
[
...
,
qsf
.
NN_DIFF
],
vmin
=-
ERR_AMPL
,
vmax
=
ERR_AMPL
,
cmap
=
cmap_diff
)
setlimsxy
(
lim_xy
)
setlimsxy
(
lim_xy
)
cross_out
(
plt
,
cross_out_mask
)
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
plt
.
colorbar
(
orientation
=
'vertical'
)
# location='bottom')
plt
.
tight_layout
(
rect
=
[
0
,
0
,
1
,
TIGHT_TOP
],
h_pad
=
TIGHT_HPAD
,
w_pad
=
TIGHT_WPAD
)
plt
.
tight_layout
(
rect
=
[
0
,
0
,
1
,
TIGHT_TOP
],
h_pad
=
TIGHT_HPAD
,
w_pad
=
TIGHT_WPAD
)
figs
.
append
(
fig
)
figs
.
append
(
fig
)
fb_noext
=
os
.
path
.
splitext
(
os
.
path
.
basename
(
img_file
))[
0
]
#
fb_noext
=
os
.
path
.
splitext
(
os
.
path
.
basename
(
img_file
))[
0
]
#
if
subindex
>
0
:
#
if subindex > 0:
if
subindex
<
10
:
#
if subindex < 10:
fb_noext
+=
"abcdefghi"
[
subindex
-
1
]
#
fb_noext+="abcdefghi"[subindex-1]
else
:
#
else:
fb_noext
+=
"-"
+
str
(
subindex
)
#
fb_noext+="-"+str(subindex)
ffiles
.
append
(
fb_noext
)
ffiles
.
append
(
fb_noext
)
pass
pass
if
True
:
cumul_err_heur2
=
np
.
nan_to_num
(
cumul_err_heur2
/
cumul_weights
)
cumul_err_nn2
=
np
.
nan_to_num
(
cumul_err_nn2
/
cumul_weights
)
cumul_gain2
=
np
.
nan_to_num
(
cumul_err_heur2
/
cumul_err_nn2
)
cumul_gain
=
np
.
sqrt
(
cumul_gain2
)
cumul_diff_log2
=
np
.
nan_to_num
(
cumul_diff_log2
/
cumul_weights
)
print
(
"cumul_weights"
,
end
=
" "
)
print
(
cumul_weights
)
print
(
"cumul_err_heur"
,
end
=
" "
)
print
(
np
.
sqrt
(
cumul_err_heur2
))
print
(
"cumul_err_nn"
,
end
=
" "
)
print
(
np
.
sqrt
(
cumul_err_nn2
))
print
(
"cumul_gain"
,
end
=
" "
)
print
(
cumul_gain
)
print
(
"cumul_diff_log2"
,
end
=
" "
)
print
(
np
.
sqrt
(
cumul_diff_log2
))
fig
,
ax1
=
plt
.
subplots
()
ax1
.
set_xlabel
(
'3x3 tiles ground truth disparity max-min (pix)'
)
ax1
.
set_ylabel
(
'RMSE
\n
(pix)'
,
color
=
'black'
,
rotation
=
'horizontal'
)
ax1
.
yaxis
.
set_label_coords
(
-
0.045
,
0.92
)
ax1
.
plot
(
bin_vals
[
0
:
-
1
],
np
.
sqrt
(
cumul_err_nn2
),
'tab:red'
,
label
=
"network disparity RMSE"
)
ax1
.
plot
(
bin_vals
[
0
:
-
1
],
np
.
sqrt
(
cumul_err_heur2
),
'tab:green'
,
label
=
"heuristic disparity RMSE"
)
ax1
.
plot
(
bin_vals
[
0
:
-
1
],
np
.
sqrt
(
cumul_diff_log2
),
'tab:cyan'
,
label
=
"ground truth LoG"
)
ax1
.
tick_params
(
axis
=
'y'
,
labelcolor
=
'black'
)
ax2
=
ax1
.
twinx
()
# instantiate a second axes that shares the same x-axis
ax2
.
set_ylabel
(
'weight'
,
color
=
'black'
,
rotation
=
'horizontal'
)
# we already handled the x-label with ax1
ax2
.
yaxis
.
set_label_coords
(
1.06
,
1.0
)
ax2
.
plot
(
bin_vals
[
0
:
-
1
],
cumul_weights
,
color
=
'grey'
,
dashes
=
[
6
,
2
],
label
=
'weights = n_tiles * gt_confidence'
)
ax1
.
legend
(
loc
=
"upper left"
,
bbox_to_anchor
=
(
0.2
,
1.0
))
ax2
.
legend
(
loc
=
"lower right"
,
bbox_to_anchor
=
(
1.0
,
0.1
))
"""
fig = plt.figure(figsize=FIGSIZE)
fig.canvas.set_window_title('Cumulative')
fig.suptitle('Difference to GT')
# ax_conf=plt.subplot(322)
ax_conf=plt.subplot(211)
ax_conf.set_title("RMS vs max9-min9")
plt.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2),'red',
bin_vals[0:-1], np.sqrt(cumul_err_nn2),'green',
bin_vals[0:-1], np.sqrt(cumul_diff_log2),'blue')
figs.append(fig)
ffiles.append('cumulative')
ax_conf=plt.subplot(212)
ax_conf.set_title("weights vs max9-min9")
plt.plot(bin_vals[0:-1], cumul_weights,'black')
"""
figs
.
append
(
fig
)
ffiles
.
append
(
'cumulative'
)
pass
#bin_vals[0:-1]
# fig.suptitle("Groud truth confidence")
#
#
#how to allow adjustment before applying tight_layout?
#how to allow adjustment before applying tight_layout?
...
...
nn_eval_lwir_00.py
0 → 100644
View file @
4b02b283
#!/usr/bin/env python3
__copyright__
=
"Copyright 2018, Elphel, Inc."
__license__
=
"GPL-3.0+"
__email__
=
"andrey@elphel.com"
#from PIL import Image
import
os
import
sys
#import glob
#import numpy as np
import
imagej_tiffwriter
import
time
import
matplotlib.pyplot
as
plt
from
matplotlib.backends.backend_pdf
import
PdfPages
import
qcstereo_functions
as
qsf
import
numpy
as
np
#import xml.etree.ElementTree as ET
qsf
.
TIME_START
=
time
.
time
()
qsf
.
TIME_LAST
=
qsf
.
TIME_START
IMG_WIDTH
=
20
# 324 # tiles per image row Defined in config
DEBUG_LEVEL
=
1
try
:
conf_file
=
sys
.
argv
[
1
]
except
IndexError
:
print
(
"Configuration path is required as a first argument. Optional second argument specifies root directory for data files"
)
exit
(
1
)
try
:
root_dir
=
sys
.
argv
[
2
]
except
IndexError
:
root_dir
=
os
.
path
.
dirname
(
conf_file
)
try
:
modes
=
[
sys
.
argv
[
3
]]
# train, infer
except
IndexError
:
modes
=
[
'train'
]
print
(
"Configuration file: "
+
conf_file
)
parameters
,
dirs
,
files
,
dbg_parameters
=
qsf
.
parseXmlConfig
(
conf_file
,
root_dir
)
"""
Temporarily for backward compatibility
"""
if
not
"SLOSS_CLIP"
in
parameters
:
parameters
[
'SLOSS_CLIP'
]
=
0.5
print
(
"Old config, setting SLOSS_CLIP="
,
parameters
[
'SLOSS_CLIP'
])
"""
Defined in config file
"""
TILE_SIDE
,
TILE_LAYERS
,
TWO_TRAINS
,
NET_ARCH1
,
NET_ARCH2
=
[
None
]
*
5
ABSOLUTE_DISPARITY
,
SYM8_SUB
,
WLOSS_LAMBDA
,
SLOSS_LAMBDA
,
SLOSS_CLIP
=
[
None
]
*
5
SPREAD_CONVERGENCE
,
INTER_CONVERGENCE
,
HOR_FLIP
,
DISP_DIFF_CAP
,
DISP_DIFF_SLOPE
=
[
None
]
*
5
CLUSTER_RADIUS
,
ABSOLUTE_DISPARITY
=
[
None
]
*
2
FGBG_MODE
=
1
# 0 - do not filter by single-plane, 1 - remove split plabnes tiles, 2 - remove split planes and neighbors
FIGS_EXTENSIONS
=
[
'png'
,
'pdf'
,
'svg'
]
#FIGS_ESXTENSIONS = ['png','pdf','svg']
EVAL_MODES
=
[
"train"
,
"infer"
]
FIGS_SAVESHOW
=
[
'save'
,
'show'
]
globals
()
.
update
(
parameters
)
try
:
FIGS_EXTENSIONS
=
globals
()[
'FIGS_ESXTENSIONS'
]
# fixing typo in configs
except
:
pass
#exit(0)
TILE_SIZE
=
TILE_SIDE
*
TILE_SIDE
# == 81
FEATURES_PER_TILE
=
TILE_LAYERS
*
TILE_SIZE
# == 324
BATCH_SIZE
=
([
1
,
2
][
TWO_TRAINS
])
*
2
*
1000
//
25
# == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
SUFFIX
=
(
str
(
NET_ARCH1
)
+
'-'
+
str
(
NET_ARCH2
)
+
([
"R"
,
"A"
][
ABSOLUTE_DISPARITY
])
+
([
"NS"
,
"S8"
][
SYM8_SUB
])
+
"WLAM"
+
str
(
WLOSS_LAMBDA
)
+
"SLAM"
+
str
(
SLOSS_LAMBDA
)
+
"SCLP"
+
str
(
SLOSS_CLIP
)
+
([
'_nG'
,
'_G'
][
SPREAD_CONVERGENCE
])
+
([
'_nI'
,
'_I'
][
INTER_CONVERGENCE
])
+
([
'_nHF'
,
"_HF"
][
HOR_FLIP
])
+
(
'_CP'
+
str
(
DISP_DIFF_CAP
))
+
(
'_S'
+
str
(
DISP_DIFF_SLOPE
))
)
##############################################################################
cluster_size
=
(
2
*
CLUSTER_RADIUS
+
1
)
*
(
2
*
CLUSTER_RADIUS
+
1
)
center_tile_index
=
2
*
CLUSTER_RADIUS
*
(
CLUSTER_RADIUS
+
1
)
qsf
.
prepareFiles
(
dirs
,
files
,
suffix
=
SUFFIX
)
#import tensorflow.contrib.slim as slim
#NN_DISP = 0
#HEUR_DISP = 1
#GT_DISP = 2
#GT_CONF = 3
#NN_NAN = 4
#HEUR_NAN = 5
#NN_DIFF = 6
#HEUR_DIFF = 7
# Now - more layers
CONF_MAX
=
0.7
ERR_AMPL
=
0.3
TIGHT_TOP
=
0.95
TIGHT_HPAD
=
1.0
TIGHT_WPAD
=
1.0
FIGSIZE
=
[
8.5
,
11.0
]
WOI_COLOR
=
"red"
X_COLOR
=
"grey"
X_NEIBS
=
False
TRANSPARENT
=
True
# for export
#dbg_parameters
def
get_fig_params
(
disparity_ranges
):
fig_params
=
[]
for
dr
in
disparity_ranges
:
if
dr
[
-
1
][
0
]
==
'-'
:
fig_params
.
append
(
None
)
else
:
subs
=
[]
for
s
in
dr
[:
-
1
]:
mm
=
s
[:
2
]
try
:
lims
=
s
[
2
]
except
IndexError
:
lims
=
None
subs
.
append
({
'lim_val'
:
mm
,
'lim_xy'
:
lims
})
fig_params
.
append
({
'name'
:
dr
[
-
1
],
'ranges'
:
subs
})
return
fig_params
#try:
fig_params
=
get_fig_params
(
dbg_parameters
[
'disparity_ranges'
])
pass
#temporary:
TIFF_ONLY
=
False
# True
#max_bad = 2.5 # excludes only direct bad
max_bad
=
2.5
#2.5 # 1.5 # excludes only direct bad
max_diff
=
1.5
# 2.0 # 5.0 # maximal max-min difference
max_target_err
=
1.0
# 0.5 # maximal max-min difference
max_disp
=
5.0
min_strength
=
0.18
#ignore tiles below
min_neibs
=
1
max_log_to_mm
=
0.5
# difference between center average and center should be under this fraction of max-min (0 - disables feature)
#num_bins = 256 # number of histogram bins
num_bins
=
15
# 50 # number of histogram bins
use_gt_weights
=
True
# False # True
index_gt
=
2
index_gt_weight
=
3
index_heur_err
=
7
index_nn_err
=
6
index_fgbg_sngl
=
10
index_fgbg_neib
=
11
index_mm
=
23
# 8 # max-min
index_log
=
24
# 9
index_bad
=
25
# 10
index_num_neibs
=
26
# 11
index_fgbg
=
[
index_fgbg_sngl
,
index_fgbg_neib
][
X_NEIBS
]
"""
Debugging high 9-tile variations, removing error for all tiles with lower difference between max and min
"""
#min_diff = 0.25 # remove all flat tiles with spread less than this (do not show on heuristic/network disparity errors subplots
min_diff
=
0
# remove all flat tiles with spread less than this
max_target_err2
=
max_target_err
*
max_target_err
if
not
'show'
in
FIGS_SAVESHOW
:
plt
.
ioff
()
#for mode in ['train','infer']:
#for mode in ['infer']:
def
cross_out
(
plt
,
cross_out_mask
):
height
=
cross_out_mask
.
shape
[
0
]
width
=
cross_out_mask
.
shape
[
1
]
for
row
in
range
(
height
):
for
col
in
range
(
width
):
if
cross_out_mask
[
row
,
col
]:
xdata
=
[
col
-
0.3
,
col
+
0.3
]
ydata
=
[
row
-
0.3
,
row
+
0.3
]
plt
.
plot
(
xdata
,
ydata
,
color
=
X_COLOR
)
ydata
=
[
row
+
0.3
,
row
-
0.3
]
plt
.
plot
(
xdata
,
ydata
,
color
=
X_COLOR
)
# Per-mode ('train'/'infer') results pass: writes a multi-layer TIFF per image,
# accumulates weighted error histograms binned by local disparity spread
# (3x3 max-min), renders 6-pane comparison figures and saves/shows them.
for mode in modes: # ['train']:
    figs = []   # collected matplotlib figures, one per image/sub-range
    ffiles = [] # no ext
    def setlimsxy(lim_xy):
        # Restrict current axes to the window of interest [x0, x1, y0, y1];
        # y limits are reversed to keep image row order. None => full frame.
        if not lim_xy is None:
            plt.xlim(min(lim_xy[:2]), max(lim_xy[:2]))
            plt.ylim(max(lim_xy[2:]), min(lim_xy[2:]))
    cumul_weights = None # histogram accumulators, created on first processed image
    for nfile, fpars in enumerate(fig_params):
        if not fpars is None:
            img_file = files['result'][nfile]
            if mode == 'infer':
                img_file = img_file.replace('.npy', '-infer.npy')
            """
            try:
                # data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=True)
                # data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
                data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
            except:
                print ("Image file does not exist:", img_file)
                continue
            """
            pass
            # Load result layers (height x width x layers) and their labels.
            data, labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
            if True: #TIFF_ONLY:
                tiff_path = img_file.replace('.npy', '-test.tiff')
                # ImageJ TIFF writer expects layers-first order.
                data = data.transpose(2, 0, 1)
                print("Saving results to TIFF: " + tiff_path)
                imagej_tiffwriter.save(tiff_path, data, labels=labels)
            """
            Calculate histograms
            """
            # Squared errors / LoG per tile (data is layers-first here).
            err_heur2 = data[index_heur_err] * data[index_heur_err]
            err_nn2 =   data[index_nn_err] *   data[index_nn_err]
            diff_log2 = data[index_log] *      data[index_log]
            # Tile selection mask: valid GT range, bounded heuristic error,
            # few "bad" tiles, sufficient GT strength and neighbor count,
            # and LoG limited relative to the local max-min spread.
            weights = ((data[index_gt] <         max_disp) &
                       (err_heur2 <              max_target_err2) &
                       (data[index_bad] <        max_bad) &
                       (data[index_gt_weight] >= min_strength) &
                       (data[index_num_neibs] >= min_neibs) &
                       #max_log_to_mm = 0.5 # difference between center average and center should be under this fraction of max-min (0 - disables feature)
                       (data[index_log] < max_log_to_mm * np.sqrt(data[index_mm]))
                       ).astype(data.dtype) # 0.0/1.1
            #max_disp
            #max_target_err
            if use_gt_weights:
                weights *= data[index_gt_weight]
            mm = data[index_mm] # 3x3 neighborhood max-min (disparity spread)
            weh = np.nan_to_num(weights * err_heur2)
            wen = np.nan_to_num(weights * err_nn2)
            wel = np.nan_to_num(weights * diff_log2)
            # Weighted histograms of squared errors, binned by local spread.
            hist_weights, bin_vals = np.histogram(a=mm, bins=num_bins, range=(0.0, max_diff), weights=weights, density=False)
            hist_err_heur2, _ =      np.histogram(a=mm, bins=num_bins, range=(0.0, max_diff), weights=weh, density=False)
            hist_err_nn2, _ =        np.histogram(a=mm, bins=num_bins, range=(0.0, max_diff), weights=wen, density=False)
            hist_diff_log2, _ =      np.histogram(a=mm, bins=num_bins, range=(0.0, max_diff), weights=wel, density=False)
            # Accumulate over all processed images for the cumulative plot.
            if cumul_weights is None:
                cumul_weights =   hist_weights
                cumul_err_heur2 = hist_err_heur2
                cumul_err_nn2 =   hist_err_nn2
                cumul_diff_log2 = hist_diff_log2
            else:
                cumul_weights +=   hist_weights
                cumul_err_heur2 += hist_err_heur2
                cumul_err_nn2 +=   hist_err_nn2
                cumul_diff_log2 += hist_diff_log2
            # Normalize to per-bin mean squared error (0 where bin weight is 0).
            hist_err_heur2 = np.nan_to_num(hist_err_heur2 / hist_weights)
            hist_err_nn2 =   np.nan_to_num(hist_err_nn2 /   hist_weights)
            hist_gain2 =     np.nan_to_num(hist_err_heur2 / hist_err_nn2)
            hist_gain =      np.sqrt(hist_gain2) # heuristic/network RMSE ratio per bin
            hist_diff_log2 = np.nan_to_num(hist_diff_log2 / hist_weights)
            print("hist_err_heur2", end=" ")
            print(np.sqrt(hist_err_heur2))
            print("hist_err_nn2", end=" ")
            print(np.sqrt(hist_err_nn2))
            print("hist_gain", end=" ")
            print(hist_gain)
            print("hist_diff_log2", end=" ")
            print(np.sqrt(hist_diff_log2))
            if min_diff > 0.0:
                pass
                # Blank out error layers for "flat" tiles: good is 1 where the
                # spread exceeds min_diff, NaN (0/0) elsewhere.
                good = (mm > min_diff).astype(mm.dtype)
                good /= good # good - 1, bad - nan
                data[index_heur_err] *= good
                data[index_nn_err] *=   good
            data = data.transpose(1, 2, 0) # back to height x width x layers for imshow
            if TIFF_ONLY:
                continue
            # Tiles classified as background are crossed out on the plots.
            cross_out_mask = data[..., index_fgbg] < 0.5
            #data.shape = (15,20,27)
            for subindex, rng in enumerate(fpars['ranges']):
                lim_val = rng['lim_val'] # disparity display range [vmin, vmax]
                lim_xy = rng['lim_xy']   # optional window of interest, or None
                fig = plt.figure(figsize=FIGSIZE)
                fig.canvas.set_window_title(fpars['name'])
                fig.suptitle(fpars['name'])
                ax_conf = plt.subplot(322)
                ax_conf.set_title("Ground truth confidence")
                # fig.suptitle("Groud truth confidence")
                plt.imshow(data[..., qsf.GT_CONF], vmin=0, vmax=CONF_MAX, cmap='gray')
                if not lim_xy is None:
                    pass # show frame
                    xdata = [min(lim_xy[:2]), max(lim_xy[:2]), max(lim_xy[:2]), min(lim_xy[:2]), min(lim_xy[:2])]
                    ydata = [min(lim_xy[2:]), min(lim_xy[2:]), max(lim_xy[2:]), max(lim_xy[2:]), min(lim_xy[2:])]
                    plt.plot(xdata, ydata, color=WOI_COLOR)
                # setlimsxy(lim_xy)
                plt.colorbar(orientation='vertical') # location='bottom')
                ax_gtd = plt.subplot(321)
                ax_gtd.set_title("Ground truth disparity map")
                plt.imshow(data[..., qsf.GT_DISP], vmin=lim_val[0], vmax=lim_val[1])
                setlimsxy(lim_xy)
                cross_out(plt, cross_out_mask)
                plt.colorbar(orientation='vertical') # location='bottom')
                ax_hed = plt.subplot(323)
                ax_hed.set_title("Heuristic disparity map")
                plt.imshow(data[..., qsf.HEUR_NAN], vmin=lim_val[0], vmax=lim_val[1])
                setlimsxy(lim_xy)
                cross_out(plt, cross_out_mask)
                plt.colorbar(orientation='vertical') # location='bottom')
                ax_nnd = plt.subplot(325)
                ax_nnd.set_title("Network disparity output")
                plt.imshow(data[..., qsf.NN_NAN], vmin=lim_val[0], vmax=lim_val[1])
                setlimsxy(lim_xy)
                cross_out(plt, cross_out_mask)
                plt.colorbar(orientation='vertical') # location='bottom')
                ax_hee = plt.subplot(324)
                ax_hee.set_title("Heuristic disparity error")
                # NOTE(review): cross_out() is called here before imshow and
                # again after setlimsxy below - the first call looks redundant.
                cross_out(plt, cross_out_mask)
                plt.imshow(data[..., qsf.HEUR_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL)
                setlimsxy(lim_xy)
                cross_out(plt, cross_out_mask)
                plt.colorbar(orientation='vertical') # location='bottom')
                ax_nne = plt.subplot(326)
                ax_nne.set_title("Network disparity error")
                plt.imshow(data[..., qsf.NN_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL)
                setlimsxy(lim_xy)
                cross_out(plt, cross_out_mask)
                plt.colorbar(orientation='vertical') # location='bottom')
                plt.tight_layout(rect=[0, 0, 1, TIGHT_TOP], h_pad=TIGHT_HPAD, w_pad=TIGHT_WPAD)
                figs.append(fig)
                fb_noext = os.path.splitext(os.path.basename(img_file))[0]
                #
                # Suffix per-sub-range figure names: 'a'..'i' for 1..9, '-NN' above.
                if subindex > 0:
                    if subindex < 10:
                        fb_noext += "abcdefghi"[subindex - 1]
                    else:
                        fb_noext += "-" + str(subindex)
                ffiles.append(fb_noext)
                pass
    # Disabled cumulative RMSE-vs-spread summary plot over all images.
    if False: # True:
        cumul_err_heur2 = np.nan_to_num(cumul_err_heur2 / cumul_weights)
        cumul_err_nn2 =   np.nan_to_num(cumul_err_nn2 /   cumul_weights)
        cumul_gain2 =     np.nan_to_num(cumul_err_heur2 / cumul_err_nn2)
        cumul_gain =      np.sqrt(cumul_gain2)
        cumul_diff_log2 = np.nan_to_num(cumul_diff_log2 / cumul_weights)
        print("cumul_weights", end=" ")
        print(cumul_weights)
        print("cumul_err_heur", end=" ")
        print(np.sqrt(cumul_err_heur2))
        print("cumul_err_nn", end=" ")
        print(np.sqrt(cumul_err_nn2))
        print("cumul_gain", end=" ")
        print(cumul_gain)
        print("cumul_diff_log2", end=" ")
        print(np.sqrt(cumul_diff_log2))
        fig, ax1 = plt.subplots()
        ax1.set_xlabel('3x3 tiles ground truth disparity max-min (pix)')
        ax1.set_ylabel('RMSE\n(pix)', color='black', rotation='horizontal')
        ax1.yaxis.set_label_coords(-0.045, 0.92)
        ax1.plot(bin_vals[0:-1], np.sqrt(cumul_err_nn2), 'tab:red', label="network disparity RMSE")
        ax1.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2), 'tab:green', label="heuristic disparity RMSE")
        ax1.plot(bin_vals[0:-1], np.sqrt(cumul_diff_log2), 'tab:cyan', label="ground truth LoG")
        ax1.tick_params(axis='y', labelcolor='black')
        ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
        ax2.set_ylabel('weight', color='black', rotation='horizontal') # we already handled the x-label with ax1
        ax2.yaxis.set_label_coords(1.06, 1.0)
        ax2.plot(bin_vals[0:-1], cumul_weights, color='grey', dashes=[6, 2], label='weights = n_tiles * gt_confidence')
        ax1.legend(loc="upper left", bbox_to_anchor=(0.2, 1.0))
        ax2.legend(loc="lower right", bbox_to_anchor=(1.0, 0.1))
        """
        fig = plt.figure(figsize=FIGSIZE)
        fig.canvas.set_window_title('Cumulative')
        fig.suptitle('Difference to GT')
        # ax_conf=plt.subplot(322)
        ax_conf=plt.subplot(211)
        ax_conf.set_title("RMS vs max9-min9")
        plt.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2),'red',
                 bin_vals[0:-1], np.sqrt(cumul_err_nn2),'green',
                 bin_vals[0:-1], np.sqrt(cumul_diff_log2),'blue')
        figs.append(fig)
        ffiles.append('cumulative')
        ax_conf=plt.subplot(212)
        ax_conf.set_title("weights vs max9-min9")
        plt.plot(bin_vals[0:-1], cumul_weights,'black')
        """
        figs.append(fig)
        ffiles.append('cumulative')
        pass
        #bin_vals[0:-1]
    # fig.suptitle("Groud truth confidence")
    #
    #how to allow adjustment before applying tight_layout?
    pass
    for fig in figs:
        fig.tight_layout(rect=[0, 0, 1, TIGHT_TOP], h_pad=TIGHT_HPAD, w_pad=TIGHT_WPAD)
    # Save figures (individual image files and/or one multi-page PDF per mode).
    if FIGS_EXTENSIONS and figs and 'save' in FIGS_SAVESHOW:
        try:
            print("Creating output directory for figures: ", dirs['figures'])
            os.makedirs(dirs['figures'])
        except:
            pass # directory already exists
        pp = None
        if 'pdf' in FIGS_EXTENSIONS:
            if mode == 'infer':
                pdf_path = os.path.join(dirs['figures'], "figures-infer%s.pdf" % str(min_diff))
            else:
                pdf_path = os.path.join(dirs['figures'], "figures-train%s.pdf" % str(min_diff))
            pp = PdfPages(pdf_path)
        for fb_noext, fig in zip(ffiles, figs):
            for ext in FIGS_EXTENSIONS:
                if ext == 'pdf':
                    pass
                    fig.savefig(pp, format='pdf')
                else:
                    if mode == 'infer':
                        noext = fb_noext + '-infer'
                    else:
                        noext = fb_noext + '-train'
                    fig.savefig(
                        fname=os.path.join(dirs['figures'], noext + "." + ext),
                        transparent=TRANSPARENT,
                        )
        pass
        if pp:
            pp.close()
    if 'show' in FIGS_SAVESHOW:
        plt.show()
#FIGS_ESXTENSIONS
#qsf.evaluateAllResults(result_files = files['result'],
#                       absolute_disparity = ABSOLUTE_DISPARITY,
#                       cluster_radius = CLUSTER_RADIUS)
# Final status report. Use sys.exit() rather than the site-injected exit()
# builtin, which is not guaranteed to exist (e.g. when run with python -S).
import sys

print("All done")
sys.exit(0)
qcstereo_network.py
View file @
4b02b283
...
@@ -110,21 +110,21 @@ def network_sub(input_tensor,
...
@@ -110,21 +110,21 @@ def network_sub(input_tensor,
fc_sym
.
append
(
slim
.
fully_connected
(
inp8
[
j
],
num_sym8
,
activation_fn
=
lrelu
,
scope
=
scp
,
reuse
=
reuse_this
))
fc_sym
.
append
(
slim
.
fully_connected
(
inp8
[
j
],
num_sym8
,
activation_fn
=
lrelu
,
scope
=
scp
,
reuse
=
reuse_this
))
if
not
reuse_this
:
if
not
reuse_this
:
with
tf
.
compat
.
v1
.
variable_scope
(
scp
,
reuse
=
True
)
:
# tf.AUTO_REUSE):
with
tf
.
compat
.
v1
.
variable_scope
(
scp
,
reuse
=
True
)
:
# tf.AUTO_REUSE):
inp_weights
.
append
(
tf
.
get_variable
(
'weights'
))
# ,shape=[inp.shape[1],num_outs]))
inp_weights
.
append
(
tf
.
compat
.
v1
.
get_variable
(
'weights'
))
# ,shape=[inp.shape[1],num_outs]))
if
num_non_sum
>
0
:
if
num_non_sum
>
0
:
reuse_this
=
reuse
reuse_this
=
reuse
scp
=
'g_fc_sub'
+
str
(
i
)
+
"r"
scp
=
'g_fc_sub'
+
str
(
i
)
+
"r"
fc_sym
.
append
(
slim
.
fully_connected
(
inp
,
num_non_sum
,
activation_fn
=
lrelu
,
scope
=
scp
,
reuse
=
reuse_this
))
fc_sym
.
append
(
slim
.
fully_connected
(
inp
,
num_non_sum
,
activation_fn
=
lrelu
,
scope
=
scp
,
reuse
=
reuse_this
))
if
not
reuse_this
:
if
not
reuse_this
:
with
tf
.
compat
.
v1
.
variable_scope
(
scp
,
reuse
=
True
)
:
# tf.AUTO_REUSE):
with
tf
.
compat
.
v1
.
variable_scope
(
scp
,
reuse
=
True
)
:
# tf.AUTO_REUSE):
inp_weights
.
append
(
tf
.
get_variable
(
'weights'
))
# ,shape=[inp.shape[1],num_outs]))
inp_weights
.
append
(
tf
.
compat
.
v1
.
get_variable
(
'weights'
))
# ,shape=[inp.shape[1],num_outs]))
fc
.
append
(
tf
.
concat
(
fc_sym
,
1
,
name
=
'sym_input_layer'
))
fc
.
append
(
tf
.
concat
(
fc_sym
,
1
,
name
=
'sym_input_layer'
))
else
:
else
:
scp
=
'g_fc_sub'
+
str
(
i
)
scp
=
'g_fc_sub'
+
str
(
i
)
fc
.
append
(
slim
.
fully_connected
(
inp
,
num_outs
,
activation_fn
=
lrelu
,
scope
=
scp
,
reuse
=
reuse
))
fc
.
append
(
slim
.
fully_connected
(
inp
,
num_outs
,
activation_fn
=
lrelu
,
scope
=
scp
,
reuse
=
reuse
))
if
not
reuse
:
if
not
reuse
:
with
tf
.
compat
.
v1
.
variable_scope
(
scp
,
reuse
=
True
)
:
# tf.AUTO_REUSE):
with
tf
.
compat
.
v1
.
variable_scope
(
scp
,
reuse
=
True
)
:
# tf.AUTO_REUSE):
inp_weights
.
append
(
tf
.
get_variable
(
'weights'
))
# ,shape=[inp.shape[1],num_outs]))
inp_weights
.
append
(
tf
.
compat
.
v1
.
get_variable
(
'weights'
))
# ,shape=[inp.shape[1],num_outs]))
return
fc
[
-
1
],
inp_weights
return
fc
[
-
1
],
inp_weights
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment