lwir-nn (commit 4b02b28381749fc6d3eff15ceb0dccaa4f2e606b)

.gitignore:
__pycache__
/.project
/.pydevproject
attic
*.log

README.md:
# lwir-nn
NN for LWIR 3D data

eclipse_project_setup/.project:
lwir-nn
org.python.pydev.PyDevBuilder
org.python.pydev.pythonNature

eclipse_project_setup/.pydevproject:
Default
python interpreter
/${PROJECT_DIR_NAME}
/usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/

explore_data10.py:
#!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
    else:
        raise ValueError("Unsupported ndarray dtype {} - expected float32, float64 or int64".format(ndarray.dtype))
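# Usage sketch (shapes here are illustrative only, not the pipeline's real ones):
#   corr2d = np.zeros((10, 4*81), dtype=np.float32)
#   to_feature = _dtype_feature(corr2d)  # picks tf.train.FloatList for float32
#   feat = to_feature(corr2d[0])         # tf.train.Feature for one tile row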
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
    if '.tfrecords' not in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
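# Usage sketch (hypothetical path): load a whole epoch back into numpy arrays,
# e.g. to verify what writeTFRewcordsEpoch() wrote:
#   corr2d, target_disparity, gt_ds = readTFRewcordsEpoch("/data_ssd/tf_data/train000")
#   print(corr2d.shape, target_disparity.shape, gt_ds.shape)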
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
        random_offset, # for modes 0..2 add a random offset in [-random_offset, +random_offset]; in mode 3 add a random offset to the GT average where there is no AUX data
pathTFR #TFR directory
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
    scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01'
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f_EXTRA'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list:
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
elif export_mode == 3:
disparity = img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
if export_mode == 3:
d_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] # still consider FG to be the real ground truth
s_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
else:
d_gt = disparity
s_gt = strength
    extra = np.concatenate((
        img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
        img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
        img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1)), 1)
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
        plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
        plt.imshow(s_gt)
plt.colorbar()
    # the next values will be modified in place when filling gaps, so copy them first
    d_gt = np.copy(d_gt)
    s_gt = np.copy(s_gt)
'''
fill gaps on ground truth slices only
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
    '''
    Assemble a synthetic image, selecting each tile from the nearest available disparity sweep file.
    Currently, even in mode 3 (aux), only sweep files are used (rounded to the nearest step). Consider
    using real measured GT_AUX data (not available currently as ImageJ output, need to modify + rerun).
    '''
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select random index from the list - even if no random (it will just be a 1-element list then)
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
if debug > 1:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
            corr2d_layer = dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
    if '.tfrecords' not in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
    os.makedirs(os.path.dirname(tfr_filename), exist_ok=True)
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
dtype_feature_extra = _dtype_feature(extra)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
e = extra[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z),
'extra': dtype_feature_extra(e)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
print()
sys.stdout.flush()
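# Usage sketch (hypothetical invocation; the ml directory follows the layout
# shown in the comments above):
#   writeTFRecordsFromImageSet(
#       "/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32", # model/version/ml_dir
#       0,                                  # export mode 0 - GT average
#       0.1,                                # random disparity offset
#       "/data_ssd/lwir_sets/tf_data_5x5")  # TFR output directory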
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
gap_tiles = []
gap_neibs = []
    rows = val.shape[0]
    cols = val.shape[1] # val and wght must have the same shape
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
    for npass in range (num_pass):
        num_new = 0 # count tiles filled for the first time in this pass (starting at 1 would disable the early exit below)
        max_diff = 0.0
        for tile, neibs in zip(gap_tiles, gap_neibs):
            swn = 0.0
            sw = 0.0
            swd = 0.0
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
                if wght[tile[0]][tile[1]] <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
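# Minimal self-check for fillGapsByLaplacian on synthetic data (not part of the
# LWIR pipeline): a constant disparity field with a single zero-weight gap should
# get the gap filled with approximately the same constant value.
def _demo_fillGapsByLaplacian():
    val = np.full((5, 5), 2.0)
    wght = np.ones((5, 5))
    val [2, 2] = 0.0 # unknown value at the gap
    wght[2, 2] = 0.0 # zero weight marks the gap
    fillGapsByLaplacian(val, wght, num_pass = 20)
    assert abs(val[2, 2] - 2.0) < 1e-3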
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # fixme
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from correlation file, it now may differ from the COMBO-DSI:
1) The target disparities used for correlations are replaced if they are too far from the rig (GT) values and
replaced by interpolation from available neighbors. If there are no suitable neighbors, target disparity is
derived from the rig data by adding a random offset (specified in ImageJ plugin configuration ML section)
2) correlation is performed around the defined tiles extrapolating disparity. rig data may be 0 disparity,
0 strength if there is no rig data for those tiles. That means that such tiles can only be used as peripherals
i (now 5x5) clusters, not for the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
Replace GT data with zero strength with nan, zero strength
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
    if '.tfrecords' not in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
    os.makedirs(os.path.dirname(tfr_filename), exist_ok=True)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
class ExploreData:
"""
TODO: add to constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
            pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
        if latest_version_only:
            models = {}
            for p in tlist:
                model = os.path.dirname(os.path.dirname(p))
                if (model not in models) or (models[model] < p):
                    models[model] = p
            tlist = list(models.values())
            if (self.debug_level > 0):
                print("After keeping only the latest versions, %d combo DSI files are left in %s :"%(len(tlist), top_dir))
            if (self.debug_level > 1):
                print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
            pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
        if latest_version_only:
            models = {}
            for p in tlist:
                model = os.path.dirname(os.path.dirname(p))
                if (model not in models) or (models[model] < p):
                    models[model] = p
            tlist = list(models.values())
            if (self.debug_level > 0):
                print("After keeping only the latest versions, %d GT/AUX DSI files are left in %s :"%(len(tlist), top_dir))
            if (self.debug_level > 1):
                print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
                disparities[i] = float(re.search(r".*-D([0-9.]*)\.tiff", f).groups()[0])
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
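    # Example (file name pattern as in the comments near the top of this file):
    #   "1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff" matches
    #   r".*-D([0-9.]*)\.tiff" with group "00.00000", i.e. target disparity 0.0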
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode): #0 - average, 1 - FG, 2 - BG, 3 - AUX
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
        elif mode == 3:
            ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
            ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR]  # 9
        ds_pair[:,:,:,2] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8 - measured (aux) disparity, filled for all modes (read as ds[...,2] in getHistogramDSI)
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
        hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
            x = combo_rds[...,1].flatten(), # strength (first histogram axis)
            y = combo_rds[...,0].flatten(), # disparity (second histogram axis)
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
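    # The returned histogram is indexed [strength_bin, disparity_bin] (x is the
    # first axis of np.histogram2d), so hist[si, di] is the (optionally normalized)
    # weight of tiles in strength bin si and disparity bin di.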
def __init__(self,
topdir_train,
topdir_test,
ml_subdir, #'ml32'
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
fgbg_mode = 0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdir = ml_subdir
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
self.fgbg_mode = fgbg_mode #0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
        # new in LWIR - all layers, including average (AG), FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thesh = 5.0): # reduce effective variance for higher disparities
"""
        For each tile calculate the difference between max and min disparity among neighbors and the number of qualifying neighbors (a bad center tile is not removed).
        data_ds may mismatch the correlation files - correlation files have data in extrapolated areas, replaced where the difference from GT is large.
"""
        disp_min = np.empty_like(data_ds[...,0], dtype = float)
        disp_max = np.empty_like(disp_min, dtype = float)
        tile_neibs = np.zeros_like(disp_min, dtype = int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
#disp_thesh
disp_avar = disp_max - disp_min
        disp_rvar = disp_avar * disp_thesh / np.maximum(disp_max, 0.001) # avoid division by 0 - those tiles will be discarded anyway
disp_var = np.select([disp_max >= disp_thesh, disp_max < disp_thesh],[disp_rvar,disp_avar])
        return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs), (number of "good" neib. tiles)
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
for each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide number of "large"
variable-size disparity/strength bin, or -1 if this disparity/strength combination does not seem right
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
            disp_run_tot_new = disp_run_tot
            disp0 = disp # start disparity matching disp_run_tot
            while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
                disp_run_tot_new += norm_b_hist[:,disp].sum()
                disp += 1
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
            batch_index += 1 # it was not incremented after the last one in the column, to avoid rounding errors
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
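    # Result sketch: hist_to_batch has the same [strength_bin, disparity_bin] shape
    # as self.blurred_hist; each populated cell holds a "large" batch-bin index in
    # 0..disp_bins*str_bins-1, empty cells hold -1, so tiles can be bucketed with a
    # simple lookup (as getBB() below does).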
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
            bb[findx] = self.hist_to_batch[sb.reshape(self.num_tiles), db.reshape(self.num_tiles)].reshape(db.shape[0], db.shape[1]) + (gt - 1)
return bb
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
disp_var = None, # difference between maximal and minimal disparity for each scene, each tile
                       disp_neibs = None, # number of valid tiles around each center tile (for 3x3, radius = 1, the maximum is 9)
min_var = None, # Minimal tile variance to include
max_var = None, # Maximal tile variance to include
min_neibs = None, # Minimal number of valid tiles to include
                       use_split = False, # Select single/multi-plane tiles (center only)
                       keep_split = False, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = None, # disparity random for each tile
rnd_plate = None): # disparity random for each plate (now 25 tiles)
        if rnd_tile is not None:
            self.rnd_tile = rnd_tile
        if rnd_plate is not None:
            self.rnd_plate = rnd_plate
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
        border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
    #todo: only use other files if there are not enough choices in the main file!
    '''
    Add random files to the list until each (now 40) of the full_num_choices has more
    than the minimal number (now 10) of variants to choose from
    '''
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
break;
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
break;
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
            if not isinstance(tile, np.int64):
                print("tile=", tile)
            file_tiles_sparse[tile // self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
        return flist, file_tiles # file indices, list of tile indices for each file
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different than data_ds - replaced dureing ImageJ plugin
export if main camera and the rig (GT) converged on different objects fro the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
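        # e.g. radius = 2 gives a 5x5 cluster, i.e. 25 tiles per sample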
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
            indx = 0
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) #error here - probably wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
        if '.tfrecords' not in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
        try:
            os.makedirs(os.path.dirname(tfr_filename))
            print("Created directory "+os.path.dirname(tfr_filename))
        except OSError:
            print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchData( # use self, not the global ex_data
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
                set_ds = set_ds, #DS data from all GT_AUX files scanned
radius = radius)
            #shuffle tile clusters within the batch (tiles stay together inside each cluster)
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
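            # corr2d_batch rows are ordered cluster by cluster ((2*radius+1)^2 consecutive
            # tiles per cluster), so reshaping to (clusters_in_batch, -1) lets a single row
            # permutation shuffle whole clusters while keeping each cluster intact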
            corr2d_clusters = corr2d_batch.reshape((clusters_in_batch, -1))
            target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch, -1))
            gt_ds_clusters = gt_ds_batch.reshape((clusters_in_batch, -1))
            corr2d_batch_shuffled = corr2d_clusters[permut].reshape((tiles_in_batch, -1))
            target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
            gt_ds_batch_shuffled = gt_ds_clusters[permut].reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different than data_ds - replaced dureing ImageJ plugin
export if main camera and the rig (GT) converged on different objects fro the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
        corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), 81)) # FIXME: 81 is hardcoded - derive from the tile size
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
            indx = 0
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
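            # np.add.outer() builds a (num_sweep_files x num_tiles) table of
            # (sweep_disparity - tile_disparity); argmin(0) then picks, for each tile,
            # the sweep file whose target disparity is closest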
            sfs = sorted(set(sweep_indices)) # unique sweep indices (files)
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
                this_file_indices = np.nonzero(sweep_indices == sf)[0] # positions in full_tiles that take their data from this sweep file
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
        if '.tfrecords' not in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
        try:
            os.makedirs(os.path.dirname(tfr_filename))
            print("Created directory "+os.path.dirname(tfr_filename))
        except OSError:
            print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # Temporary disable
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchDataLwir( # use self, not the global ex_data
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
                set_ds = set_ds, #DS data from all GT_AUX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
            #shuffle tile clusters within the batch (tiles stay together inside each cluster)
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
            corr2d_clusters = corr2d_batch.reshape((clusters_in_batch, -1))
            target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch, -1))
            gt_ds_clusters = gt_ds_batch.reshape((clusters_in_batch, -1))
            corr2d_batch_shuffled = corr2d_clusters[permut].reshape((tiles_in_batch, -1))
            target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
            gt_ds_batch_shuffled = gt_ds_clusters[permut].reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def showVariance(self,
                     rds_list, # list of disparity/strength arrays, such as training, testing
                     disp_var_list, # list of disparity variance arrays. Same shape (but for the last dim) as rds_list
                     num_neibs_list, # list of number-of-tile-neighbors arrays. Same shape (but for the last dim) as rds_list
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
# plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# for i, combo_rds in enumerate(rds_list):
# for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
# combo_rds[ids][...,1]*= good_tiles_list[i][ids]
# return hist, xedges, yedges
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train2'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set21'
topdir_test = '/data_ssd/lwir_sets/lwir_test2'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_5'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml32b*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32", # 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390251_025390/v01/ml32", # empty space
"/data_ssd/lwir_sets/lwir_test2/1562390257_977146/v01/ml32", # first 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_370347/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test3/1562390402_254007/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390407_382326/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390409_661607/v01/ml32", # lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test3/1562390435_873048/v01/ml32", # 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test3/1562390456_842237/v01/ml32", # near trees
"/data_ssd/lwir_sets/lwir_test3/1562390460_261151/v01/ml32"] # near trees, olga
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile data
TEST_SAME_LENGTH_AS_TRAIN = False # True # make the test set have the same number of entries as the train set
FIXED_TEST_LENGTH = None # number of test scenes to output (used when making the test set from only a few or a single test file)
RADIUS = 2 # 5x5
FRAC_NEIBS_VALID = 0.55 # 8 # LWIR new
MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all tiles valid (9 for RADIUS == 1, 25 for RADIUS == 2)
MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
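# Worked example of the threshold above (using the values set in this file,
# RADIUS == 2 and FRAC_NEIBS_VALID == 0.55): a 5x5 plate has 25 tiles, and
# round(25 * 0.55) == 14, so at least 14 valid neighbor tiles are required.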
VARIANCE_THRESHOLD = 1.2 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 32 # 8
FGBGMODE_TESTS = [1,3] # 0 - average, 1 - FG, 2 - BG, 3 - AUX
FGBGMODE_TRAIN = 1 # 0 - average, 1 - FG, 2 - BG
RND_AMPLIUDE_TEST = 0.5 # test with corr2d rendered +/- this far from the GT
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
RND_AMPLIUDE_TRAIN_TILEW = 2.0 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATEW = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use tile for training if MAIN camera (AUX for LWIR) disparity differs from GT by more than this
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
USE_SPLIT = False # True, # Select single-/multi-plane tiles (center only)
KEEP_SPLIT = False # When use_split, keep only multi-plane tiles (False - only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
for fgbgmode_test in FGBGMODE_TESTS:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
fgbgmode_test, # 0, # export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
RND_AMPLIUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR) # TFR directory
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdir = MODEL_ML_DIR,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
# show variance histogram
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape (except last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neighbors files. Same shape (except last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape (except last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neighbors files. Same shape (except last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN]) # bool index: train length if TEST_SAME_LENGTH_AS_TRAIN, else test length
else:
num_test_scenes = FIXED_TEST_LENGTH
if RADIUS == 0 : # not used
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
scale_disp = VARIANCE_SCALE_DISPARITY,
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
#fake: test-format sets generated from the train data
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train 32 sets
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d"%(train_var,RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d"%(train_var,RADIUS)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/explore_data11.py 0000664 0000000 0000000 00000334665 13517677053 0023611 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("Unsupported ndarray dtype: {} (expected float32/float64 or int64)".format(ndarray.dtype))
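# Hedged example of _dtype_feature (illustrative values; not executed at import time):
# it returns a tf.train.Feature constructor matched to the array dtype, e.g.
#   to_feature = _dtype_feature(np.zeros(4, np.float32))
#   feature = to_feature([0.0, 1.0]) # tf.train.Feature(float_list=FloatList(value=[0.0, 1.0]))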
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
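# Hedged usage sketch for readTFRewcordsEpoch (the path is hypothetical, assembled
# from the pathTFR and naming conventions used further below in this file):
#   corr2d, target_disparity, gt_ds = readTFRewcordsEpoch('/data_ssd/lwir_sets/tf_data_5x5_5/train000_R2')
#   # corr2d: (num_tiles, -1), target_disparity: (num_tiles,), gt_ds: (num_tiles, 2)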
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, #TFR directory
rms_ratio_split = None # Fixing Java export that splits a near-horizontal surface into bg/fg
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f_EXTRA'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list: # all correlation files
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
'''
Fix bug in the exported data - merge FG/BG back if rms/rms_split < rms_ratio_split
'''
if not rms_ratio_split is None:
merge = img_gt_aux.image[...,ijt.IJFGBG.RMS]/(img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
keep_split = np.logical_not(merge)
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.FG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.FG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.BG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.BG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.RMS],img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]])
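# Example of the merge test above (illustrative numbers; rms_ratio_split == 14.0 is
# the value suggested elsewhere in this file): a tile with rms == 0.7 and
# rms-split == 0.1 has a ratio of about 7.0 < 14.0, so its FG/BG planes are collapsed
# back to the average disparity/strength; tiles with a ratio >= 14.0 keep the split.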
# nn_disparity = np.nan_to_num(rslt[...,0], copy = False)
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
elif export_mode == 3:
disparity = img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
if export_mode == 3:
d_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] # still consider FG to be the real ground truth
s_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
else:
d_gt = disparity
s_gt = strength
extra = np.concatenate((
img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.FG_STR].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1)
),1)
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)# d_gt.flatten)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt) # s_gt.flatten)
plt.colorbar()
# the next values may be modified when filling gaps, so copy them first
d_gt = np.copy(d_gt)
s_gt = np.copy(s_gt)
'''
fill gaps on ground truth slices only
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
'''
Assemble synthetic image, selecting each tile from the nearest available disparity sweep file
Currently even in mode 3 (aux) only sweep files are used (rounded to the nearest step). Consider
using real GT_AUX measurements (not available currently as ImageJ output, need to modify + rerun).
'''
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select random index from the list - even if no random (it will just be a 1-element list then)
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
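# Example of the sweep selection above (illustrative numbers): with
# random_offset == 0.5 and disparity d == 1.3, every sweep file whose target
# disparity lies in [0.8, 1.8] goes into fit_list and one is chosen at random;
# if none fits, the sweep with the target disparity closest to 1.3 is used.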
if debug > 1:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
corr2d_layer =dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
if not '.tfrecords' in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
try:
os.makedirs(os.path.dirname(tfr_filename))
except:
pass
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
dtype_feature_extra = _dtype_feature(extra)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
e = extra[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z),
'extra': dtype_feature_extra(e)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
print()
sys.stdout.flush()
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
gap_tiles = []
gap_neibs = []
rows = val.shape[0]
cols = wght.shape[1]
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
for npass in range (num_pass):
num_new = 0 # count tiles filled for the first time during this pass
max_diff = 0.0
for tile, neibs in zip (gap_tiles, gap_neibs):
swn = 0.0
sw = 0.0
swd = 0.0
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
if (wght[tile[0]][tile[1]]) <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
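def _fillGapsByLaplacian_demo():
    # Hedged usage sketch for fillGapsByLaplacian; this helper is illustrative
    # only and is not called anywhere in this module, and the array values are
    # made up. The tile at [1][1] has zero weight, so it is treated as a gap and
    # filled in place from its 8 neighbors (diagonals weighted by w_diag).
    val = np.array([[1.0, 1.0, 1.0],
                    [2.0, 0.0, 2.0],
                    [3.0, 3.0, 3.0]])
    wght = np.array([[1.0, 1.0, 1.0],
                     [1.0, 0.0, 1.0],
                     [1.0, 1.0, 1.0]])
    fillGapsByLaplacian(val, wght, w_diag = 0.7, w_reduce = 0.7, num_pass = 10)
    return val, wght # val[1][1] becomes the weighted neighbor average (2.0 here)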
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # fixme
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from correlation file, it now may differ from the COMBO-DSI:
1) The target disparities used for correlations are replaced if they are too far from the rig (GT) values and
replaced by interpolation from available neighbors. If there are no suitable neighbors, target disparity is
derived from the rig data by adding a random offset (specified in ImageJ plugin configuration ML section)
2) correlation is performed around the defined tiles extrapolating disparity. rig data may be 0 disparity,
0 strength if there is no rig data for those tiles. That means that such tiles can only be used as peripherals
in (now 5x5) clusters, not for the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
Replace GT data with zero strength with nan, zero strength
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
try:
os.makedirs(os.path.dirname(tfr_filename))
except:
pass
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
class ExploreData:
"""
TODO: add to constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir # extended below with i levels of '*' and the file pattern
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir # extended below with i levels of '*' and the file pattern
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
disparities[i] = float(re.search(".*-D([0-9.]*)\.tiff",f).groups()[0])
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode, #0 - average, 1 - FG, 2 - BG, 3 - AUX
rms_ratio_split = None): # fixing bug in exported data - use rms_ratio_split = 14.0
if not rms_ratio_split is None:
merge = gtaux[...,ijt.IJFGBG.RMS]/(gtaux[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
keep_split = np.logical_not(merge)
gtaux[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.FG_DISP]])
gtaux[..., ijt.IJFGBG.FG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.FG_STR]])
gtaux[..., ijt.IJFGBG.BG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.BG_DISP]])
gtaux[...,ijt.IJFGBG.BG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.BG_STR]])
gtaux[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.RMS], gtaux[...,ijt.IJFGBG.RMS_SPLIT]])
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
elif mode == 3:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9 - aux strength
ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(), # strength
y = combo_rds[...,0].flatten(), # disparity
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
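# Note on the histogram above: the boolean good_tiles masks are passed as
# weights, so rejected tiles contribute 0 and accepted tiles contribute 1;
# hist is therefore a (possibly normalized) count of qualifying tiles per
# strength/disparity cell.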
def __init__(self,
topdir_train,
topdir_test,
ml_subdir, #'ml32'
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
fgbg_mode = 0, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rms_merge_ratio = 14.0,
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdir = ml_subdir
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
self.fgbg_mode = fgbg_mode # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rms_merge_ratio = rms_merge_ratio # fixing exported data bug
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
# new in LWIR - all layers, including AVG, FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
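# Note on the smoothing above: the histogram is blurred in log space so the
# Gaussian treats sparse and dense disparity/strength cells more evenly;
# log_offset keeps log() finite for empty cells and is subtracted back after
# exponentiation, and cells below h_cutoff of the maximum are zeroed as unusable.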
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thesh = 5.0): # reduce effective variance for higher disparities
"""
For each tile calculate difference between max and min among neighbors and number of qualifying neighbors (bad center is not removed)
data_ds may mismatch with the correlation files - correlation files have data in extrapolated areas and replaced for large difference with GT
"""
disp_min = np.empty_like(data_ds[...,0], dtype = np.float)
disp_max = np.empty_like(disp_min, dtype = np.float)
tile_neibs = np.zeros_like(disp_min, dtype = np.int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
        #disp_thresh
        disp_avar = disp_max - disp_min
        disp_rvar = disp_avar * disp_thresh / np.maximum(disp_max, 0.001) # avoid division by zero - those tiles will be discarded anyway
        disp_var = np.select([disp_max >= disp_thresh, disp_max < disp_thresh],[disp_rvar,disp_avar])
        return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs), (number of "good" neib. tiles)
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
        For each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide the number of the "large"
        variable-size disparity/strength bin, or -1 if this disparity/strength combination does not seem right
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
disp_run_tot_new = disp_run_tot
            disp0 = disp # start disparity matching disp_run_tot
while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
disp_run_tot_new += norm_b_hist[:,disp].sum()
                disp += 1
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
            batch_index += 1 # it was not incremented after the last column entry to avoid rounding errors
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1)
return bb
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
disp_var = None, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9
min_var = None, # Minimal tile variance to include
max_var = None, # Maximal tile variance to include
min_neibs = None, # Minimal number of valid tiles to include
                       use_split = False, # Select by single/multi-plane tiles (center only)
                       keep_split = False, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = None, # disparity random for each tile
rnd_plate = None): # disparity random for each plate (now 25 tiles)
if not rnd_tile is None:
self.rnd_tile = rnd_tile
if not rnd_plate is None:
self.rnd_plate = rnd_plate
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
        border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
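        # Tiles closer than `radius` to the image border are marked so they are never
        # used as cluster centers - each selected center must have a full
        # (2*radius+1)**2 neighborhood inside the frame.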
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
    #todo: only use other files if there are not enough choices in the main file!
    '''
    Add random files to the list until each (now 40) of the full_num_choices has more
    than the minimal number (now 10) of variants to choose from
    '''
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
break;
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
break;
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
            if not isinstance(tile, np.int64):
                print("tile=",tile)
file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
        return flist, file_tiles # file indices, list of tile indices for each file
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
        target_disparity for correlation files may be different from data_ds - replaced during ImageJ plugin
        export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
            indx = 0
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) #error here - probably wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchData( #'hor-pairs' is not in list
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
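            # Reshaping to (clusters_in_batch, -1) before applying the permutation
            # shuffles whole (2*radius+1)**2 clusters, so every tile stays adjacent to
            # its neighbors; reshaping back restores one row per tile in the new order.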
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
        target_disparity for correlation files may be different from data_ds - replaced during ImageJ plugin
        export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
        corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # TODO: replace hard-coded 81 with the correct 2D correlation tile length
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
            indx = 0
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
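            # np.add.outer(sweep_disparities[scene], -disparity_tiles) has shape
            # (num_sweep_files, num_tiles); taking abs().argmin(0) picks, for every
            # tile, the sweep file whose rendered disparity is nearest the requested one.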
            sfs = list(set(sweep_indices)) # unique sweep indices (files)
            sfs.sort()
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
                this_file_indices = np.nonzero(sweep_indices == sf)[0] # np.nonzero returns a tuple of index arrays, one per dimension
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # Temporary disable
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchDataLwir(
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def showVariance(self,
                     rds_list, # list of disparity/strength files, such as training, testing
disp_var_list, # list of disparity variance files. Same shape(but last dim) as rds_list
num_neibs_list, # list of number of tile neibs files. Same shape(but last dim) as rds_list
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
# plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# for i, combo_rds in enumerate(rds_list):
# for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
# combo_rds[ids][...,1]*= good_tiles_list[i][ids]
# return hist, xedges, yedges
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train3'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set21'
topdir_test = '/data_ssd/lwir_sets/lwir_test3'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_6'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml32b*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32", # 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390251_025390/v01/ml32", # empty space
"/data_ssd/lwir_sets/lwir_test2/1562390257_977146/v01/ml32", # first 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_370347/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test3/1562390402_254007/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390407_382326/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390409_661607/v01/ml32", # lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test3/1562390435_873048/v01/ml32", # 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test3/1562390456_842237/v01/ml32", # near trees
"/data_ssd/lwir_sets/lwir_test3/1562390460_261151/v01/ml32"] # near trees, olga
    #Parameters to generate neighbors data. Set radius to 0 to generate single-tile data
TEST_SAME_LENGTH_AS_TRAIN = False # True # make test to have same number of entries as train ones
    FIXED_TEST_LENGTH = None # number of test scenes to output (used when making the test set from only a few or a single test file)
RADIUS = 2 # 5x5
    FRAC_NEIBS_VALID = 0.55 # LWIR new
    MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all neighbors valid (9 for radius 1, 25 for radius 2)
MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
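    # Arithmetic check: RADIUS = 2 gives (2*2+1)**2 = 25 candidate neighbors, so with
    # FRAC_NEIBS_VALID = 0.55, MIN_NEIBS = round(25 * 0.55) = 14.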
VARIANCE_THRESHOLD = 1.2 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 32 # 8
FGBGMODE_TESTS = [1,3] # 0 - average, 1 - FG, 2 - BG, 3 - AUX
FGBGMODE_TRAIN = 1 # 0 - average, 1 - FG, 2 - BG
RND_AMPLIUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
RMS_MERGE_RATIO = 14.0 # fixing bug in exported data - merging FG/BG for near horizontal surfaces
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
RND_AMPLIUDE_TRAIN_TILEW = 2.0 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATEW = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use tile for training if MAIN camera (AUX for LWIR) differs more from GT
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
    USE_SPLIT = False # True # Select by single/multi-plane tiles (center only)
    KEEP_SPLIT = False # When USE_SPLIT, keep only multi-plane tiles (false - only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
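    # The number of "large" batch bins is BATCH_DISP_BINS * BATCH_STR_BINS
    # (10 * 4 = 40 for the default RADIUS == 2), matching the "(now 40)" mentioned
    # in the augmentBatchFileIndices() note above.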
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
for fgbgmode_test in FGBGMODE_TESTS:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
            fgbgmode_test, # export_mode: 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
            RND_AMPLIUDE_TEST, # random_offset: for modes 0..2 add a random offset of -random_offset to +random_offset; in mode 3 add random to GT average if no AUX data
pathTFR, # TFR directory
RMS_MERGE_RATIO) # fixing bug - merging FG+BG for horizontal surfaces
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdir = MODEL_ML_DIR,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
        fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rms_merge_ratio = RMS_MERGE_RATIO,
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
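    # bb_display folds the batch index into a few alternating color classes (parity of
    # the index plus parity of index // 10) purely for visualization; cells that did
    # not get a positive batch index are zeroed out.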
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
        # show variance histogram
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
                rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape(but last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neibs files. Same shape(but last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
                rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape(but last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neibs files. Same shape(but last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
else:
num_test_scenes = FIXED_TEST_LENGTH
if RADIUS == 0 : # not used
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
#           scale_disp = VARIANCE_SCALE_DISPARITY, # removed: makeBatchLists() has no scale_disp parameter (would raise TypeError)
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (false - only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (false - only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = 1000.0, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS))
        # note: writeTFRewcordsEpochLwir() appends "-RT%1.2f-RP%1.2f-M%d-NB%d" to fpath, so this narrow-random set and the wide-random set below get distinct file names
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = 1000.0, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
#fake
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
                disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
                min_var = 0.0, # Minimal tile variance to include
                max_var = 1000.0, # Maximal tile variance to include
                min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
                use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
                keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
                disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
                min_var = 0.0, # Minimal tile variance to include
                max_var = 1000.0, # Maximal tile variance to include
                min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
                use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
                keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train 32 sets
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
                disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
                min_var = 0.0, # Minimal tile variance to include
                max_var = 1000.0, # Maximal tile variance to include
                min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
                use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
                keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d"%(train_var,RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d"%(train_var,RADIUS)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/explore_data12.py 0000664 0000000 0000000 00000334665 13517677053 0023612 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
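# --- Hedged sketch, not part of the original pipeline: an equivalent epoch
# reader built on tf.data instead of the TF1 tf_record_iterator above. The
# feature names and widths match the records written by this file; the
# _demo_* name and the corr2d_len parameter are illustrative assumptions.
def _demo_read_epoch_tf_data(train_filename, corr2d_len, batch_size=1000):
    feature_spec = {
        'corr2d': tf.io.FixedLenFeature([corr2d_len], tf.float32),
        'target_disparity': tf.io.FixedLenFeature([1], tf.float32),
        'gt_ds': tf.io.FixedLenFeature([2], tf.float32),
    }
    def _parse(record): # one serialized tf.train.Example -> dict of dense tensors
        return tf.io.parse_single_example(record, feature_spec)
    return tf.data.TFRecordDataset([train_filename]).map(_parse).batch(batch_size)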
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, #TFR directory
rms_ratio_split = None # Fixing Java export that splits near-horizontal surfaces into bg/fg
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f_EXTRA'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list: # all correlation files
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
'''
Fix bug in the exported data - merge FG/BG back if rms/rms_split < rms_ratio_split
'''
if not rms_ratio_split is None:
merge = img_gt_aux.image[...,ijt.IJFGBG.RMS]/(img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
keep_split = np.logical_not(merge)
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.FG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.FG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.BG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.BG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.RMS],img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]])
# nn_disparity = np.nan_to_num(rslt[...,0], copy = False)
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
elif export_mode == 3:
disparity = img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
if export_mode == 3:
d_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] # still consider FG to be the real ground truth
s_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
else:
d_gt = disparity
s_gt = strength
extra = np.concatenate((
img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.FG_STR].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1)
),1)
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)# d_gt.flatten)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt) # s_gt.flatten)
plt.colorbar()
d_gt = np.copy(d_gt)
s_gt = np.copy(s_gt)
#the values below will be modified in place to fill gaps, hence the copies above
'''
fill gaps on ground truth slices only
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
'''
Assemble synthetic image, selecting each tile from the nearest available disparity sweep file
Currently even in mode 3 (aux) only sweep files are used (rounded to the nearest step). Consider
using real GT_AUX measurements (not currently available as ImageJ output; need to modify and rerun).
'''
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select random index from the list - even if no random (it will just be a 1-element list then)
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
if debug > 1:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
corr2d_layer =dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
if not '.tfrecords' in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
try:
os.makedirs(os.path.dirname(tfr_filename))
except:
pass
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
dtype_feature_extra = _dtype_feature(extra)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
e = extra[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z),
'extra': dtype_feature_extra(e)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
print()
sys.stdout.flush()
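# Hedged usage sketch (commented out; the output directory and the 0.2 offset
# are hypothetical, the ml32 path follows the example near the top of this
# file and rms_ratio_split = 14.0 follows the note in selectDSPairFromGtaux):
# writeTFRecordsFromImageSet(
#     "/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32",
#     export_mode = 0, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
#     random_offset = 0.2, # +/- random target disparity offset for modes 0..2
#     pathTFR = "/data_ssd/lwir_sets/tfr",
#     rms_ratio_split = 14.0) # merge back the FG/BG split from the Java export bug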
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
gap_tiles = []
gap_neibs = []
rows = val.shape[0]
cols = wght.shape[1]
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
for npass in range (num_pass):
num_new = 1
max_diff = 0.0
for tile, neibs in zip (gap_tiles, gap_neibs):
swn = 0.0
sw = 0.0
swd = 0.0
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
if (wght[tile[0]][tile[1]]) <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
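# Hedged sketch (defined only for illustration, never called): filling a single
# zero-weight tile with fillGapsByLaplacian. The center of a 3x3 field has zero
# strength, so it receives the weighted average of its 8 neighbors (diagonals
# weighted by w_diag) and a propagated weight reduced by w_reduce.
def _demo_fill_gaps():
    val = np.array([[1., 1., 1.],
                    [1., 0., 1.],
                    [1., 1., 1.]])
    wght = np.ones((3, 3))
    wght[1, 1] = 0.0 # mark the center tile as a gap
    fillGapsByLaplacian(val, wght, w_diag=0.7, w_reduce=0.7, num_pass=10)
    return val[1, 1] # ~= 1.0, interpolated from the neighbors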
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # fixme
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from the correlation file may now differ from the COMBO-DSI:
1) The target disparities used for correlations are replaced, if they are too far from the rig (GT) values,
by interpolation from available neighbors. If there are no suitable neighbors, the target disparity is
derived from the rig data by adding a random offset (specified in the ImageJ plugin configuration, ML section).
2) Correlation is performed around the defined tiles extrapolating disparity. Rig data may be 0 disparity,
0 strength if there is no rig data for those tiles. That means that such tiles can only be used as peripherals
in (now 5x5) clusters, not for the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
Replace GT data with zero strength with nan, zero strength
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
try:
os.makedirs(os.path.dirname(tfr_filename))
except:
pass
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
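# Hedged sketch (illustrative, not called): verifying a file written by
# writeTFRewcordsImageTiles by counting its records - one record per tile, so
# the hard-coded 242*324 image above should yield 78408 records.
def _demo_count_records(tfr_filename):
    count = 0
    for _ in tf.python_io.tf_record_iterator(path=tfr_filename):
        count += 1
    return count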
class ExploreData:
"""
TODO: add to constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir#) ,'**', patt) # works
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir#) ,'**', patt) # works
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
disparities[i] = float(re.search(r".*-D([0-9.]*)\.tiff", f).groups()[0])
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode, #0 - average, 1 - FG, 2 - BG, 3 - AUX
rms_ratio_split = None): # fixing bug in exported data - use rms_ratio_split = 14.0
if not rms_ratio_split is None:
merge = gtaux[...,ijt.IJFGBG.RMS]/(gtaux[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
keep_split = np.logical_not(merge)
gtaux[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.FG_DISP]])
gtaux[..., ijt.IJFGBG.FG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.FG_STR]])
gtaux[..., ijt.IJFGBG.BG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.BG_DISP]])
gtaux[...,ijt.IJFGBG.BG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.BG_STR]])
gtaux[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.RMS], gtaux[...,ijt.IJFGBG.RMS_SPLIT]])
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
elif mode == 3:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9
ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
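'''
Hedged sketch (illustrative only) of the merge predicate used above and in
writeTFRecordsFromImageSet: a tile keeps its exported FG/BG split only when
splitting reduced the residual RMS at least rms_ratio_split times (the 14.0
value follows the note in this method's signature):

    rms = np.array([0.50, 0.50])
    rms_split = np.array([0.50, 0.02]) # second tile: the split really helped
    merge = rms / (rms_split + 1e-6) < 14.0 # -> [True, False]

Merged tiles fall back to the averaged DISPARITY/STRENGTH layers via np.select.
'''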
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(), # average strength from main
y = combo_rds[...,0].flatten(), # average disparity from main
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
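'''
Hedged sketch (illustrative only) of the weighted histogram built above:
good_tiles acts as a 0/1 weight, so rejected tiles contribute to no
disparity/strength bin at all:

    strength = np.array([0.2, 0.5, 0.8])
    disparity = np.array([1.0, 3.0, 5.0])
    good = np.array([1.0, 0.0, 1.0]) # the middle tile is dropped
    hist, _, _ = np.histogram2d(strength, disparity,
        bins = (2, 2), range = ((0.1, 0.9), (-0.1, 6.0)), weights = good)
    # hist.sum() == 2.0 - only the two good tiles are counted
'''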
def __init__(self,
topdir_train,
topdir_test,
ml_subdir, #'ml32'
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
fgbg_mode = 0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
rms_merge_ratio = 14.0,
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdir = ml_subdir
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
self.fgbg_mode = fgbg_mode #0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rms_merge_ratio = rms_merge_ratio # fixing exported data bug
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
# new in LWIR - all layers, including average, FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thesh = 5.0): # reduce effective variance for higher disparities
"""
For each tile calculate the difference between max and min among neighbors and the number of qualifying neighbors (a bad center is not removed)
data_ds may mismatch the correlation files - correlation files have data in extrapolated areas, replaced where the difference from GT is large
"""
disp_min = np.empty_like(data_ds[...,0], dtype = float)
disp_max = np.empty_like(disp_min, dtype = float)
tile_neibs = np.zeros_like(disp_min, dtype = int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
#disp_thesh
disp_avar = disp_max - disp_min
disp_rvar = disp_avar * disp_thesh / np.maximum(disp_max, 0.001) # removing division by 0 error - those tiles will be anyway discarded
disp_var = np.select([disp_max >= disp_thesh, disp_max < disp_thesh],[disp_rvar,disp_avar])
return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs), (number of "good" neib. tiles)
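'''
Hedged sketch (illustrative only) of the padded-array trick used above: to get
a per-tile minimum over a (2*radius+1)^2 neighborhood without boundary checks,
the disparity is embedded into an enlarged array pre-filled with a value that
cannot win (dmax for the running minimum, dmin for the running maximum), and
shifted windows are combined in place:

    disp = np.arange(12, dtype=float).reshape(3, 4)
    radius = 1
    h, w = disp.shape
    pad = np.full((h + 2 * radius, w + 2 * radius), disp.max())
    pad[radius:h + radius, radius:w + radius] = disp
    dmin = disp.copy()
    for oy in range(2 * radius + 1):
        for ox in range(2 * radius + 1):
            np.minimum(dmin, pad[oy:oy + h, ox:ox + w], out = dmin)
    # dmin[y, x] is now the minimum over the 3x3 neighborhood of (y, x)
'''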
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
for each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide the index of the "large"
variable-size disparity/strength bin, or -1 if this disparity/strength combination does not seem right
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
disp_run_tot_new = disp_run_tot
disp0 = disp # start disparity matching disp_run_tot
while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
disp_run_tot_new += norm_b_hist[:,disp].sum()
disp += 1
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
batch_index += 1 # it was not incremented after the last in the column to avoid rounding error
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
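'''
Hedged sketch (illustrative only) of how hist_to_batch is consumed: a tile's
(disparity, strength) pair is quantized to histogram bin indices and looked up
to get its variable-size batch bin (or -1 for a rejected combination), which is
what getBB() below vectorizes over whole files:

    disp_step = (self.disparity_max_clip - self.disparity_min_clip) / self.disparity_bins
    str_step = (self.strength_max_clip - self.strength_min_clip) / self.strength_bins
    db = int((d - self.disparity_min_clip) / disp_step) # clip to [0, disparity_bins-1]
    sb = int((s - self.strength_min_clip) / str_step) # clip to [0, strength_bins-1]
    batch_bin = self.hist_to_batch[sb, db]

assignBatchBins() equalizes the expected tile counts across the
disp_bins * str_bins batch bins, so rare disparity/strength combinations get
wider bins.
'''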
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1)
return bb
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
disp_var = None, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = None, # Minimal tile variance to include
max_var = None, # Maximal tile variance to include
min_neibs = None, # Minimal number of valid tiles to include
use_split = False, # Select by single/multi-plane tiles (center only)
keep_split = False, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = None, # disparity random for each tile
rnd_plate = None): # disparity random for each plate (now 25 tiles)
if not rnd_tile is None:
self.rnd_tile = rnd_tile
if not rnd_plate is None:
self.rnd_plate = rnd_plate
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
#todo: only use other files if there are no enough choices in the main file!
'''
Add random files to the list until each (now 40) of the full_num_choices has more
than minimal (now 10) variants to chose from
'''
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
break
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
break
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
if not isinstance(tile, np.int64):
print("tile=",tile)
file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
return flist, file_tiles # file indices, list of tile indices for each file
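'''
Hedged sketch (illustrative only) of the augmentation loop above: starting from
the seed scene, random extra scenes are appended until every batch bin offers at
least min_choices candidate tiles or max_files scenes are used (all_choices
holds the remaining scene indices):

    full_num_choices = self.num_batch_tiles[seed_index].copy()
    flist = [seed_index]
    while full_num_choices.min() < min_choices and len(flist) < max_files:
        findx = np.random.choice(all_choices)
        flist.append(findx)
        all_choices.remove(findx)
        full_num_choices += self.num_batch_tiles[findx]
'''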
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may differ from data_ds - it is replaced during ImageJ plugin
export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
indx = 0
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) #error here - probably wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchData( # use self, not the global ex_data
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may differ from data_ds - it is replaced during ImageJ plugin
export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # fixme: 81 is hard-coded, should come from the tile size
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
indx = 0
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
sfs = list(set(sweep_indices))
sfs.sort() # unique sweep indices (files)
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
this_file_indices = np.nonzero(sweep_indices == sf)[0] # np.nonzero returns a tuple of index arrays (one per dimension); take the only one
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
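'''
Hedged sketch (illustrative only) of the sweep-file selection above: for each
tile, the sweep file whose fixed target disparity is nearest to the
(randomized) GT disparity is picked with a single outer difference:

    sweep_disparities = np.array([0.0, 0.5, 1.0, 1.5]) # one per sweep file
    disparity_tiles = np.array([0.1, 1.4, 0.8]) # per-tile requests
    diffs = np.abs(np.add.outer(sweep_disparities, -disparity_tiles))
    sweep_indices = diffs.argmin(0) # -> [0, 3, 2]: nearest file per tile

Tiles are then grouped by sweep index so that each TIFF is opened only once.
'''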
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # Temporary disable
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchDataLwir( # use self, not the global ex_data
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def showVariance(self,
                     rds_list,       # list of disparity/strength files, such as training, testing
                     disp_var_list,  # list of disparity variance files. Same shape as rds_list (except the last dim)
                     num_neibs_list, # list of tile neighbor-count files. Same shape as rds_list (except the last dim)
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
# plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# for i, combo_rds in enumerate(rds_list):
# for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
# combo_rds[ids][...,1]*= good_tiles_list[i][ids]
# return hist, xedges, yedges
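# A minimal, never-called sketch (toy shapes; thresholds borrowed from the __main__
# defaults below, not from real pipeline data) of the good-tile masking + weighted
# 2D histogram idea that showVariance() implements above.
def _demo_masked_histogram():
    np.random.seed(0)
    disparity = np.random.uniform(-0.5, 8.5, (10, 20)) # toy disparity tiles
    strength = np.random.uniform( 0.0, 0.4, (10, 20)) # toy strength tiles
    good = (disparity >= -0.1) & (disparity <= 8.0) & (strength >= 0.02) & (strength <= 0.3)
    hist, _, _ = np.histogram2d(
        x = strength.flatten(),
        y = disparity.flatten(),
        bins = (50, 50),
        range = ((0.02, 0.27), (-0.1, 8.0)),
        weights = good.astype(float).flatten()) # bad tiles contribute zero weight
    return hist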
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train4'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set21'
topdir_test = '/data_ssd/lwir_sets/lwir_test4'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_7'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml32b*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32", # 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390251_025390/v01/ml32", # empty space
"/data_ssd/lwir_sets/lwir_test2/1562390257_977146/v01/ml32", # first 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_370347/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test3/1562390402_254007/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390407_382326/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390409_661607/v01/ml32", # lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test3/1562390435_873048/v01/ml32", # 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test3/1562390456_842237/v01/ml32", # near trees
"/data_ssd/lwir_sets/lwir_test3/1562390460_261151/v01/ml32"] # near trees, olga
    #Parameters to generate neighbors data. Set radius to 0 to generate single-tile data
    TEST_SAME_LENGTH_AS_TRAIN = False # True # make the test set have the same number of entries as the train set
    FIXED_TEST_LENGTH = None # number of test scenes to output (used when making a test set from few or a single test file)
RADIUS = 2 # 5x5
FRAC_NEIBS_VALID = 0.55# 8 #LWIR new
    MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all tiles in the cluster valid (25 for RADIUS == 2)
MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
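    # With RADIUS == 2 the cluster is 5x5: (2 * 2 + 1) ** 2 == 25 tiles, and
    # round(25 * 0.55) == 14, so at least 14 valid neighbor tiles are required.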
VARIANCE_THRESHOLD = 1.2 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 16 # 8
FGBGMODE_TESTS = [1,3] # 0 - average, 1 - FG, 2 - BG, 3 - AUX
FGBGMODE_TRAIN = 1 # 0 - average, 1 - FG, 2 - BG
RND_AMPLIUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
RMS_MERGE_RATIO = 14.0 # fixing bug in exported data - merging FG/BG for near horizontal surfaces
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
RND_AMPLIUDE_TRAIN_TILEW = 2.0 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATEW = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use tile for training if MAIN camera (AUX for LWIR) differs more from GT
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
    USE_SPLIT = False # True, # select tiles by single/multi-plane split (center only)
    KEEP_SPLIT = False # when USE_SPLIT: keep only multi-plane tiles (False - keep only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
for fgbgmode_test in FGBGMODE_TESTS:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
            fgbgmode_test, # export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
RND_AMPLIUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, # TFR directory
RMS_MERGE_RATIO) # fixing bug - merging FG+BG for horizontal surfaces
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdir = MODEL_ML_DIR,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
        fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rms_merge_ratio = RMS_MERGE_RATIO,
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
        # show variance histogram
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
            rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
            disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape as rds_list (except the last dim)
            num_neibs_list = [num_neibs_train, num_neibs_test], # list of tile neighbor-count files. Same shape as rds_list (except the last dim)
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
            rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
            disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape as rds_list (except the last dim)
            num_neibs_list = [num_neibs_train, num_neibs_test], # list of tile neighbor-count files. Same shape as rds_list (except the last dim)
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
else:
num_test_scenes = FIXED_TEST_LENGTH
if RADIUS == 0 : # not used
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
scale_disp = VARIANCE_SCALE_DISPARITY,
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # select tiles by single/multi-plane split (center only)
            keep_split = KEEP_SPLIT) # when use_split: keep only multi-plane tiles (False - keep only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # select tiles by single/multi-plane split (center only)
            keep_split = KEEP_SPLIT) # when use_split: keep only multi-plane tiles (False - keep only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # select tiles by single/multi-plane split (center only)
            keep_split = KEEP_SPLIT, # when use_split: keep only multi-plane tiles (False - keep only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # select tiles by single/multi-plane split (center only)
            keep_split = KEEP_SPLIT, # when use_split: keep only multi-plane tiles (False - keep only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
#fake
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
                disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
                use_split = USE_SPLIT, # select tiles by single/multi-plane split (center only)
                keep_split = KEEP_SPLIT, # when use_split: keep only multi-plane tiles (False - keep only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
                disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
                use_split = USE_SPLIT, # select tiles by single/multi-plane split (center only)
                keep_split = KEEP_SPLIT, # when use_split: keep only multi-plane tiles (False - keep only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train 32 sets
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
                disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
                use_split = USE_SPLIT, # select tiles by single/multi-plane split (center only)
                keep_split = KEEP_SPLIT, # when use_split: keep only multi-plane tiles (False - keep only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d"%(train_var,RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
                disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
                use_split = USE_SPLIT, # select tiles by single/multi-plane split (center only)
                keep_split = KEEP_SPLIT, # when use_split: keep only multi-plane tiles (False - keep only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d"%(train_var,RADIUS)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/explore_data13.py 0000664 0000000 0000000 00000341042 13517677053 0023576 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
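# Usage sketch (hypothetical path) - load one epoch file written by the TFRecord
# writers in this file back into plain numpy arrays:
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch('/some/path/train000')
# print(corr2d.shape, target_disparity.shape, gt_ds.shape)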
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity, 4 - use FG/BG closest to AUX
random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, #TFR directory
rms_ratio_split = None# Fixing Java export that splits near horizontal surface in bg/fg
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f_EXTRA'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list: # all correlation files
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
'''
Fix bug in the exported data - merge FG/BG back if rms/rms_split < rms_ratio_split
'''
    if rms_ratio_split is not None:
merge = img_gt_aux.image[...,ijt.IJFGBG.RMS]/(img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
keep_split = np.logical_not(merge)
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.FG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.FG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.BG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.BG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.RMS],img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]])
# nn_disparity = np.nan_to_num(rslt[...,0], copy = False)
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
if (export_mode == 4) or (export_mode == 3):
#1) replace nan in aux with average gt
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
aux_nan = np.isnan(img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP])
disparity = np.select(
[aux_nan, np.logical_not(aux_nan)],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY], img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]])
use_fg = np.abs(img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] - disparity) < np.abs(img_gt_aux.image[...,ijt.IJFGBG.BG_DISP] - disparity)
d_gt = np.select(
[use_fg, np.logical_not(use_fg)],
[img_gt_aux.image[...,ijt.IJFGBG.FG_DISP], img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]]
)
s_gt = np.select(
[use_fg, np.logical_not(use_fg)],
[img_gt_aux.image[...,ijt.IJFGBG.FG_STR], img_gt_aux.image[...,ijt.IJFGBG.BG_STR]]
)
if (export_mode == 4):
disparity = d_gt
strength = s_gt
else:
d_gt = disparity
s_gt = strength
extra = np.concatenate((
img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.FG_STR].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1)
),1)
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)# d_gt.flatten)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt) # s_gt.flatten)
plt.colorbar()
d_gt = np.copy(d_gt)
s_gt = np.copy(s_gt)
#next values may be modified to fill gaps, so copy them before
'''
fill gaps on ground truth slices only
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
'''
Assemble synthetic image, selecting each tile from the nearest available disparity sweep file
    Currently even in mode 3 (aux) only sweep files are used (rounded to the nearest step). Consider
    using real measured GT_AUX data (not available currently as ImageJ output; need to modify and rerun).
'''
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select random index from the list - even if no random (it will just be a 1-element list then)
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
if debug > 1:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
corr2d_layer =dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
if not '.tfrecords' in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
try:
os.makedirs(os.path.dirname(tfr_filename))
except:
pass
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
dtype_feature_extra = _dtype_feature(extra)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
e = extra[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z),
'extra': dtype_feature_extra(e)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
print()
sys.stdout.flush()
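# Minimal sketch (never called; uses only the TF 1.x API already imported in this
# file) of parsing one serialized record written above back into numpy arrays;
# '_parse_lwir_record' is a hypothetical helper, not part of the original pipeline.
def _parse_lwir_record(serialized):
    example = tf.train.Example()
    example.ParseFromString(serialized)
    f = example.features.feature
    return (np.array(f['corr2d'].float_list.value), # flattened 2D correlations
            np.array(f['target_disparity'].float_list.value), # one value per tile
            np.array(f['gt_ds'].float_list.value), # GT disparity/strength pair
            np.array(f['extra'].float_list.value)) # AUX/FG/BG/RMS extras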
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
gap_tiles = []
gap_neibs = []
rows = val.shape[0]
cols = wght.shape[1]
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
for npass in range (num_pass):
        num_new = 0 # gap tiles that still had zero weight before this pass
        max_diff = 0.0
for tile, neibs in zip (gap_tiles, gap_neibs):
swn = 0.0
sw = 0.0
            swd = 0.0
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
if (wght[tile[0]][tile[1]]) <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
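# Illustrative sketch (toy 3x3 arrays with made-up values; never called) of
# fillGapsByLaplacian(): the zero-weight center tile is interpolated from its
# 8 neighbors (diagonals weighted by w_diag) and gets a reduced weight back.
def _demo_fill_gaps():
    val = np.array([[1.0, 2.0, 1.0],
                    [2.0, 0.0, 2.0],
                    [1.0, 2.0, 1.0]])
    wght = np.array([[1.0, 1.0, 1.0],
                     [1.0, 0.0, 1.0], # zero weight marks the gap
                     [1.0, 1.0, 1.0]])
    fillGapsByLaplacian(val, wght, num_pass = 10)
    return val, wght # val[1][1] is now a weighted average of its 8 neighbors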
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # fixme
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from correlation file, it now may differ from the COMBO-DSI:
1) The target disparities used for correlations are replaced if they are too far from the rig (GT) values and
replaced by interpolation from available neighbors. If there are no suitable neighbors, target disparity is
derived from the rig data by adding a random offset (specified in ImageJ plugin configuration ML section)
2) correlation is performed around the defined tiles extrapolating disparity. rig data may be 0 disparity,
0 strength if there is no rig data for those tiles. That means that such tiles can only be used as peripherals
    in (now 5x5) clusters, not for the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
    Replace GT entries that have zero strength with (nan, 0):
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
try:
os.makedirs(os.path.dirname(tfr_filename))
except:
pass
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
class ExploreData:
"""
TODO: add to constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
            pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
                if (model not in models) or (models[model] < p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
            pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
                if (model not in models) or (models[model] < p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
                disparities[i] = float(re.search(r".*-D([0-9.]*)\.tiff", f).groups()[0]) # raw string avoids an invalid-escape warning
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
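        # e.g. ".../1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff" -> 0.0
        # (the regex extracts the trailing -D<value> disparity encoded in the file name)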
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode, #0 - average, 1 - FG, 2 - BG, 3 - AUX, 4 select FG/BG closest to aux
rms_ratio_split = None): # fixing bug in exported data - use rms_ratio_split = 14.0
        if rms_ratio_split is not None:
merge = gtaux[...,ijt.IJFGBG.RMS]/(gtaux[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
keep_split = np.logical_not(merge)
gtaux[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.FG_DISP]])
gtaux[..., ijt.IJFGBG.FG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.FG_STR]])
gtaux[..., ijt.IJFGBG.BG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.BG_DISP]])
gtaux[...,ijt.IJFGBG.BG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.BG_STR]])
gtaux[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.RMS], gtaux[...,ijt.IJFGBG.RMS_SPLIT]])
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
elif mode == 3:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
            ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9
elif mode == 4:
# strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
#1) replace nan in aux with average gt
aux_nan = np.isnan(gtaux[:,:,:,ijt.IJFGBG.AUX_DISP])
disparity = np.select(
[aux_nan, np.logical_not(aux_nan)],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.AUX_DISP]])
#select FG/BG that is closest to AUX disparity (or DISPARITY if AUX undefined)
use_fg = np.abs(gtaux[...,ijt.IJFGBG.FG_DISP] - disparity) < np.abs(gtaux[...,ijt.IJFGBG.BG_DISP] - disparity)
ds_pair[:,:,:,0] = np.select(
[use_fg, np.logical_not(use_fg)],
[gtaux[:,:,:,ijt.IJFGBG.FG_DISP], gtaux[:,:,:,ijt.IJFGBG.BG_DISP]]
)
ds_pair[:,:,:,1] = np.select(
[use_fg, np.logical_not(use_fg)],
[gtaux[:,:,:,ijt.IJFGBG.FG_STR], gtaux[:,:,:,ijt.IJFGBG.BG_STR]]
)
ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
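    # ds_pair layout produced above: [...,0] - disparity, [...,1] - strength (gaps
    # filled in place), [...,2] - raw AUX disparity kept for the max_main_offset filter.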
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
        x = combo_rds[...,1].flatten(), # strength
        y = combo_rds[...,0].flatten(), # disparity
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
def __init__(self,
topdir_train,
topdir_test,
ml_subdir, #'ml32'
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
                 fgbg_mode = 0, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rms_merge_ratio = 14.0,
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdir = ml_subdir
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
        self.fgbg_mode = fgbg_mode # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rms_merge_ratio = rms_merge_ratio # fixing exported data bug
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
        # new in LWIR - all layers, including AVG, FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
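    # --- Hedged sketch of the log-domain blur above (standalone demo only) ---
    # Blurring log(hist + offset) instead of hist keeps sparse low-count bins
    # from being swamped by nearby peaks; exp() maps the result back:
    #   offs = 0.001 * hist.max()                        # pre_log_offs
    #   blurred = np.exp(gaussian_filter(np.log(hist + offs), sigma=2.0)) - offs
    #   good = blurred >= 0.001 * hist.max()             # hist_cutoff mask
    #   blurred *= good                                  # zero the bad bins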
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
                     disp_thresh = 5.0): # reduce effective variance for higher disparities
"""
For each tile calculate difference between max and min among neighbors and number of qualifying neighbors (bad center is not removed)
data_ds may mismatch with the correlation files - correlation files have data in extrapolated areas and replaced for large difference with GT
"""
        disp_min = np.empty_like(data_ds[...,0], dtype = float)
        disp_max = np.empty_like(disp_min, dtype = float)
        tile_neibs = np.zeros_like(disp_min, dtype = int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
        #disp_thresh
        disp_avar = disp_max - disp_min
        disp_rvar = disp_avar * disp_thresh / np.maximum(disp_max, 0.001) # avoid division by 0 - those tiles will be discarded anyway
        disp_var = np.select([disp_max >= disp_thresh, disp_max < disp_thresh],[disp_rvar,disp_avar])
        return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs), (number of "good" neib. tiles)
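    # --- Hedged 1-D sketch of the padded min/max scan above (toy numbers) ---
    # Padding with dmax for the min scan (and dmin for the max scan) makes
    # bad and out-of-frame tiles neutral, so the shifted-window extrema need
    # no per-offset masking:
    #   disp = np.array([1.0, 3.0, 2.0]); r = 1; dmax = disp.max()
    #   pad = np.full(disp.shape[0] + 2 * r, dmax); pad[r:-r] = disp
    #   dmin = disp.copy()
    #   for off in (0, 2):                       # skip the center offset (1)
    #       np.minimum(dmin, pad[off:off + disp.shape[0]], out=dmin)
    #   # dmin -> [1., 1., 2.]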
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
        for each fixed-grid disparity/strength cell (self.disparity_bins * self.strength_bins = 1000*100) provide the index of the "large"
        variable-size disparity/strength bin, or -1 if this disparity/strength combination should not be used
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
disp_run_tot_new = disp_run_tot
            disp0 = disp # start disparity matching disp_run_tot
while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
disp_run_tot_new += norm_b_hist[:,disp].sum()
disp+=1;
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
            batch_index += 1 # it was not incremented after the last in the column to avoid rounding error
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
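    # --- Hedged note on the equalization above (names below are generic) ---
    # The scan cuts the blurred histogram into disp_bins * str_bins
    # variable-size bins of roughly equal mass, so sampling uniformly over
    # bin indices yields batches balanced in disparity/strength space.
    # A 1-D analogue of the same idea via cumulative sums:
    #   mass = norm_hist.cumsum()                # norm_hist: 1-D histogram
    #   edges = np.searchsorted(mass, np.linspace(0, mass[-1], n_bins + 1))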
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1)
return bb
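    # --- Hedged worked example for the lookup above (illustrative numbers) ---
    # Each tile's (disparity, strength) is quantized to fixed-grid indices,
    # clipped, and mapped through hist_to_batch; the `+ (gt - 1)` term forces
    # zero-strength tiles to -1 (bad):
    #   disp_step = (8.0 - (-0.1)) / 50          # example clip range / bins
    #   db = int((2.3 - (-0.1)) / disp_step)     # -> disparity column 14
    #   # index = hist_to_batch[sb, db] if strength > 0, else -1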
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
disp_var = None, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9
min_var = None, # Minimal tile variance to include
max_var = None, # Maximal tile variance to include
min_neibs = None, # Minimal number of valid tiles to include
                       use_split = False, # Select by single/multi-plane tiles (center only)
                       keep_split = False, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = None, # disparity random for each tile
rnd_plate = None): # disparity random for each plate (now 25 tiles)
if not rnd_tile is None:
self.rnd_tile = rnd_tile
if not rnd_plate is None:
self.rnd_plate = rnd_plate
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
        border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
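    # --- Hedged sketch of the grouping above (variable names are generic) ---
    # Surviving tiles are bucketed by their histogram-bin index; per-bin
    # counts feed the augmentation step that follows:
    #   lst = [[] for _ in range(n_bins)]
    #   for n, b in enumerate(bb[findx].reshape(-1)):
    #       if b >= 0 and not skip[n]:           # skip: border/variance/split
    #           lst[b].append(findx * num_tiles + n)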
    #todo: only use other files if there are not enough choices in the main file!
    '''
    Add random files to the list until each of the (now 40) full_num_choices bins has more
    than the minimal number (now 10) of variants to choose from
    '''
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
break;
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
break;
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
            if not isinstance(tile, np.int64):
print("tile=",tile)
file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
return flist, file_tiles # file indices, list if tile indices for each file
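    # --- Hedged sketch of the accumulation above (pool/counts are generic) ---
    # Extra scenes are drawn without replacement until every histogram bin
    # has enough candidate tiles; per-bin counts simply add up:
    #   counts = num_batch_tiles[seed_index].copy()
    #   while counts.min() < min_choices and pool:
    #       extra = np.random.choice(pool); pool.remove(extra)
    #       counts += num_batch_tiles[extra]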
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
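    # Note (hedged): the choice/remove loop above draws ml_num files without
    # replacement; np.random.choice(mli, ml_num, replace=False).tolist()
    # would appear equivalent, with the explicit loop kept for readability.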
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
        set_ds (from COMBO_DSI) is used to select tile clusters; exported values come from the correlation files.
        target_disparity in the correlation files may differ from data_ds - it is replaced during ImageJ plugin
        export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
indx = 0;
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) #error here - probably wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
        except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchData( #'hor-pairs' is not in list
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
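    # --- Hedged sketch of the cluster-preserving shuffle above (assumes the
    # --- batch length is a multiple of cluster_size) ---
    # Reshaping to (clusters, -1) before permuting keeps each (2r+1)^2 tile
    # cluster contiguous while randomizing cluster order within the batch:
    #   k = cluster_size
    #   perm = np.random.permutation(n_tiles // k)
    #   shuffled = batch.reshape(n_tiles // k, -1)[perm].reshape(n_tiles, -1)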
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
        set_ds (from COMBO_DSI) is used to select tile clusters; exported values come from the correlation files.
        target_disparity in the correlation files may differ from data_ds - it is replaced during ImageJ plugin
        export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
        corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # FIXME: replace hard-coded 81 with the correct tile length
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
indx = 0;
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
sfs = list(set(sweep_indices))
            sfs.sort() # unique sweep indices (files)
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
                this_file_indices = np.nonzero(sweep_indices == sf)[0] # np.nonzero returns a tuple of index arrays, one per dimension; take dim 0
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
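    # --- Hedged toy example of the sweep-file lookup above ---
    # np.add.outer builds a |sweeps| x |tiles| difference table; argmin over
    # axis 0 picks, per tile, the sweep file with the nearest disparity:
    #   sweeps = np.array([0.0, 1.0, 2.0])
    #   want = np.array([0.2, 1.6])
    #   idx = np.abs(np.add.outer(sweeps, -want)).argmin(0)   # -> [0, 2]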
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
        except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # Temporary disable
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchDataLwir(
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def showVariance(self,
                     rds_list, # list of disparity/strength arrays, such as training, testing
                     disp_var_list, # list of disparity variance arrays. Same shape (except last dim) as rds_list
                     num_neibs_list, # list of tile neighbor-count arrays. Same shape (except last dim) as rds_list
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
# plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# for i, combo_rds in enumerate(rds_list):
# for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
# combo_rds[ids][...,1]*= good_tiles_list[i][ids]
# return hist, xedges, yedges
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train5'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set21'
topdir_test = '/data_ssd/lwir_sets/lwir_test5'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_8'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml32b*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32", # 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390251_025390/v01/ml32", # empty space
"/data_ssd/lwir_sets/lwir_test2/1562390257_977146/v01/ml32", # first 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_370347/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test3/1562390402_254007/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390407_382326/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390409_661607/v01/ml32", # lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test3/1562390435_873048/v01/ml32", # 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test3/1562390456_842237/v01/ml32", # near trees
"/data_ssd/lwir_sets/lwir_test3/1562390460_261151/v01/ml32"] # near trees, olga
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
    TEST_SAME_LENGTH_AS_TRAIN = False # True # make the test set have the same number of entries as the train one
    FIXED_TEST_LENGTH = 102 # None # number of test scenes to output (used when making the test set from few or a single test file)
    FIXED_TRAIN_LENGTH = 409 # None # number of train scenes to output (used when making the train set from few or a single train file)
RADIUS = 2 # 5x5
    FRAC_NEIBS_VALID = 0.55 # 8 # LWIR new
    MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all tiles in the plate valid (9 for radius 1, 25 for radius 2)
MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
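    # Worked example (arithmetic only): RADIUS = 2 gives a 5x5 plate of 25
    # tiles, so MIN_NEIBS = round(25 * 0.55) = 14 valid neighbors required.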
VARIANCE_THRESHOLD = 1.2 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 32# 16 # 8
FGBGMODE_TESTS = [4] # 0 - average, 1 - FG, 2 - BG, 3 - AUX
FGBGMODE_TRAIN = 4 # 1 # 0 - average, 1 - FG, 2 - BG, 4 - FG/BG closest to AUX
RND_AMPLITUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
RMS_MERGE_RATIO = 14.0 # fixing bug in exported data - merging FG/BG for near horizontal surfaces
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
RND_AMPLIUDE_TRAIN_TILEW = 2.0 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATEW = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use tile for training if MAIN camera (AUX for LWIR) differs more from GT
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
    USE_SPLIT = False # True, # Select by single/multi-plane tiles (center only)
    KEEP_SPLIT = False # When use_split, keep only multi-plane tiles (false - only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
for fgbgmode_test in FGBGMODE_TESTS:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
            fgbgmode_test, # 0, # export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
RND_AMPLITUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, # TFR directory
RMS_MERGE_RATIO) # fixing bug - merging FG+BG for horizontal surfaces
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdir = MODEL_ML_DIR,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
        fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rms_merge_ratio = RMS_MERGE_RATIO,
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
        # show variance histogram
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
                rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength arrays, such as training, testing
                disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance arrays. Same shape (except last dim) as rds_list
                num_neibs_list = [num_neibs_train, num_neibs_test], # list of tile neighbor-count arrays. Same shape (except last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
                rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength arrays, such as training, testing
                disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance arrays. Same shape (except last dim) as rds_list
                num_neibs_list = [num_neibs_train, num_neibs_test], # list of tile neighbor-count arrays. Same shape (except last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
else:
num_test_scenes = FIXED_TEST_LENGTH
if FIXED_TRAIN_LENGTH is None:
num_train_scenes = len(ex_data.files_train)
else:
num_train_scenes = FIXED_TRAIN_LENGTH
if RADIUS == 0 : # not used
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
            disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (false - only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (false - only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = 1000.0, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = 1000.0, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
#fake (test-like sets built from the train data)
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train sets (NUM_TRAIN_SETS of them)
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d"%(train_var,RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_train_scenes, # len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d"%(train_var,RADIUS)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_train_scenes, # len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/explore_data14.py 0000664 0000000 0000000 00000342114 13517677053 0023600 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
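# Minimal, hedged round-trip sketch (illustration only, not used by the pipeline):
# write a tiny batch with the same feature names this module uses ('corr2d',
# 'target_disparity', 'gt_ds') and read it back with readTFRewcordsEpoch().
# The path '/tmp/example' and the 3-tile shapes are assumptions for the example.
def _example_tfrecords_roundtrip():
    corr2d = np.random.rand(3, 4 * 81).astype(np.float32) # 3 tiles, 4 correlation layers of 9x9
    target_disparity = np.random.rand(3, 1).astype(np.float32)
    gt_ds = np.random.rand(3, 2).astype(np.float32)
    writer = tf.io.TFRecordWriter('/tmp/example.tfrecords')
    f_corr2d = _dtype_feature(corr2d) # maps a float32 ndarray to a FloatList feature
    f_target = _dtype_feature(target_disparity)
    f_gt_ds = _dtype_feature(gt_ds)
    for i in range(corr2d.shape[0]):
        d_feature = {'corr2d': f_corr2d(corr2d[i]),
                     'target_disparity': f_target(target_disparity[i]),
                     'gt_ds': f_gt_ds(gt_ds[i])}
        example = tf.train.Example(features=tf.train.Features(feature=d_feature))
        writer.write(example.SerializeToString())
    writer.close()
    return readTFRewcordsEpoch('/tmp/example') # the reader appends '.tfrecords' itself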
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity, 4 - use FG/BG closest to AUX
random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, #TFR directory
rms_ratio_split = None# Fixing Java export that splits near horizontal surface in bg/fg
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f_EXTRA'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list: # all correlation files
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
'''
Fix bug in the exported data - merge FG/BG back if rms/rms_split < rms_ratio_split
'''
if not rms_ratio_split is None: # should be 3.0 < rms_ratio_split < 5.8)
# merge = img_gt_aux.image[...,ijt.IJFGBG.RMS]/(img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
dmin = 0.5
merge = (img_gt_aux.image[...,ijt.IJFGBG.RMS] <
(np.minimum(np.nan_to_num(img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]), dmin) * img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT] * rms_ratio_split))
keep_split = np.logical_not(merge)
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.FG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.FG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.BG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.BG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.RMS],img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]])
# nn_disparity = np.nan_to_num(rslt[...,0], copy = False)
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
if (export_mode == 4) or (export_mode == 3):
#1) replace nan in aux with average gt
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
aux_nan = np.isnan(img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP])
disparity = np.select(
[aux_nan, np.logical_not(aux_nan)],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY], img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]])
use_fg = np.abs(img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] - disparity) < np.abs(img_gt_aux.image[...,ijt.IJFGBG.BG_DISP] - disparity)
d_gt = np.select(
[use_fg, np.logical_not(use_fg)],
[img_gt_aux.image[...,ijt.IJFGBG.FG_DISP], img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]]
)
s_gt = np.select(
[use_fg, np.logical_not(use_fg)],
[img_gt_aux.image[...,ijt.IJFGBG.FG_STR], img_gt_aux.image[...,ijt.IJFGBG.BG_STR]]
)
if (export_mode == 4):
disparity = d_gt
strength = s_gt
else:
d_gt = disparity
s_gt = strength
extra = np.concatenate((
img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1)
),1)
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)# d_gt.flatten)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt) # s_gt.flatten)
plt.colorbar()
d_gt = np.copy(d_gt)
s_gt = np.copy(s_gt)
#next values may be modified to fill gaps, so copy them before
'''
fill gaps on ground truth slices only
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
'''
Assemble synthetic image, selecting each tile from the nearest available disparity sweep file
Currently, even in mode 3 (aux), only sweep files are used (rounded to the nearest step). Consider
using real measured GT_AUX data (not available currently as ImageJ output, need to modify and rerun).
'''
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select random index from the list - even if no random (it will just be a 1-element list then)
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
if debug > 1:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
corr2d_layer =dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
if not '.tfrecords' in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
try:
os.makedirs(os.path.dirname(tfr_filename))
except OSError:
pass # directory already exists
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
dtype_feature_extra = _dtype_feature(extra)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
e = extra[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z),
'extra': dtype_feature_extra(e)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
print()
sys.stdout.flush()
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
gap_tiles = []
gap_neibs = []
rows = val.shape[0]
cols = wght.shape[1]
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
for npass in range (num_pass):
num_new = 1
max_diff = 0.0;
for tile, neibs in zip (gap_tiles, gap_neibs):
swn = 0.0
sw = 0.0
swd = 0.0;
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
if (wght[tile[0]][tile[1]]) <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
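# Hedged usage sketch for fillGapsByLaplacian() (illustration only, not called by
# the pipeline): a 5x5 disparity grid with a single zero-strength gap in the
# center is filled in place from its 8 neighbors; the filled weight is reduced
# by w_reduce relative to the neighbors' weighted average.
def _example_fill_gaps():
    val = np.full((5, 5), 2.0) # uniform disparity
    wght = np.ones((5, 5)) # uniform strength
    val[2, 2] = 0.0 # unknown disparity...
    wght[2, 2] = 0.0 # ...marked as a gap by zero strength
    fillGapsByLaplacian(val, wght, w_diag=0.7, w_reduce=0.7, num_pass=10)
    return val[2, 2], wght[2, 2] # -> (2.0, 0.7): neighbor average, reduced weight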
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # FIXME: hardcoded tile grid size for this image format
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from the correlation file may now differ from the COMBO-DSI:
1) The target disparities used for correlations are replaced, if they are too far from the rig (GT) values,
by interpolation from available neighbors. If there are no suitable neighbors, target disparity is
derived from the rig data by adding a random offset (specified in the ImageJ plugin configuration, ML section).
2) Correlation is performed around the defined tiles, extrapolating disparity. Rig data may be 0 disparity,
0 strength if there is no rig data for those tiles. That means that such tiles can only be used as peripherals
in (now 5x5) clusters, not for the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
Replace GT data with zero strength with nan, zero strength
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
try:
os.makedirs(os.path.dirname(tfr_filename))
except OSError:
pass # directory already exists
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
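# ExploreData (below) is the core of this generator: it scans the train/test
# directories for GT_AUX DSI files, selects a disparity/strength pair per the
# configured fgbg_mode, builds a 2D disparity/strength histogram to define
# roughly equally populated batch bins, and writes the *.tfrecords epochs;
# a driver constructs it once and then calls makeBatchLists() and
# writeTFRewcordsEpochLwir() per output file.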
class ExploreData:
"""
TODO: add to constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
disparities[i] = float(re.search(".*-D([0-9.]*)\.tiff",f).groups()[0])
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode, #0 - average, 1 - FG, 2 - BG, 3 - AUX, 4 select FG/BG closest to aux
rms_ratio_split = None): # fixing bug in exported data - use rms_ratio_split = 4.0
if not rms_ratio_split is None:
## merge = gtaux[...,ijt.IJFGBG.RMS]/(gtaux[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
dmin = 0.5
merge = (gtaux[...,ijt.IJFGBG.RMS] <
(np.minimum(np.nan_to_num(gtaux[...,ijt.IJFGBG.DISPARITY]), dmin) *
gtaux[...,ijt.IJFGBG.RMS_SPLIT] *
rms_ratio_split))
keep_split = np.logical_not(merge)
gtaux[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.FG_DISP]])
gtaux[..., ijt.IJFGBG.FG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.FG_STR]])
gtaux[..., ijt.IJFGBG.BG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.BG_DISP]])
gtaux[...,ijt.IJFGBG.BG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.BG_STR]])
gtaux[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.RMS], gtaux[...,ijt.IJFGBG.RMS_SPLIT]])
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
elif mode == 3:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9
elif mode == 4:
# strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
#1) replace nan in aux with average gt
aux_nan = np.isnan(gtaux[:,:,:,ijt.IJFGBG.AUX_DISP])
disparity = np.select(
[aux_nan, np.logical_not(aux_nan)],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.AUX_DISP]])
#select FG/BG that is closest to AUX disparity (or DISPARITY if AUX undefined)
use_fg = np.abs(gtaux[...,ijt.IJFGBG.FG_DISP] - disparity) < np.abs(gtaux[...,ijt.IJFGBG.BG_DISP] - disparity)
ds_pair[:,:,:,0] = np.select(
[use_fg, np.logical_not(use_fg)],
[gtaux[:,:,:,ijt.IJFGBG.FG_DISP], gtaux[:,:,:,ijt.IJFGBG.BG_DISP]]
)
ds_pair[:,:,:,1] = np.select(
[use_fg, np.logical_not(use_fg)],
[gtaux[:,:,:,ijt.IJFGBG.FG_STR], gtaux[:,:,:,ijt.IJFGBG.BG_STR]]
)
ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(), # strength (bins/range below are ordered (strength, disparity))
y = combo_rds[...,0].flatten(), # disparity
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
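# Note on getHistogramDSI(): the 2D histogram is accumulated over
# (strength, disparity) with 0/1 weights from good_tiles, so dropped tiles
# contribute nothing; the same mask is then multiplied into the strength
# channel of list_rds, so downstream code (getBB) sees zero strength for
# rejected tiles.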
def __init__(self,
topdir_train,
topdir_test,
ml_subdir, #'ml32'
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
fgbg_mode = 0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
rms_merge_ratio = 14.0,
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdir = ml_subdir
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
self.fgbg_mode = fgbg_mode #0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rms_merge_ratio = rms_merge_ratio # fixing exported data bug
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
# new in LWIR - all layers, including average, FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thesh = 5.0): # reduce effective variance for higher disparities
"""
For each tile calculate difference between max and min among neighbors and number of qualifying neighbors (bad center is not removed)
data_ds may mismatch with the correlation files - correlation files have data in extrapolated areas and replaced for large difference with GT
"""
disp_min = np.empty_like(data_ds[...,0], dtype = np.float)
disp_max = np.empty_like(disp_min, dtype = np.float)
tile_neibs = np.zeros_like(disp_min, dtype = np.int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
#disp_thesh
disp_avar = disp_max - disp_min
disp_rvar = disp_avar * disp_thesh / np.maximum(disp_max, 0.001) # avoid division by 0 - those tiles will be discarded anyway
disp_var = np.select([disp_max >= disp_thesh, disp_max < disp_thesh],[disp_rvar,disp_avar])
return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs), (number of "good" neib. tiles)
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
for each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide number of "large"
variable-size disparity/strength bin, or -1 if this disparity/strength combination does not seem right
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
disp_run_tot_new = disp_run_tot
disp0 = disp # start disparity matching disp_run_tot
while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
disp_run_tot_new += norm_b_hist[:,disp].sum()
disp+=1;
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
batch_index += 1 # it was not incremented after the last in the column to avoid rounding error
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
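# assignBatchBins() effectively equalizes the training distribution: fine
# histogram cells are greedily merged along disparity, then subdivided along
# strength, so each of the disp_bins * str_bins batch bins holds roughly the
# same share of the blurred histogram mass; cells with no data map to -1.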
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
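# good tiles (gt == True) receive their histogram batch index unchanged; tiles
# with zero strength had db/sb forced to 0 above, and the (gt - 1) term then
# subtracts 1, so they come out negative (the [0,0] cell maps to 0 or -1) and
# read as "bad" downstream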
bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1)
return bb
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
disp_var = None, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) maximal is 9)
min_var = None, # Minimal tile variance to include
max_var = None, # Maximal tile variance to include
min_neibs = None, # Minimal number of valid tiles to include
use_split = False, # Select by single/multi-plane tiles (center only)
keep_split = False, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = None, # disparity random for each tile
rnd_plate = None): # disparity random for each plate (now 25 tiles)
if not rnd_tile is None:
self.rnd_tile = rnd_tile
if not rnd_plate is None:
self.rnd_plate = rnd_plate
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=np.bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
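# makeBatchLists() returns, per scene, one list of qualifying tile indices for
# each batch bin (list_of_file_lists[findx][bin]) plus the per-scene/per-bin
# counts (num_batch_tiles[findx, bin]); both are also cached on self for
# augmentBatchFileIndices() below.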
#todo: only use other files if there are not enough choices in the main file!
'''
Add random files to the list until each (now 40) of the full_num_choices has more
than the minimal (now 10) number of variants to choose from
'''
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
break;
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
break;
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
if not isinstance(tile, np.int64): # debug: unexpected tile type
print("tile=",tile)
file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
return flist, file_tiles # file indices, list of tile indices for each file
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different than data_ds - replaced during ImageJ plugin
export if main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
indx = 0;
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) #error here - probably wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData( #'hor-pairs' is not in list
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius)
#shuffle whole clusters within the batch (tiles inside a cluster keep their order)
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
set_ds (from COMBO_DSI) is used to select tile clusters; exported values come from correlation files.
target_disparity for correlation files may differ from data_ds - it is replaced during ImageJ plugin
export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # FIXME: replace hardcoded 81 with the correct per-layer tile size
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
indx = 0
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
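# np.add.outer() forms a (sweep files) x (tiles) matrix of (sweep_disparity - tile_disparity);
# argmin(0) then picks, for each tile, the sweep file whose target disparity is closest
# to the (randomized) tile disparity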
sfs = list(set(sweep_indices))
sfs.sort() # sorted unique sweep indices (files)
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
this_file_indices = np.nonzero(sweep_indices == sf)[0] # np.nonzero returns a tuple of index arrays (one per dimension); [0] selects the only one here
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # Temporary disable
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed_list will be modified, while augment_list keeps the unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchDataLwir( #'hor-pairs' is not in list
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def showVariance(self,
rds_list, # list of disparity/strength files, such as training, testing
disp_var_list, # list of disparity variance files. Same shape (except last dim) as rds_list
num_neibs_list, # list of number of tile neighbor files. Same shape (except last dim) as rds_list
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
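# the boolean good_tiles masks act as 0/1 weights, so the 2D histogram counts only
# tiles that passed the disparity/strength/variance/neighbor filters above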
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
# plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# for i, combo_rds in enumerate(rds_list):
# for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
# combo_rds[ids][...,1]*= good_tiles_list[i][ids]
# return hist, xedges, yedges
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train5'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set21'
topdir_test = '/data_ssd/lwir_sets/lwir_test5'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_9'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml32b*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32", # 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390251_025390/v01/ml32", # empty space
"/data_ssd/lwir_sets/lwir_test2/1562390257_977146/v01/ml32", # first 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_370347/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test3/1562390402_254007/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390407_382326/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390409_661607/v01/ml32", # lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test3/1562390435_873048/v01/ml32", # 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test3/1562390456_842237/v01/ml32", # near trees
"/data_ssd/lwir_sets/lwir_test3/1562390460_261151/v01/ml32"] # near trees, olga
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
TEST_SAME_LENGTH_AS_TRAIN = False # True # make test to have same number of entries as train ones
FIXED_TEST_LENGTH = 102 # None # number of test scenes to output (used when making the test set from a few or a single test file)
FIXED_TRAIN_LENGTH = 409 # None # number of train scenes to output (used when making the train set from a few or a single train file)
RADIUS = 2 # 5x5
FRAC_NEIBS_VALID = 0.55 # 8 # LWIR new
MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all tiles valid (9 for RADIUS = 1, 25 for RADIUS = 2)
MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
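# with RADIUS = 2 and FRAC_NEIBS_VALID = 0.55 this is round(25 * 0.55) = 14 valid neighbors required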
VARIANCE_THRESHOLD = 1.2 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 32# 16 # 8
FGBGMODE_TESTS = [4] # 0 - average, 1 - FG, 2 - BG, 3 - AUX, 4 - FG/BG closest to AUX
FGBGMODE_TRAIN = 4 # 1 # 0 - average, 1 - FG, 2 - BG, 4 - FG/BG closest to AUX
RND_AMPLITUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
RMS_MERGE_RATIO = 4.0 # fixing bug in exported data - merging FG/BG for near horizontal surfaces (3.0 < RMS_MERGE_RATIO <5.8)
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
RND_AMPLIUDE_TRAIN_TILEW = 2.0 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATEW = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use tile for training if MAIN camera (AUX for LWIR) differs more from GT
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
USE_SPLIT = False # True, # Select by single/multi-plane tiles (center only)
KEEP_SPLIT = False # when use_split is set, keep only multi-plane tiles (False - only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
for fgbgmode_test in FGBGMODE_TESTS:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
fgbgmode_test, # 0, # export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
RND_AMPLITUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, # TFR directory
RMS_MERGE_RATIO) # fixing bug - merging FG+BG for horizontal surfaces
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdir = MODEL_ML_DIR,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG, 4 - FG/BG closest to AUX (3 - AUX - not used here)
rms_merge_ratio = RMS_MERGE_RATIO,
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
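# maps each occupied batch bin to one of 4 display levels (from the parity of the bin index
# and of its tens digit) so neighboring bins get distinct colors; empty cells stay 0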
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
# show variance histogram
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape (except last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neighbor files. Same shape (except last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape (except last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neighbor files. Same shape (except last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
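# boolean index trick: False (0) selects files_test, True (1) selects files_train,
# making the test set the same length as the train set when requested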
else:
num_test_scenes = FIXED_TEST_LENGTH
if FIXED_TRAIN_LENGTH is None:
num_train_scenes = len(ex_data.files_train)
else:
num_train_scenes = FIXED_TRAIN_LENGTH
if RADIUS == 0 : # not used
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3, radius = 1, the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
scale_disp = VARIANCE_SCALE_DISPARITY,
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # when use_split is set, keep only multi-plane tiles (False - only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3, radius = 1, the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # when use_split is set, keep only multi-plane tiles (False - only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3, radius = 1, the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split is set, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3, radius = 1, the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split is set, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
#fake
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3, radius = 1, the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split is set, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3, radius = 1, the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split is set, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train 32 sets
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3, radius = 1, the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split is set, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d"%(train_var,RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_train_scenes, # len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3, radius = 1, the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split is set, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d"%(train_var,RADIUS)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_train_scenes, # len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/explore_data15.py 0000664 0000000 0000000 00000343655 13517677053 0023614 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity, 4 - use FG/BG closest to AUX
random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, #TFR directory
rms_ratio_split = None# Fixing Java export that splits near horizontal surface in bg/fg
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f_EXTRA'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list: # all correlation files
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
'''
Fix a bug in the exported data - merge FG/BG back when the combined rms is below the split rms
scaled by rms_ratio_split and by the (clipped) disparity, i.e. when the split is not justified
'''
if rms_ratio_split is not None: # should be 3.0 < rms_ratio_split < 5.8
# merge = img_gt_aux.image[...,ijt.IJFGBG.RMS]/(img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
dmin = 0.5
merge = (img_gt_aux.image[...,ijt.IJFGBG.RMS] <
(np.minimum(np.nan_to_num(img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]), dmin) * img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT] * rms_ratio_split))
keep_split = np.logical_not(merge)
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.FG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.FG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.BG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.BG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.RMS],img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]])
# nn_disparity = np.nan_to_num(rslt[...,0], copy = False)
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
if (export_mode == 4) or (export_mode == 3):
#1) replace nan in aux with average gt
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
aux_nan = np.isnan(img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP])
disparity = np.select(
[aux_nan, np.logical_not(aux_nan)],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY], img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]])
use_fg = np.abs(img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] - disparity) < np.abs(img_gt_aux.image[...,ijt.IJFGBG.BG_DISP] - disparity)
d_gt = np.select(
[use_fg, np.logical_not(use_fg)],
[img_gt_aux.image[...,ijt.IJFGBG.FG_DISP], img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]]
)
s_gt = np.select(
[use_fg, np.logical_not(use_fg)],
[img_gt_aux.image[...,ijt.IJFGBG.FG_STR], img_gt_aux.image[...,ijt.IJFGBG.BG_STR]]
)
if (export_mode == 4):
disparity = d_gt
strength = s_gt
else:
d_gt = disparity
s_gt = strength
extra = np.concatenate((
img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1)
),1)
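# 'extra' packs 5 per-tile diagnostic columns (AUX disparity, FG/BG disparities, rms and
# rms-split) that are written to the TFRecords file alongside the training features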
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)# d_gt.flatten)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt) # s_gt.flatten)
plt.colorbar()
d_gt = np.copy(d_gt)
s_gt = np.copy(s_gt)
# the values above are copied because the gap filling below modifies them in place
'''
fill gaps on ground truth slices only
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
'''
Assemble a synthetic image, selecting each tile from the nearest available disparity sweep file.
Currently, even in mode 3 (AUX), only sweep files are used (rounded to the nearest step). Consider
using real measured GT_AUX data (not currently available as ImageJ output; the export needs to be modified and re-run)
'''
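# Selection sketch (hypothetical numbers): with sweep target disparities [0.0, 0.1, 0.2, ...],
# d = 0.14 and random_offset = 0.05, fit_list collects the sweeps inside [0.09, 0.19] (here just
# the 0.1 one) and one of them is chosen at random; if the interval is empty, the single nearest
# sweep is used instead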
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select a random index from the list - even if randomness is disabled (the list then contains a single element)
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
if debug > 1:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
corr2d_layer =dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
if not '.tfrecords' in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
try:
os.makedirs(os.path.dirname(tfr_filename))
except OSError:
pass
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
dtype_feature_extra = _dtype_feature(extra)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
e = extra[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z),
'extra': dtype_feature_extra(e)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
print()
sys.stdout.flush()
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
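# 8-connected neighborhood: the 4 cardinal neighbors get full weight 1.0, the 4 diagonal
# neighbors are down-weighted by w_diag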
gap_tiles = []
gap_neibs = []
rows = val.shape[0]
cols = wght.shape[1]
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
for npass in range (num_pass):
num_new = 0 # tiles filled for the first time during this pass
max_diff = 0.0
for tile, neibs in zip (gap_tiles, gap_neibs):
swn = 0.0
sw = 0.0
swd = 0.0
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
if (wght[tile[0]][tile[1]]) <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
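def _demo_fillGapsByLaplacian():
    # Minimal usage sketch for fillGapsByLaplacian (hypothetical data; this helper is added
    # for illustration only and is never called by the pipeline). A single zero-weight gap
    # is filled in place with the weighted average of its 8 neighbors (diagonals down-weighted
    # by w_diag), and its new weight is reduced by w_reduce relative to the neighbors' average.
    val = np.array([[1.0, 1.0, 1.0],
                    [2.0, 0.0, 2.0],
                    [3.0, 3.0, 3.0]])
    wght = np.ones(val.shape)
    wght[1][1] = 0.0 # mark the center tile as a gap
    fillGapsByLaplacian(val, wght, w_diag = 0.7, w_reduce = 0.7, num_pass = 10)
    return val, wght # center value becomes (1+3+2+2 + 0.7*(1+1+3+3))/6.8 = 2.0, weight 0.7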
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # FIXME: hardcoded tile grid size (tilesY * tilesX)
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from the correlation file may now differ from the COMBO-DSI:
1) The target disparities used for correlations are replaced by interpolation from available
neighbors if they are too far from the rig (GT) values. If there are no suitable neighbors,
the target disparity is derived from the rig data by adding a random offset (specified in the
ImageJ plugin configuration, ML section)
2) Correlation is performed around the defined tiles, extrapolating disparity. Rig data may be
0 disparity, 0 strength if there is no rig data for those tiles. That means such tiles can only
be used as peripherals in (now 5x5) clusters, not as the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
Replace GT data with zero strength with nan, zero strength
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
try:
os.makedirs(os.path.dirname(tfr_filename))
except OSError:
pass
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
class ExploreData:
"""
TODO: make the patterns below constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (model not in models) or (models[model] < p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
disparities[i] = float(re.search(r".*-D([0-9.]*)\.tiff",f).groups()[0])
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
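# Example of the disparity extraction above, for a sweep file name of the form
# used later in this file (1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff):
#
#   m = re.search(r".*-D([0-9.]*)\.tiff", "1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff")
#   float(m.groups()[0]) # -> 0.0, the target disparity the file was rendered with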
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode, #0 - average, 1 - FG, 2 - BG, 3 - AUX, 4 select FG/BG closest to aux
rms_ratio_split = None): # fixing bug in exported data - use rms_ratio_split = 4.0
if not rms_ratio_split is None:
## merge = gtaux[...,ijt.IJFGBG.RMS]/(gtaux[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
dmin = 0.5
merge = (gtaux[...,ijt.IJFGBG.RMS] <
(np.minimum(np.nan_to_num(gtaux[...,ijt.IJFGBG.DISPARITY]), dmin) *
gtaux[...,ijt.IJFGBG.RMS_SPLIT] *
rms_ratio_split))
keep_split = np.logical_not(merge)
gtaux[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.FG_DISP]])
gtaux[..., ijt.IJFGBG.FG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.FG_STR]])
gtaux[..., ijt.IJFGBG.BG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.BG_DISP]])
gtaux[...,ijt.IJFGBG.BG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.BG_STR]])
gtaux[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.RMS], gtaux[...,ijt.IJFGBG.RMS_SPLIT]])
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
elif mode == 3:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9
elif mode == 4:
# strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
#1) replace nan in aux with average gt
aux_nan = np.isnan(gtaux[:,:,:,ijt.IJFGBG.AUX_DISP])
disparity = np.select(
[aux_nan, np.logical_not(aux_nan)],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.AUX_DISP]])
#select FG/BG that is closest to AUX disparity (or DISPARITY if AUX undefined)
use_fg = np.abs(gtaux[...,ijt.IJFGBG.FG_DISP] - disparity) < np.abs(gtaux[...,ijt.IJFGBG.BG_DISP] - disparity)
ds_pair[:,:,:,0] = np.select(
[use_fg, np.logical_not(use_fg)],
[gtaux[:,:,:,ijt.IJFGBG.FG_DISP], gtaux[:,:,:,ijt.IJFGBG.BG_DISP]]
)
ds_pair[:,:,:,1] = np.select(
[use_fg, np.logical_not(use_fg)],
[gtaux[:,:,:,ijt.IJFGBG.FG_STR], gtaux[:,:,:,ijt.IJFGBG.BG_STR]]
)
ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
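# Minimal sketch of the mode == 4 selection above (pick the FG or BG plane whose
# disparity is closest to AUX), reduced to scalars for one hypothetical tile:
#
#   fg, bg, aux = 2.0, 0.5, 1.8
#   use_fg = abs(fg - aux) < abs(bg - aux) # True: FG is closer to AUX
#   d = fg if use_fg else bg # -> 2.0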
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(), # strength
y = combo_rds[...,0].flatten(), # disparity
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
normed= normalize, # deprecated in newer numpy (use density= there)
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
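# The weights argument above uses the boolean good_tiles mask as per-sample
# weights, so dropped tiles contribute 0 to the histogram. Toy sketch
# (hypothetical values):
#
#   d = np.array([0.5, 2.0, 50.0]) # disparities
#   s = np.array([0.2, 0.15, 0.05]) # strengths; the third tile fails the 0.1 cut
#   good = (s >= 0.1).astype(float)
#   h, _, _ = np.histogram2d(s, d, bins = (2, 2), weights = good)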
def __init__(self,
topdir_train,
topdir_test,
ml_subdir, #'ml32'
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
fgbg_mode = 0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
rms_merge_ratio = 14.0,
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdir = ml_subdir
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
self.fgbg_mode = fgbg_mode #0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rms_merge_ratio = rms_merge_ratio # fixing exported data bug
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
# new in LWIR - all layers, including average, FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thresh = 5.0): # reduce effective variance for higher disparities
"""
For each tile calculate difference between max and min among neighbors and number of qualifying neighbors (bad center is not removed)
data_ds may mismatch with the correlation files - correlation files have data in extrapolated areas and replaced for large difference with GT
"""
disp_min = np.empty_like(data_ds[...,0], dtype = float)
disp_max = np.empty_like(disp_min, dtype = float)
tile_neibs = np.zeros_like(disp_min, dtype = int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
#disp_thresh
disp_avar = disp_max - disp_min
disp_rvar = disp_avar * disp_thresh / np.maximum(disp_max, 0.001) # avoid division by 0 - such tiles will be discarded anyway
disp_var = np.select([disp_max >= disp_thresh, disp_max < disp_thresh],[disp_rvar,disp_avar])
return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs), (number of "good" neib. tiles)
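# The padding trick above: bad tiles are written into bad_max as dmax (and into
# bad_min as dmin), so the shifted-window minimum/maximum passes ignore them.
# The same idea in 1-D (hypothetical values):
#
#   disp = np.array([1.0, np.nan, 3.0]); good = np.isfinite(disp); dmax = 3.0
#   padded = np.full(disp.shape[0] + 2, dmax)
#   padded[1:-1] = np.where(good, disp, dmax) # a bad tile can not lower the min
#   disp_min = np.minimum(padded[:-2], np.minimum(padded[1:-1], padded[2:]))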
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
for each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide number of "large"
variable-size disparity/strength bin, or -1 if this disparity/strength combination does not seem right
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
disp_run_tot_new = disp_run_tot
disp0 = disp # start disparity matching disp_run_tot
while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
disp_run_tot_new += norm_b_hist[:,disp].sum()
disp += 1
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
batch_index += 1 # it was not incremented after the last in the column to avoid rounding error
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
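# Usage sketch matching the __main__ section below (RADIUS == 2 case):
#
#   hist_to_batch = ex_data.assignBatchBins(disp_bins = 10, str_bins = 4)
#
# hist_to_batch then maps each (strength_bin, disparity_bin) histogram cell to
# one of 10 * 4 = 40 variable-size batch bins, or -1 for rejected cells.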
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1)
return bb
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
disp_var = None, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9
min_var = None, # Minimal tile variance to include
max_var = None, # Maximal tile variance to include
min_neibs = None, # Minimal number of valid tiles to include
use_split = False, # Select by single/multi-plane tiles (center only)
keep_split = False, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = None, # disparity random for each tile
rnd_plate = None): # disparity random for each plate (now 25 tiles)
if not rnd_tile is None:
self.rnd_tile = rnd_tile
if not rnd_plate is None:
self.rnd_plate = rnd_plate
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
#todo: only use other files if there are not enough choices in the main file!
'''
Add random files to the list until each (now 40) of the full_num_choices has more
than the minimal number (now 10) of variants to choose from
'''
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
break
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
break
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
if not isinstance(tile, np.int64):
print("tile=",tile) # debug: unexpected tile type
file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
return flist, file_tiles # file indices, list of tile indices for each file
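# Return value sketch (hypothetical numbers): flist = [3, 7] means scene 3 was
# the seed and scene 7 was added to reach min_choices; file_tiles would then be
# [array([12, 40, 41]), array([5])] - sorted flat tile indices for each scene.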
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different than data_ds - replaced dureing ImageJ plugin
export if main camera and the rig (GT) converged on different objects fro the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
indx = 0
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) # a failure here usually means a wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchData(
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
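# The shuffle above permutes whole (2*radius+1)^2-tile clusters, never single
# tiles, so each cluster stays contiguous in the written file. Minimal sketch
# with 2 clusters of 2 tiles each (hypothetical data):
#
#   batch = np.arange(4.0).reshape(4, 1) # 4 tiles = 2 clusters
#   clusters = batch.reshape(2, -1) # one row per cluster
#   shuffled = clusters[np.random.permutation(2)].reshape(4, -1)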
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different than data_ds - replaced dureing ImageJ plugin
export if main camera and the rig (GT) converged on different objects fro the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # TODO: replace the hard-coded 81 (correlation tile length) with the correct value
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
indx = 0
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
sfs = list(set(sweep_indices)) # unique sweep indices (files)
sfs.sort()
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
this_file_indices = np.nonzero(sweep_indices == sf)[0] # indices (into full_tiles) of the batch tiles assigned to this sweep file
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
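# The sweep-file lookup above: np.add.outer(sweep_disparities[scene],
# -disparity_tiles) builds a (num_files x num_tiles) difference table, and
# argmin(0) picks, per tile, the sweep file rendered closest to the requested
# disparity. Sketch (hypothetical values):
#
#   sweep = np.array([0.0, 0.5, 1.0]) # disparities of 3 sweep files
#   want = np.array([0.1, 0.8]) # requested per-tile disparities
#   np.abs(np.add.outer(sweep, -want)).argmin(0) # -> array([0, 2])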
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # temporarily disabled: do not regenerate existing files
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchDataLwir(
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def showVariance(self,
rds_list, # list of disparity/strength files, such as training, testing
disp_var_list, # list of disparity variance files. Same shape(but last dim) as rds_list
num_neibs_list, # list of number of tile neibs files. Same shape(but last dim) as rds_list
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
normed= normalize, # deprecated in newer numpy (use density= there)
weights= np.concatenate(good_tiles_list).flatten())
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
# plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# for i, combo_rds in enumerate(rds_list):
# for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
# combo_rds[ids][...,1]*= good_tiles_list[i][ids]
# return hist, xedges, yedges
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train6'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set21'
topdir_test = '/data_ssd/lwir_sets/lwir_test6'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_10'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml32b*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
'''
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32", # 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390251_025390/v01/ml32", # empty space
"/data_ssd/lwir_sets/lwir_test2/1562390257_977146/v01/ml32", # first 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_370347/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test6/1562390317_693673/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390318_833313/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390326_354823/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390331_483132/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390333_192523/v01/ml32", # lena
"/data_ssd/lwir_sets/lwir_test3/1562390402_254007/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390407_382326/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390409_661607/v01/ml32", # lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test3/1562390435_873048/v01/ml32", # 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test3/1562390456_842237/v01/ml32", # near trees
"/data_ssd/lwir_sets/lwir_test3/1562390460_261151/v01/ml32", # near trees, olga
]
'''
test_sets = [
"/data_ssd/lwir_sets/lwir_test6/1562390317_693673/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390318_833313/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390326_354823/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390331_483132/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390333_192523/v01/ml32", # lena
]
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile data
TEST_SAME_LENGTH_AS_TRAIN = False # True # make the test set have the same number of entries as the train set
FIXED_TEST_LENGTH = 102 # None # number of test scenes to output (used when making the test set from few or a single test file)
FIXED_TRAIN_LENGTH = 409 # None # number of train scenes to output (used when making the train set from few or a single train file)
RADIUS = 2 # 5x5
FRAC_NEIBS_VALID = 0.55 # 8 # LWIR new
MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all tiles valid (9 for RADIUS == 1, 25 for RADIUS == 2)
MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
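# Worked example: with RADIUS = 2 a cluster is 5 * 5 = 25 tiles, so
# MIN_NEIBS = round(25 * 0.55) = round(13.75) = 14 valid neighbors required.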
VARIANCE_THRESHOLD = 1.2 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 32# 16 # 8
FGBGMODE_TESTS = [4] # 0 - average, 1 - FG, 2 - BG, 3 - AUX
FGBGMODE_TRAIN = 4 # 1 # 0 - average, 1 - FG, 2 - BG, 4 - FG/BG closest to AUX
RND_AMPLITUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
RMS_MERGE_RATIO = 4.0 # fixing bug in exported data - merging FG/BG for near horizontal surfaces (3.0 < RMS_MERGE_RATIO <5.8)
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
RND_AMPLIUDE_TRAIN_TILEW = 2.0 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATEW = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use a tile for training if the MAIN camera (AUX for LWIR) differs from GT by more than this
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
USE_SPLIT = False # True, # Select by single/multi-plane tiles (center only)
KEEP_SPLIT = False # When USE_SPLIT, keep only multi-plane tiles (false - only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
for fgbgmode_test in FGBGMODE_TESTS:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
fgbgmode_test, # 0, # export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
RND_AMPLITUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, # TFR directory
RMS_MERGE_RATIO) # fixing bug - merging FG+BG for horizontal surfaces
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdir = MODEL_ML_DIR,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG, 4 - FG/BG closest to AUX (3 - AUX - not used here)
rms_merge_ratio = RMS_MERGE_RATIO,
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
# show variance histogram
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape (except the last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neighbors files. Same shape (except the last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape (except the last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neighbors files. Same shape (except the last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
else:
num_test_scenes = FIXED_TEST_LENGTH
if FIXED_TRAIN_LENGTH is None:
num_train_scenes = len(ex_data.files_train)
else:
num_train_scenes = FIXED_TRAIN_LENGTH
if RADIUS == 0 : # not used
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
scale_disp = VARIANCE_SCALE_DISPARITY,
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (false - only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (false - only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
#fake
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train 32 sets
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d"%(train_var,RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_train_scenes, # len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - macximal is 9
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select y single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When sel_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d"%(train_var,RADIUS)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_train_scenes, # len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/explore_data16.py 0000664 0000000 0000000 00000320420 13517677053 0023576 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
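# Usage sketch (hypothetical path; '.tfrecords' is appended if missing):
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch("/data_ssd/.../train000_R2")
# # corr2d: (tiles, flattened 2D correlations), target_disparity: (tiles,), gt_ds: (tiles, 2) disparity/strength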
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity, 4 - use FG/BG closest to AUX
random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, #TFR directory
rms_ratio_split = None # Fixing Java export that splits a near-horizontal surface into bg/fg
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f_EXTRA'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list: # all correlation files
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
'''
Fix bug in the exported data - merge FG/BG back where the single-plane RMS is small
compared to the split RMS scaled by disparity (see the criterion below)
'''
if rms_ratio_split is not None: # should be 3.0 < rms_ratio_split < 5.8
# merge = img_gt_aux.image[...,ijt.IJFGBG.RMS]/(img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
dmin = 0.5
merge = (img_gt_aux.image[...,ijt.IJFGBG.RMS] <
(np.minimum(np.nan_to_num(img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]), dmin) * img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT] * rms_ratio_split))
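# Active criterion: merge FG/BG back into the average where RMS < min(disparity, dmin) * RMS_SPLIT * rms_ratio_split,
# with NaN disparity treated as 0 (the plain rms/rms_split ratio test is kept commented out above).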
keep_split = np.logical_not(merge)
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.FG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.FG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY],img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]])
img_gt_aux.image[...,ijt.IJFGBG.BG_STR] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.STRENGTH],img_gt_aux.image[...,ijt.IJFGBG.BG_STR]])
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge,keep_split],
[img_gt_aux.image[...,ijt.IJFGBG.RMS],img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT]])
# nn_disparity = np.nan_to_num(rslt[...,0], copy = False)
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
if (export_mode == 4) or (export_mode == 3):
#1) replace nan in aux with average gt
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
aux_nan = np.isnan(img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP])
disparity = np.select(
[aux_nan, np.logical_not(aux_nan)],
[img_gt_aux.image[...,ijt.IJFGBG.DISPARITY], img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]])
use_fg = np.nan_to_num(np.abs(img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] - disparity)) < np.nan_to_num(np.abs(img_gt_aux.image[...,ijt.IJFGBG.BG_DISP] - disparity))
d_gt = np.select(
[use_fg, np.logical_not(use_fg)],
[img_gt_aux.image[...,ijt.IJFGBG.FG_DISP], img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]]
)
s_gt = np.select(
[use_fg, np.logical_not(use_fg)],
[img_gt_aux.image[...,ijt.IJFGBG.FG_STR], img_gt_aux.image[...,ijt.IJFGBG.BG_STR]]
)
if (export_mode == 4):
disparity = d_gt
strength = s_gt
else:
d_gt = disparity
s_gt = strength
extra = np.concatenate((
img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.FG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.BG_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1)
),1)
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)# d_gt.flatten)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt) # s_gt.flatten)
plt.colorbar()
#next values may be modified to fill gaps, so copy them first
d_gt = np.copy(d_gt)
s_gt = np.copy(s_gt)
'''
fill gaps on ground truth slices only
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
'''
Assemble a synthetic image, selecting each tile from the nearest available disparity sweep file.
Currently even in mode 3 (aux) only sweep files are used (rounded to the nearest step). Consider
using real GT_AUX measurements (not available currently as ImageJ output; need to modify + rerun).
'''
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select random index from the list - even if no random (it will just be a 1-element list then)
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
if debug > 1:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
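# Each corr2d row packs ncorr2_layers tile patches of tileH*tileW values each, layer-major
# (see the reshape in the loop below).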
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
corr2d_layer =dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
if not '.tfrecords' in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
try:
os.makedirs(os.path.dirname(tfr_filename))
except:
pass
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
dtype_feature_extra = _dtype_feature(extra)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
e = extra[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z),
'extra': dtype_feature_extra(e)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
print()
sys.stdout.flush()
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
gap_tiles = []
gap_neibs = []
rows = val.shape[0]
cols = wght.shape[1]
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
for npass in range (num_pass):
num_new = 0 # tiles that get a non-zero weight for the first time in this pass
max_diff = 0.0
for tile, neibs in zip (gap_tiles, gap_neibs):
swn = 0.0
sw = 0.0
swd = 0.0
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
if (wght[tile[0]][tile[1]]) <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
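# Minimal usage sketch (hypothetical values; both arrays are modified in place):
# d = np.ones((3, 3)) # disparity values; the value at the gap is ignored
# w = np.ones((3, 3)) # weights/strengths
# w[1, 1] = 0.0 # zero weight marks the center tile as a gap
# fillGapsByLaplacian(d, w)
# # now d[1, 1] == 1.0 (weighted average of its 8 neighbors) and w[1, 1] == 0.7 (= w_reduce)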
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # fixme
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from correlation file, it now may differ from the COMBO-DSI:
1) The target disparities used for correlations are replaced if they are too far from the rig (GT) values and
replaced by interpolation from available neighbors. If there are no suitable neighbors, target disparity is
derived from the rig data by adding a random offset (specified in ImageJ plugin configuration ML section)
2) correlation is performed around the defined tiles extrapolating disparity. rig data may be 0 disparity,
0 strength if there is no rig data for those tiles. That means that such tiles can only be used as peripherals
in (now 5x5) clusters, not for the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
Replace GT data with zero strength with nan, zero strength
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
try:
os.makedirs(os.path.dirname(tfr_filename))
except:
pass
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
class ExploreData:
"""
TODO: add to constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (model not in models) or (models[model] < p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (model not in models) or (models[model] < p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_names = ["ml32b","ml32"]):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
for ml_name in ml_names:
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
if len(sweep_list):
break # found existing and non-empty ML directory
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
disparities[i] = float(re.search(".*-D([0-9.]*)\.tiff",f).groups()[0])
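# e.g. a hypothetical ".../1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.50000.tiff" parses to 0.5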
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode, #0 - average, 1 - FG, 2 - BG, 3 - AUX, 4 - select FG/BG closest to AUX
rms_ratio_split = None): # fixing bug in exported data - use rms_ratio_split = 4.0
if not rms_ratio_split is None:
## merge = gtaux[...,ijt.IJFGBG.RMS]/(gtaux[...,ijt.IJFGBG.RMS_SPLIT]+1e-6) < rms_ratio_split
dmin = 0.5
merge = (gtaux[...,ijt.IJFGBG.RMS] <
(np.minimum(np.nan_to_num(gtaux[...,ijt.IJFGBG.DISPARITY]), dmin) *
gtaux[...,ijt.IJFGBG.RMS_SPLIT] *
rms_ratio_split))
keep_split = np.logical_not(merge)
gtaux[...,ijt.IJFGBG.FG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.FG_DISP]])
gtaux[..., ijt.IJFGBG.FG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.FG_STR]])
gtaux[..., ijt.IJFGBG.BG_DISP] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.BG_DISP]])
gtaux[...,ijt.IJFGBG.BG_STR] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.STRENGTH], gtaux[...,ijt.IJFGBG.BG_STR]])
gtaux[...,ijt.IJFGBG.RMS_SPLIT] = np.select(
[merge, keep_split],
[gtaux[...,ijt.IJFGBG.RMS], gtaux[...,ijt.IJFGBG.RMS_SPLIT]])
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
elif mode == 3:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9
elif mode == 4:
# strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
#1) replace nan in aux with average gt
aux_nan = np.isnan(gtaux[:,:,:,ijt.IJFGBG.AUX_DISP])
disparity = np.select(
[aux_nan, np.logical_not(aux_nan)],
[gtaux[...,ijt.IJFGBG.DISPARITY], gtaux[...,ijt.IJFGBG.AUX_DISP]])
#select FG/BG that is closest to AUX disparity (or DISPARITY if AUX undefined)
use_fg = np.abs(gtaux[...,ijt.IJFGBG.FG_DISP] - disparity) < np.abs(gtaux[...,ijt.IJFGBG.BG_DISP] - disparity)
ds_pair[:,:,:,0] = np.select(
[use_fg, np.logical_not(use_fg)],
[gtaux[:,:,:,ijt.IJFGBG.FG_DISP], gtaux[:,:,:,ijt.IJFGBG.BG_DISP]]
)
ds_pair[:,:,:,1] = np.select(
[use_fg, np.logical_not(use_fg)],
[gtaux[:,:,:,ijt.IJFGBG.FG_STR], gtaux[:,:,:,ijt.IJFGBG.BG_STR]]
)
ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
# print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(), # strength (histogram rows)
y = combo_rds[...,0].flatten(), # disparity (histogram columns)
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
def __init__(self,
topdir_train,
topdir_test,
ml_subdirs, #["ml32b","ml32"] # subdirectory with the ML disparity sweep files (use [0] first!)
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
fgbg_mode = 0, # 0 - average, 1 - FG, 2 - BG, 4 - FG/BG closest to AUX (3 - AUX - not used here)
rms_merge_ratio = 14.0,
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdirs = ml_subdirs
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
self.fgbg_mode = fgbg_mode #0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rms_merge_ratio = rms_merge_ratio # fixing exported data bug
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
# new in LWIR - all layers, including average, FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode, self.rms_merge_ratio)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode, self.rms_merge_ratio)
## self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
## self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdirs)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdirs)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
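# Blur the histogram in log space: a small offset before the log keeps zero cells finite, and
# exponentiating after the Gaussian blur keeps the result non-negative before thresholding.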
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thesh = 5.0): # reduce effective variance for higher disparities
"""
For each tile calculate difference between max and min among neighbors and number of qualifying neighbors (bad center is not removed)
data_ds may not match the correlation files - correlation files have data in extrapolated areas, replaced where the difference from GT is large
"""
disp_min = np.empty_like(data_ds[...,0], dtype = np.float)
disp_max = np.empty_like(disp_min, dtype = np.float)
tile_neibs = np.zeros_like(disp_min, dtype = np.int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
#disp_thesh: reduce effective variance for higher disparities
disp_avar = disp_max - disp_min
disp_rvar = disp_avar * disp_thesh / np.maximum(disp_max, 0.001) # avoid division by zero - those tiles will be discarded anyway
disp_var = np.select([disp_max >= disp_thesh, disp_max < disp_thesh],[disp_rvar,disp_avar])
return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neighbors), (number of "good" neighbor tiles)
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
for each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide number of "large"
variable-size disparity/strength bin, or -1 if this disparity/strength combination does not seem right
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
disp_run_tot_new = disp_run_tot
disp0 = disp # start disparity matching disp_run_tot
while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
disp_run_tot_new += norm_b_hist[:,disp].sum()
disp += 1
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
batch_index += 1 # it was not incremented after the last in the column to avoid rounding error
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1)
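# In the line above, gt masks tiles with zero strength: their indices collapse to cell (0, 0)
# and the trailing "+ (gt - 1)" subtracts 1 from them (intended to flag them as bad, < 0),
# while valid tiles (gt == True) keep their histogram index unchanged.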
return bb
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
disp_var = None, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
min_var = None, # Minimal tile variance to include
max_var = None, # Maximal tile variance to include
min_neibs = None, # Minimal number of valid tiles to include
use_split = False, # Select by single/multi-plane tiles (center only)
keep_split = False, # When use_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = None, # disparity random for each tile
rnd_plate = None): # disparity random for each plate (now 25 tiles)
if not rnd_tile is None:
self.rnd_tile = rnd_tile
if not rnd_plate is None:
self.rnd_plate = rnd_plate
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=np.bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
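# note: the "not >=" / "not <" forms (instead of "<" / ">=") also reject NaN variances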
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
#todo: only use other files if there are not enough choices in the main file!
'''
Add random files to the list until each (now 40) of the full_num_choices has more
than the minimal (now 10) number of variants to choose from
'''
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
break
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
break
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
if not isinstance(tile, np.int64): # debug: unexpected element type from np.random.choice
print("tile=",tile)
file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
return flist, file_tiles # file indices, list of tile indices for each file
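# --- Illustrative sketch (not called by this module), assuming a toy num_batch_tiles
# array. augmentBatchFileIndices() above keeps drawing random extra scene files until
# every histogram bin has at least min_choices candidate tiles (or it runs out of
# files); this standalone rewrite shows only that accumulation logic.
def _sketch_augment_counts(num_batch_tiles, seed_index, min_choices = 10, max_files = 10):
    counts = num_batch_tiles[seed_index].copy()
    flist = [seed_index]
    pool = [i for i in range(num_batch_tiles.shape[0]) if i != seed_index]
    while (counts.min() < min_choices) and (len(flist) < max_files) and pool:
        findx = np.random.choice(pool)
        pool.remove(findx)
        flist.append(findx)
        counts += num_batch_tiles[findx] # per-bin candidate counts grow with each file
    return flist, counts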
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
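# --- Note (sketch, not called by this module): the remove-from-pool loop in
# getBatchData() samples ml files without replacement; with NumPy the same selection
# can be written in one call, assuming 0 < ml_num < len(scene_ml_files):
def _sketch_pick_ml_files(scene_ml_files, ml_num):
    picked = np.random.choice(len(scene_ml_files), ml_num, replace = False)
    return [scene_ml_files[i] for i in picked]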
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different from data_ds - replaced during ImageJ plugin
export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
indx = 0
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) # raises ValueError here if num_scene_files == 0 - probably wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
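# --- Illustrative sketch (not called by this module): the neighbor expansion used in
# prepareBatchData(). Each center tile index nt becomes a (2*radius+1)**2 block of
# linear indices in row-major order, clipped at the image borders (edge tiles repeat
# their clipped neighbors).
def _sketch_expand_neighbors(nt, width, height, radius):
    ty, tx = nt // width, nt % width
    out = []
    for dy in range(-radius, radius + 1):
        y = np.clip(ty + dy, 0, height - 1)
        for dx in range(-radius, radius + 1):
            x = np.clip(tx + dx, 0, width - 1)
            out.append(y * width + x)
    return out # len(out) == (2 * radius + 1) ** 2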
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
if '.tfrecords' not in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchData( # failed here with "'hor-pairs' is not in list" when correlation layers were missing
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
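# --- Illustrative sketch (not called by this module): the cluster-preserving shuffle
# used above when writing an epoch. Tiles are grouped into clusters of cluster_size
# (the (2*radius+1)**2 neighbors of one center), whole clusters are permuted, and the
# result is flattened back, so every tile keeps its neighbors next to it.
def _sketch_shuffle_clusters(batch, cluster_size):
    tiles_in_batch = batch.shape[0] # assumed to be a multiple of cluster_size
    clusters = batch.reshape((tiles_in_batch // cluster_size, -1))
    permut = np.random.permutation(clusters.shape[0])
    return clusters[permut].reshape((tiles_in_batch, -1))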
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different from data_ds - replaced during ImageJ plugin
export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # FIXME: hardcoded 81 (9x9 correlation tile) - derive from the data instead
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
indx = 0
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
sfs = list(set(sweep_indices))
sfs.sort() # unique, sorted sweep indices (files)
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
this_file_indices = np.nonzero(sweep_indices == sf)[0] # np.nonzero returns a tuple of index arrays; [0] selects the 1-D indices
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
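# --- Illustrative sketch (not called by this module): the nearest-sweep-file lookup
# from prepareBatchDataLwir(). np.add.outer() builds a (num_sweep_files, num_tiles)
# table of differences; argmin over axis 0 then picks, for every tile, the sweep file
# whose rendered target disparity is closest to the requested one.
def _sketch_nearest_sweep(scene_sweep_disparities, wanted_disparities):
    diff = np.abs(np.add.outer(scene_sweep_disparities, -wanted_disparities))
    return diff.argmin(0) # sweep-file index for each tile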
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
if '.tfrecords' not in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # nothing to do - remove the existing file to regenerate
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchDataLwir( # failed here with "'hor-pairs' is not in list" when correlation layers were missing
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
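# --- Illustrative sketch (not called by this module): decoding the suffix that
# writeTFRewcordsEpochLwir() appends to tfr_filename above
# ("-RT%1.2f-RP%1.2f-M%d-NB%d" plus an optional "-FB1"/"-FB2" when use_split is set).
def _sketch_parse_tfr_suffix(name):
    m = re.search(r"-RT([0-9.]+)-RP([0-9.]+)-M(\d+)-NB(\d+)(-FB[12])?", name)
    if m is None:
        return None
    return (float(m.group(1)), # rnd_tile
            float(m.group(2)), # rnd_plate
            int(m.group(3)),   # fgbg_mode
            int(m.group(4)),   # min_neibs
            m.group(5))        # "-FB1"/"-FB2" or None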
def showVariance(self,
rds_list, # list of disparity/strength arrays, such as training, testing
disp_var_list, # list of disparity variance arrays. Same shape (but last dim) as rds_list
num_neibs_list, # list of number-of-tile-neighbors arrays. Same shape (but last dim) as rds_list
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
normed= normalize, # 'normed' was removed in newer NumPy - use 'density' there
weights= np.concatenate(good_tiles_list).flatten())
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
topdir_train = '/data_ssd/lwir_sets/lwir_train6'# ''
try:
topdir_test = sys.argv[2]
except IndexError:
topdir_test = '/data_ssd/lwir_sets/lwir_test6'
try:
pathTFR = sys.argv[3]
except IndexError:
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_11'
# not currently used. Will need to accept lists, not just single dirs
try:
ml_subdir = sys.argv[4]
except IndexError:
ml_subdir = "ml32*" # use latest!
try:
ml_pattern = sys.argv[5]
except IndexError:
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
test_corrs = []
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32b", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32b", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32b", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32b", # 2 trees
"/data_ssd/lwir_sets/lwir_test6/1562390251_025390/v01/ml32b", # empty space
"/data_ssd/lwir_sets/lwir_test6/1562390257_977146/v01/ml32b", # first 3
"/data_ssd/lwir_sets/lwir_test6/1562390260_370347/v01/ml32b", # all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32b", # all 3
"/data_ssd/lwir_sets/lwir_test6/1562390317_693673/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390318_833313/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390326_354823/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390331_483132/v01/ml32", # andrey + olga
"/data_ssd/lwir_sets/lwir_test6/1562390333_192523/v01/ml32", # lena
"/data_ssd/lwir_sets/lwir_test6/1562390402_254007/v01/ml32b", # near moving car
"/data_ssd/lwir_sets/lwir_test6/1562390407_382326/v01/ml32b", # near moving car
"/data_ssd/lwir_sets/lwir_test6/1562390409_661607/v01/ml32b", # lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test6/1562390435_873048/v01/ml32b", # 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test6/1562390456_842237/v01/ml32b", # near trees
"/data_ssd/lwir_sets/lwir_test6/1562390460_261151/v01/ml32b"] # near trees, olga
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
TEST_SAME_LENGTH_AS_TRAIN = False # True # make the test set have the same number of entries as the train one
FIXED_TEST_LENGTH = 102 # None # number of test scenes to output (used when making the test set from a few or a single test file)
FIXED_TRAIN_LENGTH = 409 # None # number of train scenes to output (used when making the train set from a few or a single train file)
RADIUS = 2 # 5x5
FRAC_NEIBS_VALID = 0.55 # 8 # LWIR new
MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all tiles valid (25 for RADIUS == 2)
MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
VARIANCE_THRESHOLD = 1.2 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 32# 16 # 8
FGBGMODE_TESTS = [4] # 0 - average, 1 - FG, 2 - BG, 3 - AUX, 4 - FG/BG closest to AUX
FGBGMODE_TRAIN = 4 # 1 # 0 - average, 1 - FG, 2 - BG, 4 - FG/BG closest to AUX
RND_AMPLITUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
RMS_MERGE_RATIO = 4.0 # fixing bug in exported data - merging FG/BG for near horizontal surfaces (3.0 < RMS_MERGE_RATIO <5.8)
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
RND_AMPLIUDE_TRAIN_TILEW = 2.0 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATEW = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use tile for training if MAIN camera (AUX for LWIR) differs more from GT
MODEL_ML_DIRS = ["ml32b","ml32"] # subdirectory with the ML disparity sweep files (use [0] first!)
USE_SPLIT = False # True # select by single/multi-plane tiles (center only)
KEEP_SPLIT = False # when use_split, keep only multi-plane tiles (False - only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
for fgbgmode_test in FGBGMODE_TESTS:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
fgbgmode_test, # 0, # export_mode: 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
RND_AMPLITUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR, # TFR directory
RMS_MERGE_RATIO) # fixing bug - merging FG+BG for horizontal surfaces
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdirs = MODEL_ML_DIRS,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
fgbg_mode = FGBGMODE_TRAIN, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
rms_merge_ratio = RMS_MERGE_RATIO,
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
# show variance histogram
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength arrays, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance arrays. Same shape (but last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number-of-tile-neighbors arrays. Same shape (but last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength arrays, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance arrays. Same shape (but last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number-of-tile-neighbors arrays. Same shape (but last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
else:
num_test_scenes = FIXED_TEST_LENGTH
if FIXED_TRAIN_LENGTH is None:
num_train_scenes = len(ex_data.files_train)
else:
num_train_scenes = FIXED_TRAIN_LENGTH
if RADIUS == 0 : # not used
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
scale_disp = VARIANCE_SCALE_DISPARITY,
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # when use_split, keep only multi-plane tiles (False - only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # when use_split, keep only multi-plane tiles (False - only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
#fake
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train 32 sets
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d"%(train_var,RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_train_scenes, # len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # when use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d"%(train_var,RADIUS)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_train_scenes, # len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/explore_data6.py 0000664 0000000 0000000 00000317661 13517677053 0023532 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR #TFR directory
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list:
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
elif export_mode == 3:
disparity = img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
if export_mode == 3:
d_gt = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
s_gt = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
else:
d_gt = disparity
s_gt = strength
#next values may be modified to fill gaps, so copy them before
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)# d_gt.flatten)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt) # s_gt.flatten)
plt.colorbar()
d_gt = np.copy(d_gt)
s_gt = np.copy(s_gt)
'''
fill gaps: up,down,right,left until done
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
'''
Assemble synthetic image, selecting each tile from the nearest available disparity sweep file
'''
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select random index from the list - even if no random (it will just be a 1-element list then)
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
if debug > 0:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
corr2d_layer =dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
if '.tfrecords' not in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
try:
os.makedirs(os.path.dirname(tfr_filename))
except OSError:
pass # directory already exists
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
gap_tiles = []
gap_neibs = []
rows = val.shape[0]
cols = wght.shape[1]
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
for npass in range (num_pass):
num_new = 0 # tiles filled for the first time in this pass
max_diff = 0.0
for tile, neibs in zip (gap_tiles, gap_neibs):
swn = 0.0
sw = 0.0
swd = 0.0
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
if (wght[tile[0]][tile[1]]) <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
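# --- Illustrative sketch (not called by this module): fillGapsByLaplacian() on a toy
# 3x3 grid with one zero-weight gap in the center. After the call the center value is
# interpolated from its eight neighbors and gets a reduced, non-zero weight.
def _sketch_fill_gaps_demo():
    val  = np.array([[1., 1., 1.], [1., 0., 1.], [1., 1., 1.]])
    wght = np.array([[1., 1., 1.], [1., 0., 1.], [1., 1., 1.]])
    fillGapsByLaplacian(val, wght, w_diag = 0.7, w_reduce = 0.7, num_pass = 10)
    return val[1][1], wght[1][1] # expected: value 1.0, weight 0.7 (= w_reduce)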
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # FIXME: hardcoded full-image tile count - should come from the image dimensions
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from correlation file, it now may differ from the COMBO-DSI:
1) The target disparities used for correlations are replaced if they are too far from the rig (GT) values and
replaced by interpolation from available neighbors. If there are no suitable neighbors, target disparity is
derived from the rig data by adding a random offset (specified in ImageJ plugin configuration ML section)
2) correlation is performed around the defined tiles, extrapolating disparity. Rig data may have 0 disparity,
0 strength if there is no rig data for those tiles. That means that such tiles can only be used as peripherals
in (now 5x5) clusters, not as the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
Replace GT data with zero strength with nan, zero strength
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
if '.tfrecords' not in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
try:
os.makedirs(os.path.dirname(tfr_filename))
except OSError:
pass # directory already exists
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
class ExploreData:
"""
TODO: add to constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (model not in models) or (models[model] < p):
models[model] = p
tlist = list(models.values())
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
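# --- Note (sketch, not called by this class): the fixed 5-level directory loop above
# approximates a recursive scan; on Python 3.5+ the same file list can be produced
# with a single recursive glob (any depth), assuming unbounded depth is acceptable:
def _sketch_recursive_glob(top_dir, pattern):
    return sorted(glob.glob(os.path.join(top_dir, '**', pattern), recursive = True))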
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (model not in models) or (models[model] < p):
models[model] = p
tlist = list(models.values())
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
disparities[i] = float(re.search(r".*-D([0-9.]*)\.tiff",f).groups()[0]) # raw string: '\.' is an invalid escape in a normal string
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
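# --- Illustrative sketch (not called by this module): the filename convention parsed
# by getMLSweepFiles(). The disparity a sweep file was rendered at is encoded after
# "-D" in the name, e.g. "1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff"
# decodes to 0.0.
def _sketch_sweep_disparity(path):
    m = re.search(r".*-D([0-9.]*)\.tiff", path)
    return float(m.groups()[0]) if m else None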
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode): #0 - average, 1 - FG, 2 - BG, 3 - AUX
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
elif mode == 3:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
            ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9
ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
            x = combo_rds[...,1].flatten(), # strength (histogram rows)
            y = combo_rds[...,0].flatten(), # disparity (histogram columns)
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
            density= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
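    # Sketch of the masking idea in getHistogramDSI() above: passing the boolean
    # good-tile mask as histogram weights makes rejected tiles contribute 0 to every
    # bin, so no separate filtering pass is needed. Toy example with assumed values:
    #   import numpy as np
    #   d = np.array([0.5, 2.0, 9.0]); s = np.array([0.2, 0.3, 0.4])
    #   good = np.array([1.0, 1.0, 0.0])  # third tile rejected
    #   h, _, _ = np.histogram2d(s, d, bins=(2, 2),
    #                            range=((0.1, 0.9), (-0.1, 8.0)), weights=good)
    #   # h counts only the first two tiles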
def __init__(self,
topdir_train,
topdir_test,
ml_subdir, #'ml32'
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
                 fgbg_mode = 0, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdir = ml_subdir
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
        self.fgbg_mode = fgbg_mode # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
        # new in LWIR - all layers, including the average, FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
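    # Sketch of the log-domain smoothing done at the end of __init__() above: blurring
    # log(hist + eps) instead of hist keeps bins that differ by orders of magnitude
    # comparable, and exp() restores the original scale. Toy example, assumed values:
    #   import numpy as np
    #   from scipy.ndimage.filters import gaussian_filter
    #   hist = np.array([[1e4, 1.0], [1.0, 1e-2]])
    #   eps = 0.001 * hist.max()
    #   smoothed = np.exp(gaussian_filter(np.log(hist + eps), sigma = 2.0)) - eps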
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thesh = 5.0): # reduce effective variance for higher disparities
"""
        For each tile calculate the difference between the maximal and minimal disparity among its neighbors, and the number of qualifying neighbors (a bad center tile is not removed).
        data_ds may mismatch the correlation files: correlation files have data in extrapolated areas, with tiles replaced where they differ too much from the GT.
"""
        disp_min = np.empty_like(data_ds[...,0], dtype = float)
        disp_max = np.empty_like(disp_min, dtype = float)
        tile_neibs = np.zeros_like(disp_min, dtype = int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
#disp_thesh
disp_avar = disp_max - disp_min
disp_rvar = disp_avar * disp_thesh / np.maximum(disp_max, 0.001) # removing division by 0 error - those tiles will be anyway discarded
disp_var = np.select([disp_max >= disp_thesh, disp_max < disp_thesh],[disp_rvar,disp_avar])
        return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs), (number of "good" neighbor tiles)
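    # Sketch of the padded windowed min/max trick used in exploreNeibs() above:
    # shifting a padded array by every offset and folding with np.minimum/np.maximum
    # yields per-tile neighborhood extrema without per-tile loops. A 1-D analogue
    # with assumed data:
    #   import numpy as np
    #   disp = np.array([1.0, 5.0, 2.0]); r = 1
    #   pad = np.full(disp.shape[0] + 2 * r, disp.max()) # pad so borders cannot win
    #   pad[r:-r] = disp
    #   w_min = np.minimum.reduce([pad[o:o + disp.shape[0]] for o in range(2 * r + 1)])
    #   # w_min -> array([1., 1., 2.])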
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
        for each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide the index of a "large"
        variable-size disparity/strength bin, or -1 if this disparity/strength combination does not seem right
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
disp_run_tot_new = disp_run_tot
            disp0 = disp # start disparity matching disp_run_tot
while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
disp_run_tot_new += norm_b_hist[:,disp].sum()
disp+=1;
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
            batch_index += 1 # it was not incremented after the last column entry, to avoid rounding errors
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1)
return bb
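    # Worked example of the quantization in getBB() above, using the defaults set in
    # the __main__ section below (disparity clip [-0.1, 8.0], 50 disparity bins):
    #   disp_step = (8.0 - (-0.1)) / 50       # = 0.162
    #   db = int((1.5 - (-0.1)) / disp_step)  # disparity 1.5 -> bin 9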
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
                       disp_var = None,   # difference between maximal and minimal disparity for each scene, each tile
                       disp_neibs = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
                       min_var = None,    # Minimal tile variance to include
                       max_var = None,    # Maximal tile variance to include
                       min_neibs = None,  # Minimal number of valid tiles to include
                       use_split = False, # Select by single-/multi-plane tiles (center only)
                       keep_split = False): # When use_split, keep only multi-plane tiles (False - only single-plane)
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
        border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
    #todo: only use other files if there are not enough choices in the main file!
    '''
    Add random files to the list until each (now 40) of the full_num_choices has more
    than the minimal (now 10) number of variants to choose from
    '''
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
break;
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
break;
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
            if not isinstance(tile, np.int64):
                print("tile=", tile)
file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
return flist, file_tiles # file indices, list if tile indices for each file
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
        target_disparity for correlation files may be different from data_ds - replaced during ImageJ plugin
        export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
indx = 0;
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
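            # Example of the flat-index <-> (row, col) mapping above (assumed 20-tile-wide
            # grid, radius = 1): tile 21 is at (ty, tx) = (1, 1), so its 3x3 cluster is
            #   [y * 20 + x for y in (0, 1, 2) for x in (0, 1, 2)]
            # i.e. [0, 1, 2, 20, 21, 22, 40, 41, 42]; np.clip keeps border clusters inside.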
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) #error here - probably wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
        except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchData(
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
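        # Sketch of the cluster-preserving shuffle above: reshaping to one row per
        # cluster, permuting the rows, then reshaping back keeps each (2*radius+1)^2
        # plate contiguous. Toy example with 2 assumed clusters of 2 tiles each:
        #   import numpy as np
        #   batch = np.arange(4.0)                     # tiles [0, 1 | 2, 3]
        #   perm = np.array([1, 0])
        #   batch.reshape(2, -1)[perm].reshape(4, -1)  # tile order becomes 2, 3, 0, 1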
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
        target_disparity for correlation files may be different from data_ds - replaced during ImageJ plugin
        export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
        corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # TODO: replace the hardcoded 81 with the correct tile length
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
indx = 0;
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
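            # Sketch of the nearest-sweep lookup above: np.add.outer(a, -b) builds the
            # (sweep x tile) difference matrix, and argmin(0) picks, for every tile,
            # the index of the closest sweep disparity. Toy example, assumed values:
            #   import numpy as np
            #   sweeps = np.array([0.0, 0.5, 1.0]); tiles = np.array([0.1, 0.8])
            #   np.abs(np.add.outer(sweeps, -tiles)).argmin(0)  # -> array([0, 2])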
sfs = list(set(sweep_indices))
            sfs.sort() # unique, sorted sweep indices (files)
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
this_file_indices = np.nonzero(sweep_indices == sf)[0] #Returns a tuple of arrays, one for each dimension of a, containing the indices of the non-zero elements in that dimension.
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
        except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # Temporary disable
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchDataLwir(
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def showVariance(self,
                     rds_list,       # list of disparity/strength arrays, such as training, testing
                     disp_var_list,  # list of disparity variance arrays. Same shape (except the last dim) as rds_list
                     num_neibs_list, # list of tile neighbor count arrays. Same shape (except the last dim) as rds_list
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
            density= normalize,
weights= np.concatenate(good_tiles_list).flatten())
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
# plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# for i, combo_rds in enumerate(rds_list):
# for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
# combo_rds[ids][...,1]*= good_tiles_list[i][ids]
# return hist, xedges, yedges
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train2'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set21'
topdir_test = '/data_ssd/lwir_sets/lwir_test2'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_2'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_sets = ["/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"]
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
    TEST_SAME_LENGTH_AS_TRAIN = False # True # make the test set have the same number of entries as the train set
    FIXED_TEST_LENGTH = None # number of test scenes to output (used when making a test set from a few, or a single, test file)
    RADIUS = 2 # 5x5
    FRAC_NEIBS_VALID = 0.55 # 8 # LWIR new
    MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all tiles in the plate valid (25 for RADIUS == 2)
    MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
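    # Worked example of the two lines above: with RADIUS = 2 a plate has 5 * 5 = 25
    # tiles, so MIN_NEIBS = round(25 * 0.55) = 14 valid neighbors are required.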
VARIANCE_THRESHOLD = 0.8 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 32 # 8
FGBGMODE_TEST = 1 # 0 - average, 1 - FG, 2 - BG, 3 - AUX
FGBGMODE_TRAIN = 1 # 0 - average, 1 - FG, 2 - BG
RND_AMPLIUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.5 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use tile for training if MAIN camera (AUX for LWIR) differs more from GT
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
    USE_SPLIT = False # True, # Select by single-/multi-plane tiles (center only)
    KEEP_SPLIT = False # When USE_SPLIT, keep only multi-plane tiles (False - only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
            FGBGMODE_TEST, # export_mode: 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
            RND_AMPLIUDE_TEST, # random_offset: for modes 0..2 add a random offset of -random_offset to +random_offset; in mode 3 add random to the GT average if there is no AUX data
pathTFR) # TFR directory
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdir = MODEL_ML_DIR,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
        fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
    bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0)
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
        # show variance histograms
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
                rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength arrays, such as training, testing
                disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance arrays. Same shape (except the last dim) as rds_list
                num_neibs_list = [num_neibs_train, num_neibs_test], # list of tile neighbor count arrays. Same shape (except the last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
                rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength arrays, such as training, testing
                disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance arrays. Same shape (except the last dim) as rds_list
                num_neibs_list = [num_neibs_train, num_neibs_test], # list of tile neighbor count arrays. Same shape (except the last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
else:
num_test_scenes = FIXED_TEST_LENGTH
if RADIUS == 0 :
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
            disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single-/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
            disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single-/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
            disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single-/multi-plane tiles (center only)
            keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d_LE%4.1f"%(RADIUS,VARIANCE_THRESHOLD))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = VARIANCE_THRESHOLD, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d_GT%4.1f"%(RADIUS,VARIANCE_THRESHOLD))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
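# Each test/fake/train epoch is exported as a pair of TFRecords files: the
# "LE" file holds tile clusters whose disparity variance is <= VARIANCE_THRESHOLD
# (flat areas), the "GT" file those above it (edges / depth discontinuities),
# letting the trainer later mix low- and high-variance samples in a controlled
# proportion.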
#fake
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d_LE%4.1f"%(RADIUS,VARIANCE_THRESHOLD))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = VARIANCE_THRESHOLD, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d_GT%4.1f"%(RADIUS,VARIANCE_THRESHOLD))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train: NUM_TRAIN_SETS sets (an LE/GT file pair for each)
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d_LE%4.1f"%(train_var,RADIUS,VARIANCE_THRESHOLD))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = VARIANCE_THRESHOLD, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single-/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d_GT%4.1f"%(train_var,RADIUS,VARIANCE_THRESHOLD)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
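# Resulting names are train_filenameTFR + "NNN_R<radius>_LE<thr>" (and _GT<thr>);
# writeTFRewcordsEpochLwir() then appends the -RT/-RP/-M/-NB suffixes and the
# .tfrecords extension, replacing any spaces from the %4.1f format with '_'.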
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/explore_data7.py 0000664 0000000 0000000 00000325020 13517677053 0023517 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
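# Minimal usage sketch (the path is a hypothetical example):
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch("/some/path/train-000")
# corr2d: (num_tiles, flattened 2D correlation), target_disparity: (num_tiles,),
# gt_ds: (num_tiles, 2) ground truth disparity/strength pairs.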
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR #TFR directory
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list:
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
elif export_mode == 3:
disparity = img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
if export_mode == 3:
# d_gt = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
# s_gt = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
d_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] # still consider FG to be the real ground truth
s_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
else:
d_gt = disparity
s_gt = strength
#next values may be modified to fill gaps, so copy them first
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)# d_gt.flatten)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt) # s_gt.flatten)
plt.colorbar()
d_gt = np.copy(d_gt)
s_gt = np.copy(s_gt)
'''
fill gaps on ground truth slices only
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
'''
Assemble synthetic image, selecting each tile from the nearest available disparity sweep file
Currently even in mode 3 (aux) only sweep files are used (rounded to the nearest step). Consider
using real GT_AUX measured data (not available currently as ImageJ output, need to modify and rerun).
'''
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select a random index from the list - even with no randomness added it is just a 1-element list then
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
if debug > 1:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
corr2d_layer =dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
if not '.tfrecords' in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
try:
os.makedirs(os.path.dirname(tfr_filename))
except:
pass
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
print()
sys.stdout.flush()
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
gap_tiles = []
gap_neibs = []
rows = val.shape[0]
cols = wght.shape[1]
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
for npass in range (num_pass):
num_new = 0 # tiles filled for the first time in this pass
max_diff = 0.0
for tile, neibs in zip (gap_tiles, gap_neibs):
swn = 0.0
sw = 0.0
swd = 0.0
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
if (wght[tile[0]][tile[1]]) <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
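# A minimal, self-contained sketch (not called by the pipeline) of how
# fillGapsByLaplacian() behaves: the single zero-weight tile at (1,1) is filled
# with the neighbor-weighted average (orthogonal weight 1.0, diagonal w_diag)
# and gets the reduced weight w_reduce * sw / swn. Array values are arbitrary.
def _demo_fillGapsByLaplacian():
    val = np.array([[1.0, 1.0, 1.0],
                    [1.0, 0.0, 2.0],
                    [2.0, 2.0, 2.0]])
    wght = np.ones_like(val)
    wght[1, 1] = 0.0 # mark the center tile as a gap
    fillGapsByLaplacian(val, wght, w_diag = 0.7, w_reduce = 0.7, num_pass = 10)
    # center value: (1+2+1+2 + 0.7*(1+1+2+2)) / (4 + 4*0.7) = 1.5, weight 0.7
    print(val[1, 1], wght[1, 1])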
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # fixme
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from correlation file, it now may differ from the COMBO-DSI:
1) The target disparities used for correlations are replaced if they are too far from the rig (GT) values and
replaced by interpolation from available neighbors. If there are no suitable neighbors, target disparity is
derived from the rig data by adding a random offset (specified in ImageJ plugin configuration ML section)
2) correlation is performed around the defined tiles, extrapolating disparity. Rig data may be 0 disparity,
0 strength if there is no rig data for those tiles. That means that such tiles can only be used as peripherals
in (now 5x5) clusters, not for the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
Replace GT data with zero strength with nan, zero strength
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
try:
os.makedirs(os.path.dirname(tfr_filename))
except:
pass
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
class ExploreData:
"""
TODO: add to constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
disparities[i] = float(re.search(".*-D([0-9.]*)\.tiff",f).groups()[0])
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
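# Example: a sweep file "...-ML_DATA-32B-AOT-FZ0.03-D00.50000.tiff" yields 0.5
# via the "-D([0-9.]*)\.tiff" capture group, so target_disparities[i] holds the
# fixed disparity each sweep file was correlated at, in sorted file order.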
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode): #0 - average, 1 - FG, 2 - BG, 3 - AUX
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
elif mode == 3:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9
ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(), # average strength from main
y = combo_rds[...,0].flatten(), # average disparity from main
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
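# The histogram above is accumulated over (strength, disparity) with per-tile
# weights from good_tiles, so tiles rejected by the min/max drop criteria
# contribute nothing; the final "combo_rds[ids][...,1] *= ..." pass then zeroes
# the strength of those tiles, which later marks them invalid in getBB().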
def __init__(self,
topdir_train,
topdir_test,
ml_subdir, #'ml32'
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
fgbg_mode = 0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdir = ml_subdir
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
self.fgbg_mode = fgbg_mode #0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
# new in LWIR - all layers, including AG (average), FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thesh = 5.0): # reduce effective variance for higher disparities
"""
For each tile calculate difference between max and min among neighbors and number of qualifying neighbors (bad center is not removed)
data_ds may mismatch the correlation files - correlation files have data in extrapolated areas, with values replaced where they differ too much from GT
"""
disp_min = np.empty_like(data_ds[...,0], dtype = float)
disp_max = np.empty_like(disp_min, dtype = float)
tile_neibs = np.zeros_like(disp_min, dtype = int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
#disp_thesh
disp_avar = disp_max - disp_min
disp_rvar = disp_avar * disp_thesh / np.maximum(disp_max, 0.001) # removing division by 0 error - those tiles will be anyway discarded
disp_var = np.select([disp_max >= disp_thesh, disp_max < disp_thesh],[disp_rvar,disp_avar])
return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs), (number of "good" neib. tiles)
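# Worked example of the rescaling above (disp_thesh = 5.0): a neighborhood with
# disp_min = 8 and disp_max = 10 has disp_avar = 2, but since disp_max exceeds
# disp_thesh the reported value is disp_rvar = 2 * 5.0 / 10 = 1.0 - distant
# (high-disparity) tiles are allowed a proportionally larger absolute spread.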
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
for each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide number of "large"
variable-size disparity/strength bin, or -1 if this disparity/strength combination does not seem right
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
disp_run_tot_new = disp_run_tot
disp0 = disp # start disparity matching disp_run_tot
while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
disp_run_tot_new += norm_b_hist[:,disp].sum()
disp += 1
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
batch_index += 1 # it was not incremented after the last in the column to avoid rounding errors
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1)
return bb
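# Worked example with the constructor defaults (disparity clip [-0.1, 100] in
# 1000 bins, strength clip [0.1, 0.9] in 100 bins): disp_step ~= 0.1001 and
# str_step = 0.008, so a tile with d = 1.0, s = 0.5 maps to db = int(1.1/0.1001)
# = 10 and sb ~= 50, and its batch bin is hist_to_batch[50, 10]; zero-strength
# tiles are pushed to a negative (invalid) index by the (gt - 1) term.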
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
disp_var = None, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = None, # Minimal tile variance to include
max_var = None, # Maximal tile variance to include
min_neibs = None, # Minimal number of valid tiles to include
use_split = False, # Select single-/multi-plane tiles (center only)
keep_split = False, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = None, # disparity random for each tile
rnd_plate = None): # disparity random for each plate (now 25 tiles)
if not rnd_tile is None:
self.rnd_tile = rnd_tile
if not rnd_plate is None:
self.rnd_plate = rnd_plate
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
#todo: only use other files if there are not enough choices in the main file!
'''
Add random files to the list until each (now 40) of the full_num_choices has more
than the minimal number (now 10) of variants to choose from
'''
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
break
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
break
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
if not type (tile) is np.int64:
print("tile=",tile)
file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
return flist, file_tiles # file indices, list if tile indices for each file
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different than data_ds - replaced during ImageJ plugin
export if main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
indx = 0
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) #error here - probably wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) # a "'hor-pairs' is not in list" error here means the tiff lacks the expected correlation layers
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
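# Layout of the returned batch: rows are grouped per cluster - each selected
# center tile contributes tiles_in_sample = (2*radius+1)**2 consecutive rows
# (its neighbors in scan order), which is what the cluster-wise reshuffle in
# writeTFRewcordsEpoch() relies on when it reshapes by cluster_size.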
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchData(
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different than data_ds - replaced during ImageJ plugin
export if main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # FIXME: 81 is hardcoded, should be taken from the tile size (tileH*tileW)
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
indx = 0
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
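# np.add.outer(sweep_disparities[scene], -disparity_tiles) is the matrix of
# (sweep target disparity - wanted disparity) for every sweep file x tile;
# argmin(0) of its absolute value picks, per tile, the sweep file whose fixed
# target disparity is nearest. E.g. with sweeps [0.0, 0.5, 1.0] a wanted
# disparity of 0.6 selects index 1.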
sfs = list(set(sweep_indices))
sfs.sort() # unique sweep indices (files)
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
this_file_indices = np.nonzero(sweep_indices == sf)[0] # np.nonzero returns a tuple of per-dimension index arrays; [0] takes the single axis
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # temporary: skip regeneration of existing files
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
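# Example of the seed-list padding above (illustration only): with 3 suitable
# files [0, 1, 2] and num_scenes = 5, the list is first extended with random
# repeats of already selected scenes and then shuffled, e.g. -> [2, 0, 1, 0, 2]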
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchDataLwir( #'hor-pairs' is not in list
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
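# Example of the generated file name (using the values configured below in __main__):
# with rnd_tile = 0.5, rnd_plate = 0.0, fgbg_mode = 1, min_neibs = 14 and
# use_split = False, a base name "train000" becomes
# "train000-RT0.50-RP0.00-M1-NB14.tfrecords"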
def showVariance(self,
rds_list, # list of disparity/strength files, such as training, testing
disp_var_list, # list of disparity variance files. Same shape(but last dim) as rds_list
num_neibs_list, # list of number of tile neibs files. Same shape(but last dim) as rds_list
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
# plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# for i, combo_rds in enumerate(rds_list):
# for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
# combo_rds[ids][...,1]*= good_tiles_list[i][ids]
# return hist, xedges, yedges
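# The good-tiles mask above is a chain of elementwise range tests; a minimal
# sketch of the same composition on synthetic data (illustration only, thresholds
# as configured in __main__ below):
# disparity = np.array([0.5, 9.0]); strength = np.array([0.2, 0.2])
# good = (disparity >= -0.1) & (disparity <= 8.0) & (strength >= 0.02) & (strength <= 0.3)
# # -> array([ True, False])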
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train2'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set21'
topdir_test = '/data_ssd/lwir_sets/lwir_test2'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_3'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32", # 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390251_025390/v01/ml32", # empty space
"/data_ssd/lwir_sets/lwir_test2/1562390257_977146/v01/ml32", # first 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_370347/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32"] # all 3
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile datasets
TEST_SAME_LENGTH_AS_TRAIN = False # True # make the test set have the same number of entries as the train set
FIXED_TEST_LENGTH = None # number of test scenes to output (used when making the test set from just a few or a single test file)
RADIUS = 2 # 5x5
FRAC_NEIBS_VALID = 0.55 # 8 # LWIR new
MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all tiles valid (25 for RADIUS == 2, 9 for RADIUS == 1)
MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
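# e.g. with RADIUS = 2: (2 * 2 + 1) ** 2 = 25 tiles per plate, and
# round(25 * 0.55) = 14 valid neighbors are required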
VARIANCE_THRESHOLD = 1.2 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 32 # 8
FGBGMODE_TEST = 3 # 0 - average, 1 - FG, 2 - BG, 3 - AUX
FGBGMODE_TRAIN = 1 # 0 - average, 1 - FG, 2 - BG
RND_AMPLIUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
RND_AMPLIUDE_TRAIN_TILEW = 2.0 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATEW = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use a tile for training if the MAIN camera (AUX for LWIR) disparity differs from GT by more than this
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
USE_SPLIT = False # True, # Select by single/multi-plane tiles (center only)
KEEP_SPLIT = False # When USE_SPLIT, keep only multi-plane tiles (False - only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
FGBGMODE_TEST, # 0, # export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
RND_AMPLIUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR) # TFR directory
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdir = MODEL_ML_DIR,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
# show variance histograms
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape(but last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neibs files. Same shape(but last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape(but last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neibs files. Same shape(but last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
else:
num_test_scenes = FIXED_TEST_LENGTH
if RADIUS == 0: # not used
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
scale_disp = VARIANCE_SCALE_DISPARITY,
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
#fake
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train 32 sets
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d"%(train_var,RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximum is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d"%(train_var,RADIUS)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/explore_data8.py 0000664 0000000 0000000 00000325020 13517677053 0023520 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR #TFR directory
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list:
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
elif export_mode == 3:
disparity = img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
if export_mode == 3:
# d_gt = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
# s_gt = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
d_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] # still consider FG to be the real ground truth
s_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
else:
d_gt = disparity
s_gt = strength
#next values may be modified to fill gaps, so copy them before
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)# d_gt.flatten)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt) # s_gt.flatten)
plt.colorbar()
d_gt = np.copy(d_gt)
s_gt = np.copy(s_gt)
'''
fill gaps on ground truth slices only
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
'''
Assemble a synthetic image, selecting each tile from the nearest available disparity sweep file.
Currently even in mode 3 (AUX) only sweep files are used (rounded to the nearest step). Consider
using the real measured GT_AUX data (not available currently as ImageJ output; need to modify and re-run the plugin).
'''
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select random index from the list - even if no random (it will just be a 1-element list then)
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
if debug > 1:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
corr2d_layer =dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
if not '.tfrecords' in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
try:
os.makedirs(os.path.dirname(tfr_filename))
except OSError:
pass
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
print()
sys.stdout.flush()
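# Example call (hypothetical paths, illustration only): generate a full-image
# TFRecords set in AUX mode (3) with +/- 0.5 pix random offset where AUX data
# is missing:
# writeTFRecordsFromImageSet(
#     "/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # model/version/ml_dir
#     3, # export_mode: AUX disparity
#     0.5, # random_offset
#     "/data_ssd/lwir_sets/tf_data_5x5_3") # TFR directory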
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
gap_tiles = []
gap_neibs = []
rows = val.shape[0]
cols = wght.shape[1]
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
for npass in range (num_pass):
num_new = 0 # number of tiles filled for the first time in this pass
max_diff = 0.0
for tile, neibs in zip (gap_tiles, gap_neibs):
swn = 0.0
sw = 0.0
swd = 0.0
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
if (wght[tile[0]][tile[1]]) <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
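# A minimal usage sketch of fillGapsByLaplacian (commented out): fill a single
# missing tile in the center of a 3x3 grid; with all 8 neighbors equal to 1.0
# the gap converges to 1.0 and receives weight w_reduce = 0.7:
# val = np.ones((3, 3)); wght = np.ones((3, 3))
# val[1, 1] = 0.0; wght[1, 1] = 0.0
# fillGapsByLaplacian(val, wght)
# # val[1, 1] -> 1.0, wght[1, 1] -> 0.7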
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # FIXME: hard-coded tile grid size (rows * cols)
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from the correlation file may now differ from the COMBO-DSI:
1) The target disparities used for correlations, when too far from the rig (GT) values, are
replaced by interpolation from available neighbors. If there are no suitable neighbors, target disparity is
derived from the rig data by adding a random offset (specified in the ImageJ plugin configuration, ML section).
2) Correlation is performed around the defined tiles, extrapolating disparity. Rig data may be 0 disparity,
0 strength if there is no rig data for those tiles. That means such tiles can only be used as peripherals
in (now 5x5) clusters, not as the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
Optionally replace GT data having zero strength with (nan, 0) - currently disabled:
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
try:
os.makedirs(os.path.dirname(tfr_filename))
except OSError:
pass
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
class ExploreData:
"""
TODO: add to constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
disparities[i] = float(re.search(".*-D([0-9.]*)\.tiff",f).groups()[0])
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
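# Example (file name format from the comments near the top of this file):
# f = "1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff"
# re.search(".*-D([0-9.]*)\.tiff", f).groups()[0] -> "00.00000" -> float -> 0.0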
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode): #0 - average, 1 - FG, 2 - BG, 3 - AUX
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
elif mode == 3:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9
ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(), # average disparity from main
y = combo_rds[...,0].flatten(), # average strength from main
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
def __init__(self,
topdir_train,
topdir_test,
ml_subdir, #'ml32'
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
fgbg_mode = 0, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdir = ml_subdir
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
self.fgbg_mode = fgbg_mode # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
        # new in LWIR - all layers, including AG, FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thesh = 5.0): # reduce effective variance for higher disparities
"""
For each tile calculate difference between max and min among neighbors and number of qualifying neighbors (bad center is not removed)
data_ds may mismatch with the correlation files - correlation files have data in extrapolated areas and replaced for large difference with GT
"""
        disp_min   = np.empty_like(data_ds[...,0], dtype = float)
        disp_max   = np.empty_like(disp_min, dtype = float)
        tile_neibs = np.zeros_like(disp_min, dtype = int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
        # reduce effective variance for higher disparities (above disp_thesh)
        disp_avar = disp_max - disp_min
        disp_rvar = disp_avar * disp_thesh / np.maximum(disp_max, 0.001) # avoid division by zero - those tiles will be discarded anyway
        disp_var = np.select([disp_max >= disp_thesh, disp_max < disp_thesh],[disp_rvar,disp_avar])
        return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs), (number of "good" neib. tiles)
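    # Worked example for the variance rescaling above (illustrative numbers): with
    # disp_thesh = 5.0, a tile with disp_min = 8.0 and disp_max = 10.0 gets
    # disp_avar = 2.0 and disp_rvar = 2.0 * 5.0 / 10.0 = 1.0, i.e. distant tiles are
    # allowed about twice the disparity spread (in px) before counting as high-variance.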
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
for each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide number of "large"
variable-size disparity/strength bin, or -1 if this disparity/strength combination does not seem right
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
disp_run_tot_new = disp_run_tot
            disp0 = disp # start disparity matching disp_run_tot
while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
disp_run_tot_new += norm_b_hist[:,disp].sum()
                disp += 1
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
            batch_index += 1 # it was not incremented after the last one in the column, to avoid rounding errors
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
            gt = ds[...,1] > 0.0 # boolean mask: True for tiles with positive strength
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
            bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1) # gt is 0/1, so bad tiles get index -1
return bb
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
disp_var = None, # difference between maximal and minimal disparity for each scene, each tile
                       disp_neibs = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = None, # Minimal tile variance to include
max_var = None, # Maximal tile variance to include
min_neibs = None, # Minimal number of valid tiles to include
                       use_split = False, # Select by single/multi-plane tiles (center only)
                       keep_split = False, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = None, # disparity random for each tile
rnd_plate = None): # disparity random for each plate (now 25 tiles)
        if rnd_tile is not None:
            self.rnd_tile = rnd_tile
        if rnd_plate is not None:
            self.rnd_plate = rnd_plate
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=np.bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
    #todo: only use other files if there are not enough choices in the main file!
    '''
    Add random files to the list until each (now 40) of the full_num_choices has more
    than the minimal number (now 10) of variants to choose from
    '''
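    # Minimal usage sketch (hypothetical indices, not executed code):
    #   flist, file_tiles = ex_data.augmentBatchFileIndices(seed_index = 3)
    # could return e.g. flist = [3, 11, 7] (the seed scene plus randomly added scenes)
    # and, per scene, a sorted array of tile indices chosen so that every batch bin has
    # at least min_choices candidates.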
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
                    break
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
                        break
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
            if not isinstance(tile, np.int64): # debug check: tile must be a scalar index
                print("tile=",tile)
file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
        return flist, file_tiles # file indices, list of tile indices for each file
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different than data_ds - replaced dureing ImageJ plugin
export if main camera and the rig (GT) converged on different objects fro the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
            indx = 0
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) #error here - probably wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
        if '.tfrecords' not in tfr_filename:
            tfr_filename += '.tfrecords'
        tfr_filename = tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
        try:
            os.makedirs(os.path.dirname(tfr_filename))
            print("Created directory "+os.path.dirname(tfr_filename))
        except OSError: # do not fail if the directory already exists
            print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchData( #'hor-pairs' is not in list
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may be different than data_ds - replaced dureing ImageJ plugin
export if main camera and the rig (GT) converged on different objects fro the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
        corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # FIXME: 81 is hardcoded - derive from the correlation tile size
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
            indx = 0
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
            sfs = list(set(sweep_indices)) # unique sweep indices (files)
            sfs.sort()
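            # Worked example (hypothetical values): with sweep_disparities[scene] =
            # [0.0, 0.5, 1.0] and disparity_tiles = [0.1, 0.8],
            # np.abs(np.add.outer([0.0, 0.5, 1.0], -np.array([0.1, 0.8]))) is
            # [[0.1, 0.8], [0.4, 0.3], [0.9, 0.2]] and argmin(0) = [0, 2]: tile 0 is read
            # from sweep file 0, tile 1 from sweep file 2.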
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
                this_file_indices = np.nonzero(sweep_indices == sf)[0] # positions (in full_tiles) of the tiles to be read from this sweep file
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
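        # Example of the resulting suffix (illustrative values): rnd_tile = 0.5,
        # rnd_plate = 0.0, fgbg_mode = 1, min_neibs = 14, use_split = False would append
        # "-RT0.50-RP0.00-M1-NB14" before the ".tfrecords" extension added below.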
        if '.tfrecords' not in tfr_filename:
            tfr_filename += '.tfrecords'
        tfr_filename = tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
        try:
            os.makedirs(os.path.dirname(tfr_filename))
            print("Created directory "+os.path.dirname(tfr_filename))
        except OSError: # do not fail if the directory already exists
            print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # Temporary disable
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchDataLwir( #'hor-pairs' is not in list
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def showVariance(self,
                     rds_list,       # list of disparity/strength files, such as training, testing
                     disp_var_list,  # list of disparity variance files. Same shape (but for the last dim) as rds_list
                     num_neibs_list, # list of number of tile neibs files. Same shape (but for the last dim) as rds_list
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
# plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# for i, combo_rds in enumerate(rds_list):
# for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
# combo_rds[ids][...,1]*= good_tiles_list[i][ids]
# return hist, xedges, yedges
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train3'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set21'
topdir_test = '/data_ssd/lwir_sets/lwir_test3'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_4'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32", # 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390251_025390/v01/ml32", # empty space
"/data_ssd/lwir_sets/lwir_test2/1562390257_977146/v01/ml32", # first 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_370347/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32"] # all 3
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
TEST_SAME_LENGTH_AS_TRAIN = False # True # make test to have same number of entries as train ones
    FIXED_TEST_LENGTH = None # put number of test scenes to output (used when making test only from a few or a single test file)
RADIUS = 2 # 5x5
    FRAC_NEIBS_VALID = 0.55 # 8 #LWIR new
    MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # number of tiles in a full plate ((2*RADIUS+1)**2, 25 for RADIUS = 2)
    MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
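    # Worked arithmetic: for RADIUS = 2 the plate has 25 tiles, so
    # MIN_NEIBS = round(25 * 0.55) = 14 valid neighbors are required.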
VARIANCE_THRESHOLD = 1.2 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 32 # 8
FGBGMODE_TEST = 3 # 0 - average, 1 - FG, 2 - BG, 3 - AUX
FGBGMODE_TRAIN = 1 # 0 - average, 1 - FG, 2 - BG
RND_AMPLIUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
RND_AMPLIUDE_TRAIN_TILEW = 2.0 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATEW = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
    MAX_MAIN_OFFSET = 2.5 # do not use a tile for training if the MAIN camera (AUX for LWIR) disparity differs from the GT by more than this
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
    USE_SPLIT = False # True, # Select by single/multi-plane tiles (center only)
KEEP_SPLIT = False # When sel_split, keep only multi-plane tiles (false - only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
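        # Sanity note: for RADIUS = 2 this gives 10 * 4 = 40 batch bins; with 25 tiles
        # per 5x5 plate a full batch holds 40 * 25 = 1000 tiles (see the batch array
        # comment in prepareBatchDataLwir).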
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
            FGBGMODE_TEST,    # 0, # export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
RND_AMPLIUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR) # TFR directory
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdir = MODEL_ML_DIR,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
        fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
        rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity offset by up to this much from the GT
        rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity offset by up to this much from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
        # show variance histogram
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
                rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
                disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape (but for the last dim) as rds_list
                num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neibs files. Same shape (but for the last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
                rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
                disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape (but for the last dim) as rds_list
                num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neibs files. Same shape (but for the last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
else:
num_test_scenes = FIXED_TEST_LENGTH
if RADIUS == 0 : # not used
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When sel_split, keep only multi-plane tiles (false - only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When sel_split, keep only multi-plane tiles (false - only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When sel_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When sel_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
#fake
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
                disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
                use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When sel_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
                disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
                use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When sel_split, keep only multi-plane tiles (false - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train 32 sets
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - keep only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d"%(train_var,RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - keep only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d"%(train_var,RADIUS)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/explore_data9.py 0000664 0000000 0000000 00000326536 13517677053 0023536 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
#from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
TIME_START = time.time()
TIME_LAST = TIME_START
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end)
TIME_LAST = t
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
def readTFRewcordsEpoch(train_filename):
# filenames = [train_filename]
# dataset = tf.data.TFRecordDataset(filenames)
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append(np.array(example.features.feature['corr2d'] .float_list .value))
target_disparity_list.append(np.array(example.features.feature['target_disparity'] .float_list .value[0]))
gt_ds_list.append(np.array(example.features.feature['gt_ds'] .float_list .value))
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
return corr2d, target_disparity, gt_ds
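'''
Minimal usage sketch for readTFRewcordsEpoch (file name and shapes are
illustrative; actual lengths depend on RADIUS and the number of records):

    corr2d, target_disparity, gt_ds = readTFRewcordsEpoch("train-000_R2")
    # corr2d.shape           == (num_records, flattened 2D-correlation length)
    # target_disparity.shape == (num_records,)
    # gt_ds.shape            == (num_records, 2)  # disparity/strength pairs
'''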
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
export_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR #TFR directory
):
debug = 1
scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01'
fname = scene+'-'+scene_version+ ('-M%d-R%1.3f_EXTRA'%(export_mode,random_offset)).replace('.','_')
img_filenameTFR = os.path.join(pathTFR,'img',fname)
dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
if not dsi_list:
print ("DSI list is empty, nothing to do ...")
return
dsi_list.sort()
gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
#Get tiles data from the GT_AUX file
img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
all_image_tiles = np.array(range(num_tiles))
#now read in all scanned files
indx = 0
dsis = np.empty((0))
dsis_other = np.empty((0))
for img_path in dsi_list:
tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
payloads = tiff.payload # [300][11]
if not indx: # Create array when dimensions are known
dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
dsis[indx] = corr2d
dsis_other[indx] = payloads
indx += 1
pass
'''
Prepare target disparity from the gt_aux file, filling the gaps in GT data
'''
# if export_mode == 0 (default):
disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
if export_mode == 1:
disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
elif export_mode == 2:
disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
elif export_mode == 3:
disparity = img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]
strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
if export_mode == 3:
d_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP] # still consider FG to be the real ground truth
s_gt = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
else:
d_gt = disparity
s_gt = strength
extra = np.concatenate(
(img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS].reshape(-1,1),
img_gt_aux.image[...,ijt.IJFGBG.RMS_SPLIT].reshape(-1,1))
,1)
if debug > 1:
mytitle = "Disparity with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)# d_gt.flatten)
plt.colorbar()
mytitle = "Strength with gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt) # s_gt.flatten)
plt.colorbar()
d_gt = np.copy(d_gt)
s_gt = np.copy(s_gt)
#next values may be modified to fill gaps, so copy them before
'''
fill gaps on ground truth slices only
'''
fillGapsByLaplacian(
d_gt, # val, # will be modified in place
s_gt, # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 50,
eps = 1E-6)
if debug > 1:
mytitle = "Disparity w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(d_gt)
plt.colorbar()
mytitle = "Strength w/o gaps"
fig = plt.figure()
fig.canvas.set_window_title(scene+mytitle)
fig.suptitle(mytitle)
plt.imshow(s_gt)
plt.colorbar()
disparity = disparity.flatten()
strength = strength.flatten()
d_gt = d_gt.flatten()
s_gt = s_gt.flatten()
'''
Assemble synthetic image, selecting each tile from the nearest available disparity sweep file
Currently even in mode 3 (aux) only sweep files are used (rounded to the nearest step). Consider
using real GT_AUX measured data (not currently available as ImageJ output; need to modify and re-run).
'''
corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
for nt in range(num_tiles):
d = disparity[nt]
add_random = (export_mode != 3)
if strength[nt] <= 0.0:
d = d_gt[nt]
add_random = True
best_indx = 0
dmn = d
dmx = d
if add_random:
dmn -= random_offset
dmx += random_offset
fit_list = []
for indx in range (dsis_other.shape[0]):
dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
best_indx = indx
if (dsi_d >= dmn) and (dsi_d <= dmx):
fit_list.append(indx)
if not len(fit_list):
fit_list.append(best_indx)
#select random index from the list - even if no random (it will just be a 1-element list then)
indx = np.random.choice(fit_list) # possible to add weights
target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
gt_ds[nt][0] = d_gt[nt]
gt_ds[nt][1] = s_gt[nt]
corr2d[nt] = dsis[indx][nt]
if debug > 1:
tilesX = img_gt_aux.image.shape[1]
tilesY = img_gt_aux.image.shape[0]
tileH = tiff.tileH
tileW = tiff.tileW
ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
mytitle = "Target Disparity"
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(target_disparity.reshape((tilesY, tilesX)))
plt.colorbar()
dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
for tileY in range(tilesY):
for tileX in range(tilesX):
for nl in range(ncorr2_layers):
dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
pass
for nl in range(ncorr2_layers):
corr2d_layer =dbg_corr2d[:,:,nl]
mytitle = "Corr2D-"+str(nl)
fig = plt.figure()
fig.canvas.set_window_title(scene+": "+mytitle)
fig.suptitle(mytitle)
plt.imshow(corr2d_layer)
plt.colorbar()
#end of debug output
if not '.tfrecords' in img_filenameTFR:
img_filenameTFR += '.tfrecords'
tfr_filename=img_filenameTFR.replace(' ','_')
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
try:
os.makedirs(os.path.dirname(tfr_filename))
except OSError:
pass
### writer = tf.python_io.TFRecordWriter(tfr_filename)
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
dtype_feature_extra = _dtype_feature(extra)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
e = extra[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z),
'extra': dtype_feature_extra(e)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
print()
sys.stdout.flush()
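'''
Hypothetical invocation sketch for writeTFRecordsFromImageSet (all paths below
are placeholders, not actual data locations):

    writeTFRecordsFromImageSet(
        model_ml_path = "/data/sets/1562390086_121105/v01/ml32",
        export_mode   = 0,    # 0 - GT average, 1 - FG, 2 - BG, 3 - AUX
        random_offset = 0.2,  # +/- disparity randomization around GT
        pathTFR       = "/data/tfr")
'''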
def fillGapsByLaplacian(
val, # will be modified in place
wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 10,
eps = 1E-6,
debug_level = 0):
dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
gap_tiles = []
gap_neibs = []
rows = val.shape[0]
cols = wght.shape[1]
for row in range(rows):
for col in range (cols):
if wght[row][col] <= 0.0:
neibs = []
for dr, neib in enumerate(dirs):
nrow = row + neib[0]
ncol = col + neib[1]
if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
neibs.append((nrow,ncol,dr))
gap_tiles.append((row,col))
gap_neibs.append(neibs)
if not len(gap_tiles):
return # no gaps to fill
valn = np.copy(val)
wghtn = np.copy(wght)
achange = eps * np.max(wght)
for npass in range (num_pass):
num_new = 1
max_diff = 0.0;
for tile, neibs in zip (gap_tiles, gap_neibs):
swn = 0.0
sw = 0.0
swd = 0.0;
for neib in neibs: # (row,col,direction)
w = wght[neib[0]][neib[1]] * wneib[neib[2]]
sw += w
if w > 0:
swd += w * val[neib[0]][neib[1]]
swn += wneib[neib[2]]
if (sw > 0):
valn [tile[0]][tile[1]] = swd/sw
wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
if (wght[tile[0]][tile[1]]) <= 0:
num_new += 1
wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
max_diff = max(max_diff, wdiff)
np.copyto(val, valn)
np.copyto(wght, wghtn)
if (debug_level > 3):
print("Pass %d, max_diff = %f"%(npass, max_diff))
if (num_new == 0) and (max_diff < achange):
break
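'''
Minimal sketch of fillGapsByLaplacian on a toy 5x5 grid (values are
illustrative only; both arrays are modified in place):

    val  = np.full((5, 5), 2.0)
    wght = np.ones((5, 5))
    wght[2, 2] = 0.0   # zero strength marks the center tile as a gap
    val [2, 2] = 0.0   # its value will be re-estimated from the neighbors
    fillGapsByLaplacian(val, wght, w_diag = 0.7, w_reduce = 0.7, num_pass = 10)
    # val[2, 2]  -> interpolated from the 8 neighbors (2.0 here)
    # wght[2, 2] -> non-zero, scaled down by w_reduce relative to the neighbors
'''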
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
num_tiles = 242*324 # fixme
all_image_tiles = np.array(range(num_tiles))
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
"""
Values read from the correlation file; they may now differ from the COMBO-DSI:
1) The target disparities used for correlations are replaced if they are too far from the rig (GT) values,
using interpolation from available neighbors. If there are no suitable neighbors, target disparity is
derived from the rig data by adding a random offset (specified in the ImageJ plugin configuration, ML section).
2) Correlation is performed around the defined tiles extrapolating disparity. Rig data may be 0 disparity,
0 strength if there is no rig data for those tiles. That means that such tiles can only be used as peripherals
in (now 5x5) clusters, not for the cluster centers where GT is needed.
"""
corr2d = img.corr2d.reshape((num_tiles,-1))
target_disparity = img.target_disparity.reshape((num_tiles,-1))
gt_ds = img.gt_ds.reshape((num_tiles,-1))
"""
Replace GT data with zero strength with nan, zero strength
nan2 = np.array((np.nan,0), dtype=np.float32)
gt_ds[np.where(gt_ds[:,1]==0)] = nan2
"""
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
try:
os.makedirs(os.path.dirname(tfr_filename))
except OSError:
pass
writer = tf.io.TFRecordWriter(tfr_filename)
dtype_feature_corr2d = _dtype_feature(corr2d)
dtype_target_disparity = _dtype_feature(target_disparity)
dtype_feature_gt_ds = _dtype_feature(gt_ds)
for i in range(num_tiles):
x = corr2d[i].astype(np.float32)
y = target_disparity[i].astype(np.float32)
z = gt_ds[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
pass
writer.close()
sys.stdout.flush()
class ExploreData:
"""
TODO: add to constructor parameters
"""
PATTERN = "*-DSI_COMBO.tiff"
PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"
PATTERN_CORRD = "*-D*.tiff"
# ML_DIR = "ml"
# ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN.tiff"
# ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
# ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
"""
1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
"""
#1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
disparities[i] = float(re.search(r".*-D([0-9.]*)\.tiff", f).groups()[0])
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
def loadGtAuxFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
# IJFGBG.DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
layers = ijt.IJFGBG.DSI_NAMES
for gtaux_file in tlist:
tiff = ijt.imagej_tiff(gtaux_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def selectDSPairFromGtaux(
self,
gtaux,
mode): #0 - average, 1 - FG, 2 - BG, 3 - AUX
ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
if mode == 0:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
elif mode == 1:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
elif mode == 2:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
elif mode == 3:
ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9
ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
for nf in range (ds_pair.shape[0]):
if (self.debug_level > 3):
print ("---- nf=%d"%(nf,))
fillGapsByLaplacian(
ds_pair[nf,:,:,0], # val, # will be modified in place
ds_pair[nf,:,:,1], # wght, # will be modified in place
w_diag = 0.7,
w_reduce = 0.7,
num_pass = 20,
eps = 1E-6,
debug_level = self.debug_level)
if (self.debug_level > 0):
print ("---- nf=%d min = %f mean = %f max = %f"%(
nf,
ds_pair[nf,:,:,0].min(),
ds_pair[nf,:,:,0].mean(),
ds_pair[nf,:,:,0].max()))
print("zero strength",np.nonzero(ds_pair[nf,:,:,1]==0.0))
return ds_pair
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(), # strength (first histogram axis)
y = combo_rds[...,0].flatten(), # disparity (second histogram axis)
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
density= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
def __init__(self,
topdir_train,
topdir_test,
ml_subdir, #'ml32'
ml_pattern,
latest_version_only,
max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
debug_level = 0,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
#new in LWIR mode
fgbg_mode = 0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
rnd_tile = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
rnd_plate = 0.5, # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
radius = 2):
# file name
self.debug_level = debug_level
self.ml_pattern = ml_pattern
self.ml_subdir = ml_subdir
#self.testImageTiles()
self.max_main_offset = max_main_offset
self.disparity_bins = disparity_bins
self.strength_bins = strength_bins
self.disparity_min_drop = disparity_min_drop
self.disparity_min_clip = disparity_min_clip
self.disparity_max_drop = disparity_max_drop
self.disparity_max_clip = disparity_max_clip
self.strength_min_drop = strength_min_drop
self.strength_min_clip = strength_min_clip
self.strength_max_drop = strength_max_drop
self.strength_max_clip = strength_max_clip
self.hist_sigma = hist_sigma # Blur log histogram
self.hist_cutoff= hist_cutoff # of maximal
self.fgbg_mode = fgbg_mode #0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
self.radius = radius
self.pre_log_offs = 0.001 # of histogram maximum
self.good_tiles = None
### self.files_train = self.getComboList(topdir_train, latest_version_only)
### self.files_test = self.getComboList(topdir_test, latest_version_only)
self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
# self.train_ds = self.loadGtAuxFiles(self.files_train)
# self.test_ds = self.loadGtAuxFiles(self.files_test)
# new in LWIR - all layers, including the average, FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
self.train_gtaux = self.loadGtAuxFiles(self.files_train)
self.test_gtaux = self.loadGtAuxFiles(self.files_test)
self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode)
self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode)
self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
self.hist, _, _ = self.getHistogramDSI(
list_rds = [self.train_ds,self.test_ds], # combo_rds,
disparity_bins = self.disparity_bins,
strength_bins = self.strength_bins,
disparity_min_drop = self.disparity_min_drop,
disparity_min_clip = self.disparity_min_clip,
disparity_max_drop = self.disparity_max_drop,
disparity_max_clip = self.disparity_max_clip,
strength_min_drop = self.strength_min_drop,
strength_min_clip = self.strength_min_clip,
strength_max_drop = self.strength_max_drop,
strength_max_clip = self.strength_max_clip,
max_main_offset = self.max_main_offset,
normalize = True
# no_histogram = False
)
log_offset = self.pre_log_offs * self.hist.max()
h_cutoff = hist_cutoff * self.hist.max()
lhist = np.log(self.hist + log_offset)
blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
self.blurred_hist = np.exp(blurred_lhist) - log_offset
self.good_tiles = self.blurred_hist >= h_cutoff
self.blurred_hist *= self.good_tiles # set bad ones to zero
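# The histogram above is smoothed in log space: pre_log_offs keeps log()
# finite for empty bins, the Gaussian blur spreads counts to neighboring
# bins, and exp() maps back to linear counts before the cutoff is applied.
# A standalone sketch of the same idea (illustrative values only):
#
#     h = np.random.rand(100, 1000)   # strength_bins x disparity_bins
#     off = 0.001 * h.max()
#     smooth = np.exp(gaussian_filter(np.log(h + off), sigma = 2.0)) - off
#     good = smooth >= 0.001 * h.max()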
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thesh = 5.0): # reduce effective variance for higher disparities
"""
For each tile calculate difference between max and min among neighbors and number of qualifying neighbors (bad center is not removed)
data_ds may mismatch with the correlation files - correlation files have data in extrapolated areas and replaced for large difference with GT
"""
disp_min = np.empty_like(data_ds[...,0], dtype = float)
disp_max = np.empty_like(disp_min, dtype = float)
tile_neibs = np.zeros_like(disp_min, dtype = int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
#disp_thesh
disp_avar = disp_max - disp_min
disp_rvar = disp_avar * disp_thesh / np.maximum(disp_max, 0.001) # removing division by 0 error - those tiles will be anyway discarded
disp_var = np.select([disp_max >= disp_thesh, disp_max < disp_thesh],[disp_rvar,disp_avar])
return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs), (number of "good" neighbor tiles)
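# Worked example of the rescaling above (illustrative numbers): with
# disp_thesh = 5.0, a cluster with disp_min = 8.0 and disp_max = 10.0 has
# disp_avar = 2.0 but disp_rvar = 2.0 * 5.0 / 10.0 = 1.0, so far (high
# disparity) tiles are allowed a proportionally larger absolute spread
# before being classified as high-variance.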
def assignBatchBins(self,
disp_bins,
str_bins,
files_per_scene = 5, # not used here, will be used when generating batches
min_batch_choices=10, # not used here, will be used when generating batches
max_batch_files = 10): # not used here, will be used when generating batches
"""
for each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide the index of a "large"
variable-size disparity/strength bin, or -1 if this disparity/strength combination does not look valid
"""
self.files_per_scene = files_per_scene
self.min_batch_choices=min_batch_choices
self.max_batch_files = max_batch_files
hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
## hist_to_batch_multi = np.ones((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
norm_b_hist = self.blurred_hist * scale_hist
## disp_list = [] # last disparity hist
# disp_multi = [] # number of disp rows to fit
disp_run_tot = 0.0
disp_batch = 0
disp=0
num_batch_bins = disp_bins * str_bins
disp_hist = np.linspace(0, num_batch_bins, disp_bins+1)
batch_index = 0
num_members = np.zeros((num_batch_bins,),int)
while disp_batch < disp_bins:
#disp_multi.append(1)
# while (disp < self.disparity_bins):
# disp_target_tot =disp_hist[disp_batch+1]
disp_run_tot_new = disp_run_tot
disp0 = disp # start disparity matching disp_run_tot
while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
disp_run_tot_new += norm_b_hist[:,disp].sum()
disp += 1
disp_multi = 1
while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
disp_batch += 1 # only if large disp_bins and very high hist value
disp_multi += 1
# now disp_run_tot - before this batch disparity col
str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
str_bins_corr_last = str_bins_corr -1
str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
str_run_tot_new = disp_run_tot
# str_batch = 0
str_index=0
# wide_col = norm_b_hist[:,disp0:disp] #disp0 - first column, disp - last+ 1
#iterate in linescan along the column
for si in range(self.strength_bins):
for di in range(disp0, disp,1):
if norm_b_hist[si,di] > 0.0 :
str_run_tot_new += norm_b_hist[si,di]
# do not increment after last to avoid precision issues
if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
batch_index += 1
str_index += 1
if batch_index < num_batch_bins :
hist_to_batch[si,di] = batch_index
num_members[batch_index] += 1
else:
pass
else:
hist_to_batch[si,di] = -1
batch_index += 1 # it was not incremented after the last in the column to avoid rounding error
disp_batch += 1
disp_run_tot = disp_run_tot_new
pass
self.hist_to_batch = hist_to_batch
return hist_to_batch
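# Sketch of the intent (comment only): with e.g. disp_bins = 20 and
# str_bins = 2 the loop above carves the strength x disparity histogram
# into 40 variable-size bins of roughly equal normalized weight, so that
# sampling one tile per bin yields batches approximately balanced over
# the disparity/strength distribution.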
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1)
return bb
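# Worked example for getBB with the default constructor values
# (illustrative): disp_step = (100.0 + 0.1) / 1000 = 0.1001 and
# str_step = (0.9 - 0.1) / 100 = 0.008, so a tile with disparity 1.0 and
# strength 0.5 maps to db = int(1.1 / 0.1001) = 10, sb = int(0.4 / 0.008)
# = 50, i.e. bb = hist_to_batch[50, 10] (negative for zero-strength tiles).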
def makeBatchLists(self,
data_ds = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
data_gtaux = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
disp_var = None, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = None, # Minimal tile variance to include
max_var = None, # Maximal tile variance to include
min_neibs = None, # Minimal number of valid tiles to include
use_split = False, # Select single/multi-plane tiles (center only)
keep_split = False, # When use_split, keep only multi-plane tiles (False - keep only single-plane)
rnd_tile = None, # disparity random for each tile
rnd_plate = None): # disparity random for each plate (now 25 tiles)
if rnd_tile is not None:
self.rnd_tile = rnd_tile
if rnd_plate is not None:
self.rnd_plate = rnd_plate
#for file names:
self.min_neibs = min_neibs
self.use_split = use_split
self.keep_split = keep_split
if data_ds is None:
data_ds = self.train_ds
num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=bool)
border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
border_tiles = border_tiles.reshape(self.num_tiles)
bb = self.getBB(data_ds) # (19, 15, 20)
use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
list_of_file_lists=[]
for findx in range(data_ds.shape[0]):
foffs = findx * self.num_tiles
lst = []
for i in range (self.hist_to_batch.max()+1):
lst.append([])
if use_neibs:
disp_var_tiles = disp_var[findx].reshape(self.num_tiles) # was [y,x]
disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
if use_split:
if keep_split:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
else:
drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
# disp_split_tiles =
for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
if indx >= 0:
if border_tiles[n]:
continue # do not use border tiles
if use_neibs:
if disp_neibs_tiles[n] < min_neibs:
continue # too few neighbors
if not disp_var_tiles[n] >= min_var:
continue #too small variance
if not disp_var_tiles[n] < max_var:
continue #too large variance
if use_split:
if drop_tiles[n]:
continue #failed multi/single plane for DSI
lst[indx].append(foffs + n)
lst_arr=[]
for i,l in enumerate(lst):
lst_arr.append(l)
num_batch_tiles[findx,i] = len(l)
list_of_file_lists.append(lst_arr)
self.list_of_file_lists= list_of_file_lists
self.num_batch_tiles = num_batch_tiles
return list_of_file_lists, num_batch_tiles
#todo: only use other files if there are not enough choices in the main file!
'''
Add random files to the list until each (now 40) of the full_num_choices has more
than the minimal (now 10) number of variants to choose from
'''
def augmentBatchFileIndices(self,
seed_index,
seed_list = None,
min_choices=None,
max_files = None,
set_ds = None
):
if min_choices is None:
min_choices = self.min_batch_choices
if max_files is None:
max_files = self.max_batch_files
if set_ds is None:
set_ds = self.train_ds
full_num_choices = self.num_batch_tiles[seed_index].copy()
flist = [seed_index]
if seed_list is None:
seed_list = list(range(self.num_batch_tiles.shape[0]))
all_choices = list(seed_list) # a copy of seed list
all_choices.remove(seed_index) # seed_list made unique by the caller
### list(filter(lambda a: a != seed_index, all_choices)) # remove all instances of seed_index
for _ in range (max_files-1):
if full_num_choices.min() >= min_choices:
break
if len(all_choices) == 0:
print ("Nothing left in all_choices!")
break
findx = np.random.choice(all_choices)
flist.append(findx)
all_choices.remove(findx) # seed_list made unique by the caller
### list(filter(lambda a: a != findx, all_choices)) # remove all instances of findx
full_num_choices += self.num_batch_tiles[findx]
file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
tl = []
nchoices = 0
for findx in flist:
if (len(self.list_of_file_lists[findx][nt])):
tl.append(self.list_of_file_lists[findx][nt])
nchoices+= self.num_batch_tiles[findx][nt]
if nchoices >= min_choices: # use minimum of extra files
break;
while len(tl)==0:
## print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
## print("trying to use some other cell")
nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
for findx in flist:
if (len(self.list_of_file_lists[findx][nt1])):
tl.append(self.list_of_file_lists[findx][nt1])
nchoices+= self.num_batch_tiles[findx][nt1]
if nchoices >= min_choices: # use minimum of extra files
break;
tile = np.random.choice(np.concatenate(tl))
"""
Traceback (most recent call last):
File "explore_data2.py", line 1041, in
ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
File "explore_data2.py", line 761, in writeTFRewcordsEpoch
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
File "explore_data2.py", line 556, in prepareBatchData
flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
File "explore_data2.py", line 494, in augmentBatchFileIndices
tile = np.random.choice(np.concatenate(tl))
ValueError: need at least one array to concatenate
"""
# print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
if not isinstance(tile, np.int64):
print("tile=",tile)
file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
file_tiles = []
for findx in flist:
file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
return flist, file_tiles # file indices, list if tile indices for each file
def getMLList(self, ml_subdir, flist):
ml_list = []
for fn in flist:
# ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, ExploreData.ML_PATTERN)
## if isinstance(ml_subdir,list)
ml_patt = os.path.join(os.path.dirname(fn), ml_subdir, self.ml_pattern)
ml_list.append(glob.glob(ml_patt))
## self.ml_list = ml_list
return ml_list
def getBatchData(
self,
flist,
## tiles,
ml_list,
ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
if ml_num is None:
ml_num = self.files_per_scene
ml_all_files = []
for findx in flist:
mli = list(range(len(ml_list[findx])))
if (ml_num > 0) and (ml_num < len(mli)):
mli_left = mli
mli = []
for _ in range(ml_num):
ml = np.random.choice(mli_left)
mli.append(ml)
mli_left.remove(ml)
ml_files = []
for ml_index in mli:
ml_files.append(ml_list[findx][ml_index])
ml_all_files.append(ml_files)
return ml_all_files
def prepareBatchData(self,
ml_list,
seed_index,
seed_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = None,
radius = 0):
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may differ from data_ds - it is replaced during ImageJ plugin
export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if ml_num is None:
ml_num = self.files_per_scene #5
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
flist,tiles = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
ml_all_files = self.getBatchData(
flist,
ml_list,
0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
if self.debug_level > 1:
print ("==============",seed_index, flist)
for i, _ in enumerate(flist):
print(i,"\n".join(ml_all_files[i]))
print(tiles[i])
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for nscene, scene_files in enumerate(ml_all_files):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
indx = 0;
for i, nt in enumerate(tiles[nscene]):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
"""
Assign tiles to several correlation files
"""
file_tiles = []
file_indices = []
for _ in scene_files:
file_tiles.append([])
num_scene_files = len(scene_files)
for t in full_tiles:
fi = np.random.randint(0, num_scene_files) #error here - probably wrong ml file pattern (no files matched)
file_tiles[fi].append(t)
file_indices.append(fi)
corr2d_list = []
target_disparity_list = []
gt_ds_list = []
for fi, path in enumerate (scene_files):
img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
corr2d_list.append (img.corr2d)
target_disparity_list.append(img.target_disparity)
gt_ds_list.append (img.gt_ds)
img_indices = [0] * len(scene_files)
for i, fi in enumerate(file_indices):
ti = img_indices[fi]
img_indices[fi] += 1
if corr2d_batch is None:
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
corr2d_batch [start_tile] = corr2d_list[fi][ti]
start_tile += 1
"""
Sometimes get bad tile in ML file that was not bad in COMBO-DSI
Need to recover
np.argwhere(np.isnan(target_disparity_batch))
"""
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
# open the TFRecords file
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
np.random.shuffle(seed_list)
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData( #'hor-pairs' is not in list
ml_list,
seed_index,
augment_list,
min_choices=None,
max_files = None,
ml_num = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
def prepareBatchDataLwir(self,
ds_gt, # ground truth disparity/strength
sweep_files,
sweep_disparities,
seed_index,
seed_list,
min_choices=None,
max_files = None,
set_ds = None,
radius = 0,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
"""
set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
target_disparity for correlation files may differ from data_ds - it is replaced during ImageJ plugin
export if the main camera and the rig (GT) converged on different objects for the same tile
"""
if min_choices is None:
min_choices = self.min_batch_choices #10
if max_files is None:
max_files = self.max_batch_files #10
if set_ds is None:
set_ds = self.train_ds
tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
height = set_ds.shape[1]
width = set_ds.shape[2]
width_m1 = width-1
height_m1 = height-1
corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
flist0, tiles0 = self.augmentBatchFileIndices(
seed_index,
seed_list,
min_choices,
max_files,
set_ds)
flist = []
tiles = []
for f,t in zip (flist0,tiles0):
if len(t):
flist.append(f)
tiles.append(t)
total_tiles = 0
for i, t in enumerate(tiles):
total_tiles += len(t) # tiles per scene * offset files per scene
if self.debug_level > 1:
print("Tiles in the batch=",total_tiles)
corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # FIXME: replace hard-coded 81 with the correct tile area (tileH * tileW)
gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
start_tile = 0
for scene, scene_tiles in zip(flist, tiles):
'''
Create tiles list including neighbors
'''
full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
indx = 0;
for i, nt in enumerate(scene_tiles):
ty = nt // width
tx = nt % width
for dy in range (-radius, radius+1):
y = np.clip(ty+dy,0,height_m1)
for dx in range (-radius, radius+1):
x = np.clip(tx+dx,0,width_m1)
full_tiles[indx] = y * width + x
indx += 1
scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
if rnd_plate > 0.0:
for i in range(len(scene_tiles)):
disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
if rnd_tile > 0.0:
disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
# find target disparity approximations from the available sweep files
sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
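# np.add.outer(a, -b) builds the matrix a[i] - b[j]; taking abs().argmin(0)
# then selects, for every tile j, the index i of the sweep file whose
# target disparity is closest to the (randomized) requested disparity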
sfs = list(set(sweep_indices))
sfs.sort() # unique sweep indices (files)
#read required tiles from required files, place results where they belong
for sf in sfs:
#find which of the full_tiles belong to this file
this_file_indices = np.nonzero(sweep_indices == sf)[0] #Returns a tuple of arrays, one for each dimension of a, containing the indices of the non-zero elements in that dimension.
tiles_to_read = full_tiles[this_file_indices]
where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
path = sweep_files[scene][sf]
img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
corr2d_batch[where_to_put] = img.corr2d
target_disparity_batch[where_to_put] = img.target_disparity
pass
start_tile += full_tiles.shape[0]
pass
bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
if (len(bad_tiles)>0):
print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
self.corr2d_batch = corr2d_batch
self.target_disparity_batch = target_disparity_batch
self.gt_ds_batch = gt_ds_batch
return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpochLwir(self,
tfr_filename,
sweep_files,
sweep_disparities,
files_list = None,
set_ds= None,
radius = 0,
num_scenes = None,
rnd_tile = 0.0, ## disparity random for each tile
rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
# open the TFRecords file
fb = ""
if self.use_split:
fb = ["-FB1","-FB2"][self.keep_split] # single plane - FB1, split FG/BG planes - FB2
tfr_filename+="-RT%1.2f-RP%1.2f-M%d-NB%d%s"%(rnd_tile,rnd_plate,self.fgbg_mode,self.min_neibs, fb)
if not '.tfrecords' in tfr_filename:
tfr_filename += '.tfrecords'
tfr_filename=tfr_filename.replace(' ','_')
if files_list is None:
files_list = self.files_train
if set_ds is None: # (19, 15, 20, 3)
set_ds = self.train_ds
try:
os.makedirs(os.path.dirname(tfr_filename))
print("Created directory "+os.path.dirname(tfr_filename))
except OSError:
print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
pass
#skip writing if file exists - it will be possible to continue or run several instances
if os.path.exists(tfr_filename):
print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
return # temporarily disabled
writer = tf.io.TFRecordWriter(tfr_filename)
if num_scenes is None:
num_scenes = len(files_list)
'''
if len(files_list) <= num_scenes:
#create and shuffle repetitive list of files of num_scenes.length
seed_list = np.arange(num_scenes) % len(files_list)
np.random.shuffle(seed_list)
else:
#shuffle all files and use first num_scenes of them
seed_list = np.arange(len(files_list))
np.random.shuffle(seed_list)
seed_list = seed_list[:num_scenes]
'''
augment_list = []
for seed_indx in np.arange(len(files_list)):
if self.num_batch_tiles[seed_indx].sum() >0:
augment_list.append(seed_indx)
seed_list = list(augment_list) # seed list will be modified while augment_list will have unique/full list of suitable files
while len(seed_list) < num_scenes:
seed_list.append(np.random.choice(seed_list))
np.random.shuffle(seed_list)
if len(seed_list) >= num_scenes:
seed_list = seed_list[:num_scenes]
cluster_size = (2 * radius + 1) * (2 * radius + 1)
for nscene, seed_index in enumerate(seed_list):
corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchDataLwir( #'hor-pairs' is not in list
ds_gt = set_ds,
sweep_files = sweep_files,
sweep_disparities = sweep_disparities,
seed_index = seed_index,
seed_list = augment_list,
min_choices = None,
max_files = None,
set_ds = set_ds, #DS data from all GT_AX files scanned
radius = radius,
rnd_tile = rnd_tile, ## disparity random for each tile
rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
#shuffles tiles in a batch
tiles_in_batch = corr2d_batch.shape[0]
clusters_in_batch = tiles_in_batch // cluster_size
permut = np.random.permutation(clusters_in_batch)
corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
if nscene == 0:
dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
for i in range(tiles_in_batch):
x = corr2d_batch_shuffled[i].astype(np.float32)
y = target_disparity_batch_shuffled[i].astype(np.float32)
z = gt_ds_batch_shuffled[i].astype(np.float32)
d_feature = {'corr2d': dtype_feature_corr2d(x),
'target_disparity':dtype_target_disparity(y),
'gt_ds': dtype_feature_gt_ds(z)}
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
writer.write(example.SerializeToString())
if (self.debug_level > 0):
print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
writer.close()
sys.stdout.flush()
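'''
A minimal sketch of reading one of the written files back with the tf.data API
(the file name here is hypothetical - actual names get the -RT/-RP/-M/-NB suffix appended above):
feature_spec = {'corr2d': tf.io.VarLenFeature(tf.float32),
                'target_disparity': tf.io.VarLenFeature(tf.float32),
                'gt_ds': tf.io.VarLenFeature(tf.float32)}
dataset = tf.data.TFRecordDataset('train000_R2.tfrecords')
dataset = dataset.map(lambda rec: tf.io.parse_single_example(rec, feature_spec))
'''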
def showVariance(self,
rds_list, # list of disparity/strength files, such as training, testing
disp_var_list, # list of disparity variance files. Same shape (except the last dim) as rds_list
num_neibs_list, # list of number of tile neibs files. Same shape (except the last dim) as rds_list
variance_min = 0.0,
variance_max = 1.5,
neibs_min = 9,
#Same parameters as for the histogram
# disparity_bins = 1000,
# strength_bins = 100,
# disparity_min_drop = -0.1,
# disparity_min_clip = -0.1,
# disparity_max_drop = 100.0,
# disparity_max_clip = 100.0,
# strength_min_drop = 0.1,
# strength_min_clip = 0.1,
# strength_max_drop = 1.0,
# strength_max_clip = 0.9,
normalize = False): # True):
good_tiles_list=[]
for nf, combo_rds in enumerate(rds_list):
disp_var = disp_var_list[nf]
num_neibs = num_neibs_list[nf]
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
variance = disp_var[ids]
neibs = num_neibs[ids]
good_tiles[ids] = disparity >= self.disparity_min_drop
good_tiles[ids] &= disparity <= self.disparity_max_drop
good_tiles[ids] &= strength >= self.strength_min_drop
good_tiles[ids] &= strength <= self.strength_max_drop
good_tiles[ids] &= neibs >= neibs_min
good_tiles[ids] &= variance >= variance_min
good_tiles[ids] &= variance < variance_max
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
# np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
# np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(rds_list)
# hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(),
y = combo_rds[...,0].flatten(),
bins= (self.strength_bins, self.disparity_bins),
range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
mytitle = "Disparity_Strength variance histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
# plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# for i, combo_rds in enumerate(rds_list):
# for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
# combo_rds[ids][...,1]*= good_tiles_list[i][ids]
# return hist, xedges, yedges
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train3'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set21'
topdir_test = '/data_ssd/lwir_sets/lwir_test3'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_4'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_sets = [
"/data_ssd/lwir_sets/lwir_test2/1562390202_933097/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_269784/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390225_839538/v01/ml32", # andrey /empty
"/data_ssd/lwir_sets/lwir_test2/1562390243_047919/v01/ml32", # 2 trees
"/data_ssd/lwir_sets/lwir_test2/1562390251_025390/v01/ml32", # empty space
"/data_ssd/lwir_sets/lwir_test2/1562390257_977146/v01/ml32", # first 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_370347/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test2/1562390260_940102/v01/ml32", # all 3
"/data_ssd/lwir_sets/lwir_test3/1562390402_254007/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390407_382326/v01/ml32", # near moving car
"/data_ssd/lwir_sets/lwir_test3/1562390409_661607/v01/ml32", # lena, 2 far moving cars
"/data_ssd/lwir_sets/lwir_test3/1562390435_873048/v01/ml32", # 2 parked cars, lena
"/data_ssd/lwir_sets/lwir_test3/1562390456_842237/v01/ml32", # near trees
"/data_ssd/lwir_sets/lwir_test3/1562390460_261151/v01/ml32"] # near trees, olga
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
TEST_SAME_LENGTH_AS_TRAIN = False # True # make the test set have the same number of entries as the train one
FIXED_TEST_LENGTH = None # number of test scenes to output (used when making the test set from a few or a single test file)
RADIUS = 2 # 5x5
FRAC_NEIBS_VALID = 0.55 # 8 #LWIR new
MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all tiles in the cluster valid (25 for RADIUS == 2)
MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
VARIANCE_THRESHOLD = 1.2 # 0.4 # 1.5
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average is above this
NUM_TRAIN_SETS = 32 # 8
FGBGMODE_TESTS = [1,3] # 0 - average, 1 - FG, 2 - BG, 3 - AUX
FGBGMODE_TRAIN = 1 # 0 - average, 1 - FG, 2 - BG
RND_AMPLIUDE_TEST = 0.5 # test with corr2d rendered +/- this far from the GT
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
RND_AMPLIUDE_TRAIN_TILEW = 2.0 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATEW = 0.0 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use a tile for training if the MAIN camera (AUX for LWIR) disparity differs from GT by more than this
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
USE_SPLIT = False # True, # Select by single/multi-plane tiles (center only)
KEEP_SPLIT = False # When USE_SPLIT, keep only multi-plane tiles (False - only single-plane)
if not topdir_train:
NUM_TRAIN_SETS = 0
if RADIUS == 0:
BATCH_DISP_BINS = 50 # 1000 * 1
BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
BATCH_DISP_BINS = 15 # 120 * 9
BATCH_STR_BINS = 8
else: # RADIUS = 2
BATCH_DISP_BINS = 10 # 40 * 25
BATCH_STR_BINS = 4
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
for model_ml_path in test_sets:
for fgbgmode_test in FGBGMODE_TESTS:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
fgbgmode_test, # export_mode: 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
RND_AMPLIUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR) # TFR directory
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
ex_data = ExploreData(
topdir_train = topdir_train,
topdir_test = topdir_test,
ml_subdir = MODEL_ML_DIR,
ml_pattern = ml_pattern,
max_main_offset = MAX_MAIN_OFFSET,
latest_version_only = LATEST_VERSION_ONLY,
debug_level = 1, #3, #1, #3, ##0, #3,
disparity_bins = 50, #100 #200, #1000,
strength_bins = 50, #100
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 8.0, #100.0,
disparity_max_clip = 8.0, #100.0,
strength_min_drop = 0.02, # 0.1,
strength_min_clip = 0.02, # 0.1,
strength_max_drop = 0.3, # 1.0,
strength_max_clip = 0.27, # 0.9,
hist_sigma = 2.0, # Blur log histogram
hist_cutoff= 0.001, # of maximal
fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
radius = RADIUS)
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
hist_to_batch = ex_data.assignBatchBins(
disp_bins = BATCH_DISP_BINS,
str_bins = BATCH_STR_BINS)
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
# show variance histogram
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape (except the last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neibs files. Same shape (except the last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, such as training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape (except the last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neibs files. Same shape (except the last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
else:
num_test_scenes = FIXED_TEST_LENGTH
if RADIUS == 0 : # not used
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
scale_disp = VARIANCE_SCALE_DISPARITY,
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
pass
for train_var in range (NUM_TRAIN_SETS):
fpath = train_filenameTFR+("%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT) # When use_split, keep only multi-plane tiles (False - only single-plane)
fpath = test_filenameTFR # +("-%03d"%(train_var,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
else: # RADIUS > 0
# test
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_test = num_batch_tiles_test.sum()
print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.test_ds,
data_gtaux = ex_data.test_gtaux,
disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_test = num_batch_tiles_test.sum()
high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
fpath = test_filenameTFR +("TEST_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.test_sweep_files,
sweep_disparities = ex_data.test_sweep_disparities,
files_list = ex_data.files_test,
set_ds = ex_data.test_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
#fake
if NUM_TRAIN_SETS > 0:
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_fake = num_batch_tiles_fake.sum()
print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_fake = num_batch_tiles_fake.sum()
high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
fpath = test_filenameTFR +("FAKE_R%d"%(RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = num_test_scenes,
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
# train 32 sets
for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILE, ## disparity random for each tile - narrow
rnd_plate = RND_AMPLIUDE_TRAIN_PLATE)## disparity random for each plate (now 25 tiles) - narrow
num_le_train = num_batch_tiles_train.sum()
print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
fpath = train_filenameTFR+("%03d_R%d"%(train_var,RADIUS,))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
data_ds = ex_data.train_ds,
data_gtaux = ex_data.train_gtaux,
disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) the maximal is 9)
min_var = 0.0, # Minimal tile variance to include
max_var = 1000.0, # Maximal tile variance to include
min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
use_split = USE_SPLIT, # Select by single/multi-plane tiles (center only)
keep_split = KEEP_SPLIT, # When use_split, keep only multi-plane tiles (False - only single-plane)
rnd_tile = RND_AMPLIUDE_TRAIN_TILEW, ## disparity random for each tile - wide
rnd_plate = RND_AMPLIUDE_TRAIN_PLATEW)## disparity random for each plate (now 25 tiles) - wide
num_gt_train = num_batch_tiles_train.sum()
high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
fpath = (train_filenameTFR+("%03d_R%d"%(train_var,RADIUS)))
ex_data.writeTFRewcordsEpochLwir(
fpath,
sweep_files = ex_data.train_sweep_files,
sweep_disparities = ex_data.train_sweep_disparities,
files_list = ex_data.files_train,
set_ds = ex_data.train_ds,
radius = ex_data.radius,
num_scenes = len(ex_data.files_train),
rnd_tile = ex_data.rnd_tile,
rnd_plate = ex_data.rnd_plate)
plt.show()
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/imagej_tiff.py 0000775 0000000 0000000 00000050470 13517677053 0023234 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
'''
/**
* @file imagej_tiff.py
* @brief open multi layer tiff files, display layers and parse meta data
* @par License:
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
'''
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "oleg@elphel.com"
'''
Notes:
- Pillow 5.1.0. Version 4.1.1 throws an error (ValueError):
~$ (sudo) pip3 install Pillow --upgrade
~$ python3
>>> import PIL
>>> PIL.PILLOW_VERSION
'5.1.0'
'''
from PIL import Image
import xml.etree.ElementTree as ET
import numpy as np
import matplotlib.pyplot as plt
import sys
import xml.dom.minidom as minidom
import time
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
class IJML:
# as defined in ImageDtt.java
ML_OTHER_TARGET = 0 # Offset to target disparity data in ML_OTHER_INDEX layer tile
ML_OTHER_GTRUTH = 2 # Offset to ground truth disparity data in ML_OTHER_INDEX layer tile
ML_OTHER_GTRUTH_STRENGTH = 4 # Offset to ground truth confidence data in ML_OTHER_INDEX layer tile
ML_OTHER_GTRUTH_RMS = 6 # Offset to ground truth RMS in ML_OTHER_INDEX layer tile
ML_OTHER_GTRUTH_RMS_SPLIT = 8 # Offset to ground truth combined FG/BG RMS in ML_OTHER_INDEX layer tile
ML_OTHER_GTRUTH_FG_DISP = 10 # Offset to ground truth FG disparity in ML_OTHER_INDEX layer tile
ML_OTHER_GTRUTH_FG_STR = 12 # Offset to ground truth FG strength in ML_OTHER_INDEX layer tile
ML_OTHER_GTRUTH_BG_DISP = 14 # Offset to ground truth BG disparity in ML_OTHER_INDEX layer tile
ML_OTHER_GTRUTH_BG_STR = 16 # Offset to ground truth BG strength in ML_OTHER_INDEX layer tile
ML_OTHER_AUX_DISP = 18 # Offset to AUX heuristic disparity in ML_OTHER_INDEX layer tile
ML_OTHER_AUX_STR = 20 # Offset to AUX heuristic strength in ML_OTHER_INDEX layer tile
# indices
TARGET = ML_OTHER_TARGET // 2
GTRUTH = ML_OTHER_GTRUTH // 2
STRENGTH = ML_OTHER_GTRUTH_STRENGTH // 2
RMS = ML_OTHER_GTRUTH_RMS // 2
RMS_SPLIT = ML_OTHER_GTRUTH_RMS_SPLIT // 2
FG_DISP = ML_OTHER_GTRUTH_FG_DISP // 2
FG_STR = ML_OTHER_GTRUTH_FG_STR // 2
BG_DISP = ML_OTHER_GTRUTH_BG_DISP // 2
BG_STR = ML_OTHER_GTRUTH_BG_STR // 2
AUX_DISP = ML_OTHER_AUX_DISP // 2
AUX_STR = ML_OTHER_AUX_STR // 2
SIGNED = (TARGET, GTRUTH, FG_DISP, BG_DISP)
UNSIGNED_RMS = (RMS, RMS_SPLIT)
NUM_VALUES = 11
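'''
Fixed-point packing used by the 8 bpp decoders below (a worked illustration; the byte values
are hypothetical). Each of the NUM_VALUES entries occupies two consecutive bytes
(m0 = high, m1 = low) of an 'other'-layer tile:
m0, m1 = 192, 0 # e.g. a SIGNED entry (TARGET, GTRUTH, FG_DISP, BG_DISP)
v = ((m0 - 128) * 256 + m1) / 128 # == 128.0; step 1/128, range about -256..+256
UNSIGNED_RMS entries use (m0 * 256 + m1) / 4096.0, all others (m0 * 256 + m1) / 65536.0.
'''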
class IJFGBG:
DSI_NAMES = ["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
DISPARITY = 0
STRENGTH = 1
RMS = 2
RMS_SPLIT = 3
FG_DISP = 4
FG_STR = 5
BG_DISP = 6
BG_STR = 7
AUX_DISP = 8
AUX_STR = 9
# reshape to tiles
def get_tile_images(image, width=8, height=8):
_nrows, _ncols, depth = image.shape
_size = image.size
_strides = image.strides
nrows, _m = divmod(_nrows, height)
ncols, _n = divmod(_ncols, width)
if _m != 0 or _n != 0:
return None
return np.lib.stride_tricks.as_strided(
np.ravel(image),
shape=(nrows, ncols, height, width, depth),
strides=(height * _strides[0], width * _strides[1], *_strides),
writeable=False
)
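'''
Example (a minimal sketch): for an image of shape (480, 640, 5) and the default 8x8 tiles,
get_tile_images returns a read-only strided view of shape (60, 80, 8, 8, 5):
import numpy as np
tiles = get_tile_images(np.zeros((480, 640, 5)))
print(tiles.shape) # (60, 80, 8, 8, 5)
'''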
# TiffFile has no len exception
#import imageio
#from libtiff import TIFF
'''
Description:
Reads a tiff file with multiple layers that were saved by ImageJ
Methods:
.getstack(items=[])
returns np.array, layers are stacked along depth - think of RGB channels
@items - if empty = all, if not - items[i] - can be layer index or layer's label name
.channel(index)
returns np.array of a single layer
.show_images(items=[])
@items - if empty = all, if not - items[i] - can be layer index or layer's label name
.show_image(index)
Examples:
#1
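(a minimal sketch; the file path and layer labels are hypothetical and depend on the actual file)
ijt = imagej_tiff('some-ML_DATA.tiff')
print(ijt.labels) # layer names stored by ImageJ
tiles = ijt.getstack(['diagm-pair','hor-pairs'], shape_as_tiles=True)
ijt.show_images(['other'])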
'''
class imagej_tiff:
# imagej stores labels lengths in this tag
__TIFF_TAG_LABELS_LENGTHS = 50838
# imagej stores labels contents in this tag
__TIFF_TAG_LABELS_STRINGS = 50839
# init
def __init__(self,filename, layers = None, tile_list = None):
# file name
self.fname = filename
tif = Image.open(filename)
# total number of layers in tiff
self.nimages = tif.n_frames
# labels array
self.labels = []
# infos will contain xml data Elphel stores in some of tiff files
self.infos = []
# dictionary from decoded infos[0] xml data
self.props = {}
# bits per sample, type int
self.bpp = tif.tag[258][0]
self.__split_labels(tif.n_frames,tif.tag)
self.__parse_info()
try:
self.nan_bug = self.props['VERSION'] == '1.0' # data between min and max is mapped to 0..254 instead of 1..255
except KeyError:
self.nan_bug = False # other files, not ML ones
# image layers stacked along depth - (think RGB)
self.image = []
if layers is None:
# fill self.image
for i in range(self.nimages):
tif.seek(i)
a = np.array(tif)
a = np.reshape(a,(a.shape[0],a.shape[1],1))
#a = a[:,:,np.newaxis]
# scale for 8-bits
# exclude layer named 'other'
if self.bpp==8:
_min = self.data_min
_max = self.data_max
_MIN = 1
_MAX = 255
if (self.nan_bug):
_MIN = 0
_MAX = 254
else:
if self.labels[i]!='other':
a[a==0]=np.nan
a = a.astype(float)
if self.labels[i]!='other':
# a[a==0]=np.nan
a = (_max-_min)*(a-_MIN)/(_MAX-_MIN)+_min
# init
if i==0:
self.image = a
# stack along depth (think of RGB channels)
else:
self.image = np.append(self.image,a,axis=2)
else:
if tile_list is None:
indx = 0
for layer in layers:
tif.seek(self.labels.index(layer))
a = np.array(tif)
if not indx:
self.image = np.empty((a.shape[0],a.shape[1],len(layers)),a.dtype)
self.image[...,indx] = a
indx += 1
else:
other_label = "other"
# print(tile_list)
num_tiles = len(tile_list)
num_layers = len(layers)
tiles_corr = np.empty((num_tiles,num_layers,self.tileH*self.tileW),dtype=float)
# tiles_other=np.empty((num_tiles,3),dtype=float)
tiles_other=self.gettilesvalues( # returns nparray of 11 floats (was 3)
tif = tif,
tile_list=tile_list,
label=other_label)
for nl,label in enumerate(layers):
tif.seek(self.labels.index(label)) # raises ValueError if the label is missing
layer = np.array(tif) # 8 or 32 bits
tilesX = layer.shape[1]//self.tileW
for nt,tl in enumerate(tile_list):
ty = tl // tilesX
tx = tl % tilesX
a = np.ravel(layer[self.tileH * ty : self.tileH * (ty+1),
self.tileW * tx : self.tileW * (tx+1)])
#convert from int8
if self.bpp==8:
a = a.astype(float)
if np.isnan(tiles_other[nt][0]):
# print("Skipping NaN tile ",tl)
a[...] = np.nan
else:
_min = self.data_min
_max = self.data_max
_MIN = 1
_MAX = 255
if (self.nan_bug):
_MIN = 0
_MAX = 254
else:
a[a==0] = np.nan
a = (_max-_min)*(a-_MIN)/(_MAX-_MIN)+_min
tiles_corr[nt,nl] = a
pass
pass
self.corr2d = tiles_corr
self.target_disparity = tiles_other[...,0]
self.gt_ds = tiles_other[...,1:3]
self.payload = tiles_other#[...,0:12]
pass
# init done, close the image
tif.close()
# label == tiff layer name
def getvalues(self,label=""):
l = self.getstack([label],shape_as_tiles=True)
res = np.empty((l.shape[0],l.shape[1], IJML.NUM_VALUES)) # was just 3
for i in range(res.shape[0]):
for j in range(res.shape[1]):
# 9x9 -> 81x1
m = np.ravel(l[i,j])
if self.bpp==32:
for k in range(res.shape[2]):
res[i,j,k] = m[k * 2]
elif self.bpp==8:
for k in range(res.shape[2]):
if k in IJML.SIGNED:
res[i,j,k] = ((m[2 * k] - 128) * 256 + m[2 * k + 1]) / 128
elif k in IJML.UNSIGNED_RMS:
res[i,j,k] = (m[2 * k]*256+m[2 * k + 1])/4096.0
else:
res[i,j,k] = (m[2 * k]*256+m[2 * k + 1])/65536.0
else:
for k in range(res.shape[2]):
res[i,j,k] = np.nan
# NaNize - TODO: update !
if self.bpp==8:
a = res[:,:,0]
a[a==-256] = np.nan
b = res[:,:,1]
b[b==-256] = np.nan
c = res[:,:,2]
c[c==0] = np.nan
return res
# 3 values per tile: target disparity, GT disparity, GT confidence
# With LWIR/aux there are more!
def gettilesvalues(self,
tif,
tile_list,
label=""):
res = np.empty((len(tile_list), IJML.NUM_VALUES),dtype=float) # was only 3
tif.seek(self.labels.index(label))
layer = np.array(tif) # 8 or 32 bits
tilesX = layer.shape[1]//self.tileW
for i,tl in enumerate(tile_list):
ty = tl // tilesX
tx = tl % tilesX
m = np.ravel(layer[self.tileH*ty:self.tileH*(ty+1),self.tileW*tx:self.tileW*(tx+1)])
if self.bpp==32:
for k in range(res.shape[1]):
res[i,k] = m[k * 2]
elif self.bpp==8:
for k in range(res.shape[1]):
if k in IJML.SIGNED:
res[i,k] = ((m[2 * k] - 128) * 256 + m[2 * k + 1]) / 128
elif k in IJML.UNSIGNED_RMS:
res[i,k] = (m[2 * k]*256+m[2 * k + 1])/4096.0
else:
res[i,k] = (m[2 * k]*256+m[2 * k + 1])/65536.0
else:
for k in range(res.shape[1]):
res[i,k] = np.nan
# NaNize update!
if self.bpp==8:
a = res[...,0]
a[a==-256] = np.nan
b = res[...,1]
b[b==-256] = np.nan
c = res[...,2]
c[c==0] = np.nan
return res
# get ordered stack of images by provided items
# by index or label name
def getstack(self,items=[],shape_as_tiles=False):
a = ()
if len(items)==0:
b = self.image
else:
for i in items:
if type(i)==int:
a += (self.image[:,:,i],)
elif type(i)==str:
j = self.labels.index(i)
a += (self.image[:,:,j],)
# stack along depth
b = np.stack(a,axis=2)
if shape_as_tiles:
b = get_tile_images(b,self.tileW,self.tileH)
return b
# get np.array of a channel
# * does not handle out of bounds
def channel(self,index):
return self.image[:,:,index]
# display images by index or label
def show_images(self,items=[]):
# show listed only
if len(items)>0:
for i in items:
if type(i)==int:
self.show_image(i)
elif type(i)==str:
j = self.labels.index(i)
self.show_image(j)
# show all
else:
for i in range(self.nimages):
self.show_image(i)
# display single image
def show_image(self,index):
# display using matplotlib
t = self.image[:,:,index]
mytitle = "("+str(index+1)+" of "+str(self.nimages)+") "+self.labels[index]
fig = plt.figure()
fig.canvas.set_window_title(self.fname+": "+mytitle)
fig.suptitle(mytitle)
#plt.imshow(t,cmap=plt.get_cmap('gray'))
plt.imshow(t)
plt.colorbar()
# display using Pillow - need to scale
# remove NaNs - no need
#t[np.isnan(t)]=np.nanmin(t)
# scale to [min/max*255:255] range
#t = (1-(t-np.nanmax(t))/(t-np.nanmin(t)))*255
#tmp_im = Image.fromarray(t)
#tmp_im.show()
# puts etrees in self.infos
def __parse_info(self):
infos = []
for info in self.infos:
infos.append(ET.fromstring(info))
self.infos = infos
# specifics
# properties dictionary
pd = {}
if infos:
for child in infos[0]:
#print(child.tag+"::::::"+child.text)
pd[child.tag] = child.text
self.props = pd
# tiles are squares
self.tileW = int(self.props['tileWidth'])
self.tileH = int(self.props['tileWidth'])
if self.bpp==8:
self.data_min = float(self.props['data_min'])
self.data_max = float(self.props['data_max'])
# makes arrays of labels (strings) and unparsed xml infos
def __split_labels(self,n,tag):
# list
tag_lens = tag[self.__TIFF_TAG_LABELS_LENGTHS]
# string
tag_labels = tag[self.__TIFF_TAG_LABELS_STRINGS].decode()
# remove 1st element: it's something like IJIJlabl..
tag_labels = tag_labels[tag_lens[0]:]
tag_lens = tag_lens[1:]
# the last ones are images labels
# normally the difference is expected to be 0 or 1
skip = len(tag_lens) - n
self.labels = []
self.infos = []
for l in tag_lens:
string = tag_labels[0:l].replace('\x00','')
if skip==0:
self.labels.append(string)
else:
self.infos.append(string)
skip -= 1
tag_labels = tag_labels[l:]
#MAIN
if __name__ == "__main__":
try:
fname = sys.argv[1]
except IndexError:
fname = "/data_ssd/lwir3d/models/002/1562390096_605721/v01/ml32/1562390096_605721-ML_DATA-32B-AOT-FZ0.03-AG.tiff"
# fname = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train/1527182807_896892/v02/ml/1527182807_896892-ML_DATA-08B-O-FZ0.05-OFFS0.40000.tiff"
# fname = "1521849031_093189-ML_DATA-32B-O-OFFS1.0.tiff"
# fname = "1521849031_093189-ML_DATA-08B-O-OFFS1.0.tiff"
#fname = "1521849031_093189-DISP_MAP-D0.0-46.tif"
#fname = "1526905735_662795-ML_DATA-08B-AIOTD-OFFS2.0.tiff"
#fname = "test.tiff"
print(bcolors.BOLDWHITE+"time: "+str(time.time())+bcolors.ENDC)
ijt = imagej_tiff(fname)
print(bcolors.BOLDWHITE+"time: "+str(time.time())+bcolors.ENDC)
print("TIFF stack labels: "+str(ijt.labels))
#print(ijt.infos)
rough_string = ET.tostring(ijt.infos[0], "utf-8")
reparsed = minidom.parseString(rough_string)
print(reparsed.toprettyxml(indent="\t"))
#print(ijt.props)
# needed properties:
print("Tiles shape: "+str(ijt.tileW)+"x"+str(ijt.tileH))
try:
print("Data min: "+str(ijt.data_min))
print("Data max: "+str(ijt.data_max))
except AttributeError:
print("No min/max are provided (32-bit mode)")
print(ijt.image.shape)
# layer order: ['diagm-pair', 'diago-pair', 'hor-pairs', 'vert-pairs', 'other']
# now split this into tiles:
#tiles = get_tile_images(ijt.image,ijt.tileW,ijt.tileH)
#print(tiles.shape)
# tiles = ijt.getstack(['diagm-pair','diago-pair','hor-pairs','vert-pairs'],shape_as_tiles=True)
tiles = ijt.getstack(['diagm-aux','diago-aux','hor-aux','vert-aux'],shape_as_tiles=True)
print("Stack of images shape: "+str(tiles.shape))
print(bcolors.BOLDWHITE+"time: "+str(time.time())+bcolors.ENDC)
# provide layer name
values = ijt.getvalues(label='other')
print("Stack of values shape: "+str(values.shape))
# each tile's disparity:
fig = plt.figure()
fig.suptitle("Estimated Disparity")
plt.imshow(values[:,:,0])
plt.colorbar()
fig = plt.figure()
fig.suptitle("Esitmated+Residual disparity")
plt.imshow(values[:,:,1])
plt.colorbar()
fig = plt.figure()
fig.suptitle("Residual disparity confidence")
plt.imshow(values[:,:,2])
plt.colorbar()
print(bcolors.BOLDWHITE+"time: "+str(time.time())+bcolors.ENDC)
#print(values)
#print(value_tiles[131,162].flatten())
#print(np.ravel(value_tiles[131,162]))
#values = np.empty((vt.shape[0],vt.shape[1],3))
#for i in range(values.shape[0]):
# for j in range(values.shape[1]):
# values[i,j,0] = get_v1()
#print(tiles[121,160,:,:,0].shape)
#_nrows = int(ijt.image.shape[0] / ijt.tileH)
#_ncols = int(ijt.image.shape[1] / ijt.tileW)
#_nrows = 32
#_ncols = 32
#print(str(_nrows)+" "+str(_ncols))
#fig, ax = plt.subplots(nrows=_nrows, ncols=_ncols)
#for i in range(_nrows):
# for j in range(_ncols):
# ax[i,j].imshow(tiles[i+100,j,:,:,0])
# ax[i,j].set_axis_off()
#for i in range(5):
# fig = plt.figure()
# plt.imshow(tiles[121,160,:,:,i])
# plt.colorbar()
#ijt.show_images(['other'])
#ijt.show_images([0,3])
#ijt.show_images(['X-corr','Y-corr'])
#ijt.show_images(['R-vign',3])
ijt.show_images()
plt.show()
input("All done. Press ENTER to close images and exit...")
# Examples
# 1: get default stack of images
#a = ijt.getstack()
#print(a.shape)
# 2: get defined ordered stack of images by tiff image index or by label name
#a = ijt.getstack([1,2,'X-corr'])
#print(a.shape)
# 3: will throw an error if there's no such label
#a = ijt.getstack([1,2,'Unknown'])
#print(a.shape)
# 4: will throw an error if index is out of bounds
#a = ijt.getstack([1,2,'X-corr'])
#print(a.shape)
# 5: dev exercise
#a = np.array([[1,2],[3,4]])
#b = np.array([[5,6],[7,8]])
#c = np.array([[10,11],[12,13]])
#print("test1:")
#ka = (a,b,c)
#d = np.stack(ka,axis=2)
#print(d)
#print("test2:")
#e = np.stack((d[:,:,1],d[:,:,0]),axis=2)
#print(e)
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/imagej_tiffwriter.py 0000664 0000000 0000000 00000012031 13517677053 0024455 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
'''
/**
* @file imagej_tiffwriter.py
* @brief save tiffs for imagej (1.52d+) - with stacks and hyperstacks
* @par License:
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
'''
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "oleg@elphel.com"
'''
Usage example:
import imagej_tiffwriter
import numpy as np
Have a few images in the form of numpy arrays np.float32:
- (h,w)
- (n,h,w)
Labels can be provided as a list: ['label1','label2', etc.]
No list length check against number of images
imagej_tiffwriter.save(path,images,labels)
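E.g. (a minimal sketch; the output path is hypothetical):
images = np.zeros((3, 120, 160), np.float32)
imagej_tiffwriter.save('/tmp/test.tiff', images, ['layer1','layer2','layer3'])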
'''
import numpy as np
import struct
import tifffile
import math
# from here: https://stackoverflow.com/questions/50258287/how-to-specify-colormap-when-saving-tiff-stack
def imagej_metadata_tags(metadata, byteorder):
"""Return IJMetadata and IJMetadataByteCounts tags from metadata dict.
The tags can be passed to the TiffWriter.save function as extratags.
"""
header = [{'>': b'IJIJ', '<': b'JIJI'}[byteorder]]
bytecounts = [0]
body = []
def writestring(data, byteorder):
return data.encode('utf-16' + {'>': 'be', '<': 'le'}[byteorder])
def writedoubles(data, byteorder):
return struct.pack(byteorder+('d' * len(data)), *data)
def writebytes(data, byteorder):
return data.tobytes()
metadata_types = (
('Info', b'info', 1, writestring),
('Labels', b'labl', None, writestring),
('Ranges', b'rang', 1, writedoubles),
('LUTs', b'luts', None, writebytes),
('Plot', b'plot', 1, writebytes),
('ROI', b'roi ', 1, writebytes),
('Overlays', b'over', None, writebytes))
for key, mtype, count, func in metadata_types:
if key not in metadata:
continue
if byteorder == '<':
mtype = mtype[::-1]
values = metadata[key]
if count is None:
count = len(values)
else:
values = [values]
header.append(mtype + struct.pack(byteorder+'I', count))
for value in values:
data = func(value, byteorder)
body.append(data)
bytecounts.append(len(data))
body = b''.join(body)
header = b''.join(header)
data = header + body
bytecounts[0] = len(header)
bytecounts = struct.pack(byteorder+('I' * len(bytecounts)), *bytecounts)
return ((50839, 'B', len(data), data, True),
(50838, 'I', len(bytecounts)//4, bytecounts, True))
#def save(path,images,force_stack=False,force_hyperstack=False):
def save(path,images,labels=None,label_prefix="Label "):
'''
labels a list or None
'''
'''
Expecting:
(h,w),
(n,h,w) - just create a simple stack
'''
# Got images, analyze shape:
# - possible formats (c == depth):
# -- (t,z,h,w,c)
# -- (t,h,w,c), t or z does not matter
# -- (h,w,c)
# -- (h,w)
# shapes of length 0 or 1 are not handled
#
# (h,w)
if len(images.shape)==2:
images = images[np.newaxis,...]
# now the shape length is 3
if len(images.shape)==3:
# tifffile treats shape[0] as channel, need to expand to get labels displayed
#images = images[images.shape[0],np.newaxis,images.shape[1],images.shape[2]]
images = np.reshape(images,(images.shape[0],1,images.shape[1],images.shape[2]))
labels_list = []
if labels is None:
for i in range(images.shape[0]):
labels_list.append(label_prefix+str(i+1))
else:
labels_list = labels
ijtags = imagej_metadata_tags({'Labels':labels_list}, '<')
with tifffile.TiffWriter(path, bigtiff=False,imagej=True) as tif:
for i in range(images.shape[0]):
tif.save(images[i], metadata={'version':'1.11a','loop': False}, extratags=ijtags)
# Testing
if __name__ == "__main__":
def hamming_window(x,N):
y = 0.54 - 0.46*math.cos(2*math.pi*x/(N-1))
return y
hw = hamming_window
NT = 5
NX = 512
NY = 512
images = np.empty((NT,NY,NX),np.float32)
import time
print(str(time.time())+": Generating test images")
for t in range(NT):
images[t,:,:] = np.array([[(255-t*25)*hw(i,512)*hw(j,512) for i in range(NX)] for j in range(NY)],np.float32)
print(str(time.time())+": Test images generated")
print("Images shape: "+str(images.shape))
v = save("tiffwriter_test.tiff",images)
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/infer_qcds_01.py 0000664 0000000 0000000 00000042521 13517677053 0023400 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
from tensorflow.python.framework.ops import GraphKeys
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
'''
Builds (and saves) an inference model from one trained by nn_ds_neibs21.py.
Saves the model and weights in 2 formats - using Saver (for Python) and Saved_Model (for Java or Python).
Model and weights are also used by the older (but still usable) inference-only infer_qcds_graph.py.
Usage:
~$ python3 infer_qcds_01.py qcstereo_conf.xml data_sets
qcstereo_conf.xml - config file with all paths
data_sets - root dir for trained model/checkpoints, etc.
'''
import os
import sys
import numpy as np
import time
import shutil
import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops
tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
#IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
FGBG_MODE = 1 # 0 - do not filter by single-plane, 1 - remove split-plane tiles, 2 - remove split planes and neighbors
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
IMG_WIDTH = None
IMG_HEIGHT = None
#WIDTH = 160
#HEIGHT = 120
"""
Next gets globals from the config file
"""
globals().update(parameters)
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 Each batch of tiles has balanced D/S tiles, shuffled batches but not inside batches
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
#SLICE_LABELS = ["nn_out_ext","heur_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except OSError:
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
ph_corr2d = tf.compat.v1.placeholder(np.float32, (None,FEATURES_PER_TILE), name = 'ph_corr2d')
ph_target_disparity = tf.compat.v1.placeholder(np.float32, (None,1), name = 'ph_target_disparity')
ph_ntile = tf.compat.v1.placeholder(np.int32, (None,), name = 'ph_ntile') #nTile
ph_ntile_out = tf.compat.v1.placeholder(np.int32, (None,), name = 'ph_ntile_out') #which tiles should be calculated in stage2
#corr2d9x325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE]) , tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1])],2)
tf_intile325 = tf.concat([ph_corr2d, ph_target_disparity],axis=1,name="tf_intile325") # [?,325]
pass
"""
target_disparity_cluster = tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1], name="targdisp_cluster")
corr2d_Nx325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE], name="coor2d_cluster"),
target_disparity_cluster], axis=2, name = "corr2d_Nx325")
"""
cluster_radius = CLUSTER_RADIUS
"""
Probably ResourceVariable is not needed here because of tf.scatter_update().
If no collection is provided, the variable defaults to [GraphKeys.GLOBAL_VARIABLES], which in turn breaks saver.restore() because this variable was not present in the trained model.
"""
'''
#rv_stage1_out = resource_variable_ops.ResourceVariable(
rv_stage1_out = tf.Variable(
np.zeros([HEIGHT * WIDTH, NN_LAYOUT1[-1]]),
## collections = [],
collections = [GraphKeys.LOCAL_VARIABLES],# Works, available with tf.local_variables()
dtype=np.float32,
name = 'rv_stage1_out')
'''
rv_stage1_out = tf.compat.v1.get_variable("rv_stage1_out",
shape=[IMG_HEIGHT * IMG_WIDTH, NN_LAYOUT1[-1]],
dtype=tf.float32,
initializer=tf.zeros_initializer,
collections = [GraphKeys.LOCAL_VARIABLES],trainable=False)
#rv_stageX_out_init_placeholder = tf.compat.v1.placeholder(tf.float32, shape=[HEIGHT * WIDTH, NN_LAYOUT1[-1]])
#rv_stageX_out_init_op = rv_stageX_out.assign(rv_stageX_out_init_placeholder)
##stage1_tiled = tf.reshape(rv_stage1_out.read_value(),[HEIGHT, WIDTH, -1], name = 'stage1_tiled')
stage1_tiled = tf.reshape(rv_stage1_out, [IMG_HEIGHT, IMG_WIDTH, -1], name = 'stage1_tiled') # no need to synchronize here?
tf_stage1_exth = tf.concat([stage1_tiled[:,:1,:]]*cluster_radius +
[stage1_tiled] +
[stage1_tiled[:,-1:,:]]*cluster_radius, axis = 1,name = 'stage1_exth')
tf_stage1_ext = tf.concat([tf_stage1_exth[ :1,:,:]]*cluster_radius +
[tf_stage1_exth] +
[tf_stage1_exth[-1:,:,:]]*cluster_radius, axis = 0, name = 'stage1_ext')
tf_stage1_ext4 = tf.expand_dims(tf_stage1_ext, axis = 2, name = 'stage1_ext4')
concat_list = []
cluster_side = 2 * cluster_radius+1
for dy in range(cluster_side):
for dx in range(cluster_side):
# concat_list.append(tf_stage1_ext4[dy: cluster_side-dy, dx: cluster_side-dx,:,:])
concat_list.append(tf.slice(tf_stage1_ext4,[dy,dx,0,0],[IMG_HEIGHT, IMG_WIDTH,-1,-1]))
pass
tf_stage2_inm = tf.concat(concat_list, axis = 2, name ='stage2_inm') #242, 324, 25, 64
tf_stage2_in = tf.reshape(tf_stage2_inm,[-1,rv_stage1_out.shape[1]*cluster_side*cluster_side], name = 'stage2_in')
tf_stage2_in_sparse = tf.gather(tf_stage2_in, indices= ph_ntile_out, axis=0, name = 'stage2_in_sparse')
#aextv = np.concatenate([a[:,:1,:]]*cluster_radius + [a] + [a[:,-1:,:]]*cluster_radius, axis = 1)
#ext = np.concatenate([aextv[:1,:,:]]*cluster_radius + [aextv] + [aextv[-1:,:,:]]*cluster_radius, axis = 0)
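"""
A minimal NumPy sketch (toy sizes, not the real IMG_HEIGHT/IMG_WIDTH) of what the
padding and slicing graph ops above compute:
    import numpy as np
    r = 2                                 # cluster_radius
    h, w, nf = 4, 5, 3                    # stand-ins for IMG_HEIGHT, IMG_WIDTH, NN_LAYOUT1[-1]
    a = np.arange(h*w*nf, dtype=np.float32).reshape(h, w, nf)
    ext = np.pad(a, ((r, r), (r, r), (0, 0)), mode='edge')  # same as the two tf.concat extensions
    side = 2*r + 1
    neibs = np.stack([ext[dy:dy+h, dx:dx+w, :]
                      for dy in range(side) for dx in range(side)], axis=2)  # [h, w, 25, nf]
    stage2_in = neibs.reshape(-1, side*side*nf)             # matches 'stage2_in'
"""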
with tf.name_scope("Disparity_net"): # to have the same scope for weight/biases?
ns, _ = qcstereo_network.network_sub(tf_intile325,
input_global = [None,ph_target_disparity][SPREAD_CONVERGENCE], # input_global[:,i,:],
layout= NN_LAYOUT1,
reuse= False,
sym8 = SYM8_SUB,
cluster_radius = 0)
update=tf.scatter_update(ref=rv_stage1_out,
indices = ph_ntile,
updates = ns,
use_locking = False,
name = 'update')
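# The scatter above behaves like the NumPy row assignment: rv_stage1_out[ph_ntile, :] = ns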
with tf.control_dependencies([update]):
stage1done = tf.constant(1, dtype=tf.int32, name="stage1done")
pass
stage2_out_sparse0 = qcstereo_network.network_inter (
input_tensor = tf_stage2_in_sparse,
input_global = None, # [None, ig][inter_convergence], # optionally feed all convergence values (from each tile of a cluster)
layout = NN_LAYOUT2,
reuse = False,
use_confidence = False)
stage2_out_sparse = tf.identity(stage2_out_sparse0, name = 'stage2_out_sparse')
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full0 = qcstereo_network.network_inter (
input_tensor = tf_stage2_in,
input_global = None, # [None, ig][inter_convergence], # optionally feed all convergence values (from each tile of a cluster)
layout = NN_LAYOUT2,
reuse = True,
use_confidence = False)
stage2_out_full = tf.identity(stage2_out_full0, name = 'stage2_out_full')
pass
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
"""
This is needed if ResourceVariable is used - in that case the i/o tensors' names somehow disappeared
and were replaced by 'Placeholder_*'.
collection_io = 'collection_io'
tf.add_to_collection(collection_io, ph_corr2d)
tf.add_to_collection(collection_io, ph_target_disparity)
tf.add_to_collection(collection_io, ph_ntile)
tf.add_to_collection(collection_io, ph_ntile_out)
tf.add_to_collection(collection_io, stage1done)
tf.add_to_collection(collection_io, stage2_out_sparse)
"""
##saver=tf.compat.v1.train.Saver()
saver =tf.compat.v1.train.Saver(tf.global_variables())
#saver = tf.compat.v1.train.Saver(tf.global_variables()+tf.local_variables())
saver_def = saver.as_saver_def()
pass
"""
saver_def = saver.as_saver_def()
# The name of the tensor you must feed with a filename when saving/restoring.
print ('saver_def.filename_tensor_name=',saver_def.filename_tensor_name)
# The name of the target operation you must run when restoring.
print ('saver_def.restore_op_name=',saver_def.restore_op_name)
# The name of the target operation you must run when saving.
print ('saver_def.save_tensor_name=',saver_def.save_tensor_name)
saver_def.filename_tensor_name= save/Const:0
saver_def.restore_op_name= save/restore_all
saver_def.save_tensor_name= save/control_dependency:0
print(saver.save(sess, files["checkpoints"]))
"""
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except:
pass
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver.restore(sess, files["checkpoints"])
'''
rv_stage1_out belongs to GraphKeys.LOCAL_VARIABLES.
Now that weights/biases are restored from 'checkpoints',
which do not contain this variable, add it to globals.
Actually it could have been declared right here - this
needs testing.
NOTE1: The line below makes the Saved_Model written by the
next script (the one that saves a Saved_Model MetaGraph)
significantly bigger.
NOTE2: The line below is commented out in favor of (in the next script!):
builder.add_meta_graph_and_variables(sess,[tf.saved_model.tag_constants.SERVING],main_op=tf.local_variables_initializer())
'''
#tf.add_to_collection(GraphKeys.GLOBAL_VARIABLES, rv_stage1_out)
saver.save(sess, files["inference"])
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
lf=open(LOGPATH,"w") #overwrite previous (or make it "a"?
#_ = sess.run([rv_stageX_out_init_op],feed_dict={rv_stageX_out_init_placeholder: np.zeros((HEIGHT * WIDTH, NN_LAYOUT1[-1]))})
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # (?,324)
img_target = dataset_img['target_disparity'] # (?,1)
img_ntile = dataset_img['ntile'].reshape([-1])
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
result_file = files['result'][nimg].replace('.npy','-infer.npy') #not to overwrite training result files that are more complete
try:
os.makedirs(os.path.dirname(result_file))
except:
pass
'''
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file,
rslt.reshape(IMG_HEIGHT,IMG_WIDTH,-1))
rslt = qsf.eval_results(
result_file,
ABSOLUTE_DISPARITY,
radius=CLUSTER_RADIUS,
logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True, labels=SLICE_LABELS, logfile=lf)
'''
extra = dataset_img['t_extra']
if extra is None:
extra = np.array([dataset_img['gtruths'].shape[0],0])
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest], disp_out.shape[0],BATCH_SIZE
dataset_img['gtruths'], # gtruths[ntest],
#skip 6 empty layers
np.zeros([IMG_HEIGHT * IMG_WIDTH, 6], dtype = disp_out.dtype),
# dbg_cost_nw.reshape(-1,1),
# dbg_cost_w.reshape(-1,1),
# dbg_d.reshape(-1,1),
# dbg_avg_disparity.reshape(-1,1),
# dbg_gt_disparity.reshape(-1,1),
# dbg_offs.reshape(-1,1),
extra, # len 3..6, #adding extra data layers
],1)
num_slices = rslt.shape[1]
np.save(
result_file,
rslt.reshape(IMG_HEIGHT, IMG_WIDTH,-1))
eval_rslt = qsf.eval_results(
result_file,
ABSOLUTE_DISPARITY,
radius=0, # CLUSTER_RADIUS,
last_fgbg_mode = 1,
logfile=lf)
# num_slices = eval_rslt.shape[1]
if SAVE_TIFFS:
qsf.result_npy_to_tiff(
result_file,
ABSOLUTE_DISPARITY,
fix_nan = True,
labels=qsf.SLICE_LABELS[0:num_slices],
logfile=lf)
"""
Remove dataset_img (if it is not [0]) to reduce the memory footprint
"""
image_data[nimg] = None
"""
Save MetaGraph to Saved_Model in *.pb (protocol buffer) format to
be able to use from Java
"""
# force clean
shutil.rmtree(dirs['exportdir'], ignore_errors=True)
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(dirs['exportdir'])
# builder.add_meta_graph_and_variables(sess,[tf.saved_model.tag_constants.SERVING],main_op=tf.local_variables_initializer())
builder.add_meta_graph_and_variables(sess,[tf.saved_model.SERVING],main_op=tf.local_variables_initializer())
builder.save(False) # True = *.pbtxt, False = *.pb
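"""
A minimal sketch (TF 1.x API) of reloading the exported Saved_Model later:
    with tf.Session() as s:
        tf.saved_model.loader.load(s, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
The later *_pbtest script restores it exactly this way.
"""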
if lf:
lf.close()
writer.close()
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/infer_qcds_graph_01.py 0000664 0000000 0000000 00000021567 13517677053 0024570 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
'''
** Kind of obsolete now, can be used for testing **
Just inference, currently uses /data_ssd/data_sets/tf_data_5x5_main_13_heur/inference/
'''
import os
import sys
import numpy as np
import time
import shutil
##import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
#from tensorflow.python.ops import resource_variable_ops
#tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
"""
Next gets globals from the config file
"""
globals().update(parameters)
WIDTH = 324
HEIGHT = 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 (160 when TWO_TRAINS); each batch has balanced D/S tiles; batches are shuffled, but tiles inside a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
##NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
"""
Next is tag for pb (pb == protocol buffer) model
"""
#PB_TAGS = ["model_pb"]
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except:
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
cluster_radius = CLUSTER_RADIUS
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except:
pass
with tf.Session() as sess:
# Actually, refresh all the time and have an extra script to restore from it.
# use_Saved_Model = False
#if os.path.isdir(dirs['exportdir']):
# # check if dir contains "Saved Model" model
# use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
#if use_saved_model:
# print("Model restore: using Saved_Model model MetaGraph protocol buffer")
# meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
#else:
meta_graph_source = files["inference"]+'.meta'
print("Model restore: using conventionally saved model, but saving Saved Model for the next run")
print("MetaGraph source = "+str(meta_graph_source))
infer_saver = tf.train.import_meta_graph(meta_graph_source)
graph=tf.get_default_graph()
ph_corr2d = graph.get_tensor_by_name('ph_corr2d:0')
ph_target_disparity = graph.get_tensor_by_name('ph_target_disparity:0')
ph_ntile = graph.get_tensor_by_name('ph_ntile:0')
ph_ntile_out = graph.get_tensor_by_name('ph_ntile_out:0')
stage1done = graph.get_tensor_by_name('Disparity_net/stage1done:0') #,
stage2_out_sparse = graph.get_tensor_by_name('Disparity_net/stage2_out_sparse:0')#not found
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full = graph.get_tensor_by_name('Disparity_net/stage2_out_full:0')
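"""
Note on the ':0' suffixes above: a TF1 graph tensor is addressed as '<op_name>:<output_index>',
so a placeholder created with name='ph_corr2d' is fetched back here as 'ph_corr2d:0'.
"""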
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
infer_saver.restore(sess, files["inference"])
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
lf=open(LOGPATH,"w") #overwrite previous (or make it "a"?
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # (?,324)
img_target = dataset_img['target_disparity'] # (?,1)
img_ntile = dataset_img['ntile'].reshape([-1]) # (?) - 0...78k int32
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
result_file = files['result'][nimg].replace('.npy','-infer.npy') #not to overwrite training result files that are more complete
try:
os.makedirs(os.path.dirname(result_file))
except:
pass
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
"""
Remove dataset_img (if it is not [0]) to reduce the memory footprint
"""
image_data[nimg] = None
"""
Save MetaGraph to Saved_Model in *.pb (protocol buffer) format to
be able to use from Java
"""
# force clean
shutil.rmtree(dirs['exportdir'], ignore_errors=True)
builder = tf.saved_model.builder.SavedModelBuilder(dirs['exportdir'])
builder.add_meta_graph_and_variables(sess,[tf.saved_model.tag_constants.SERVING],main_op=tf.local_variables_initializer())
builder.save(False) # True = *.pbtxt, False = *.pb
if lf:
lf.close()
writer.close()
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/infer_qcds_graph_01_pbtest.py 0000664 0000000 0000000 00000021174 13517677053 0026143 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
# Just inference, currently uses /data_ssd/data_sets/tf_data_5x5_main_13_heur/inference/
# TODO: Update for LWIR!
import os
import sys
import numpy as np
import time
import shutil
##import qcstereo_network
import qcstereo_functions as qsf
import tensorflow as tf
#from tensorflow.python.ops import resource_variable_ops
#tf.ResourceVariable = resource_variable_ops.ResourceVariable
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
USE_SPARSE_ONLY = True
LOGFILE="results-infer.txt"
"""
Next gets globals from the config file
"""
globals().update(parameters)
WIDTH = 324
HEIGHT = 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 (160 when TWO_TRAINS); each batch has balanced D/S tiles; batches are shuffled, but tiles inside a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
##NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength"]#,
# "cutcorn_cost_nw","cutcorn_cost",
# "gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs,
files,
suffix = SUFFIX)
"""
Next is tag for pb (pb == protocol buffer) model
"""
#PB_TAGS = ["model_pb"]
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except:
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
image_data = qsf.initImageData( # just use image_data[0]
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
cluster_radius = CLUSTER_RADIUS
ROOT_PATH = './attic/infer_qcds_graph'+SUFFIX+"/" # for tensorboard
try:
os.makedirs(os.path.dirname(files['inference']))
print ("Created directory ",os.path.dirname(files['inference']))
except:
pass
with tf.Session() as sess:
# Actually, refresh all the time and have an extra script to restore from it.
# use_Saved_Model = False
#if os.path.isdir(dirs['exportdir']):
# # check if dir contains "Saved Model" model
# use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
#if use_saved_model:
# print("Model restore: using Saved_Model model MetaGraph protocol buffer")
# meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
#else:
use_saved_model = tf.saved_model.loader.maybe_saved_model_directory(dirs['exportdir'])
if not use_saved_model:
print("ERROR: Saved_Model not found. Run previous script to create it.")
sys.exit()
meta_graph_source = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], dirs['exportdir'])
infer_saver = tf.train.import_meta_graph(meta_graph_source)
graph=tf.get_default_graph()
ph_corr2d = graph.get_tensor_by_name('ph_corr2d:0')
ph_target_disparity = graph.get_tensor_by_name('ph_target_disparity:0')
ph_ntile = graph.get_tensor_by_name('ph_ntile:0')
ph_ntile_out = graph.get_tensor_by_name('ph_ntile_out:0')
stage1done = graph.get_tensor_by_name('Disparity_net/stage1done:0') #,
stage2_out_sparse = graph.get_tensor_by_name('Disparity_net/stage2_out_sparse:0')#not found
if not USE_SPARSE_ONLY: #Does it reduce the graph size?
stage2_out_full = graph.get_tensor_by_name('Disparity_net/stage2_out_full:0')
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
#infer_saver.restore(sess, dirs['exportdir'])
#infer_saver.restore(sess, files["inference"])
infer_saver.restore(sess,dirs['exportdir']+'/variables/variables')
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
lf=open(LOGPATH,"w") # overwrite previous (or change "w" to "a" to append?)
for nimg,_ in enumerate(image_data):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = nimg,
cluster_radius = 0, # CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True,
infer = True,
keep_gt = True) # to generate same output files
img_corr2d = dataset_img['corr2d'] # (?,324)
img_target = dataset_img['target_disparity'] # (?,1)
img_ntile = dataset_img['ntile'].reshape([-1]) # (?) - 0...78k int32
#run first stage network
qsf.print_time("Running inferred model, stage1", end=" ")
_ = sess.run([stage1done],
feed_dict={ph_corr2d: img_corr2d,
ph_target_disparity: img_target,
ph_ntile: img_ntile })
qsf.print_time("Done.")
qsf.print_time("Running inferred model, stage2", end=" ")
disp_out, = sess.run([stage2_out_sparse],
feed_dict={ph_ntile_out: img_ntile })
qsf.print_time("Done.")
result_file = files['result'][nimg].replace('.npy','-infer.npy') #not to overwrite training result files that are more complete
try:
os.makedirs(os.path.dirname(result_file))
except:
pass
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf) # (re-loads results). Only uses first 4 layers
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
"""
Remove dataset_img (if it is not [0]) to reduce the memory footprint
"""
image_data[nimg] = None
if lf:
lf.close()
writer.close()
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/nn_ds_neibs30.py 0000664 0000000 0000000 00000110405 13517677053 0023404 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
__copyright__ = "Copyright 2018-2019, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#python3 nn_ds_neibs30.py /data_ssd/lwir_sets/conf/qcstereo_lwir01.xml /data_ssd/lwir_sets/
#tensorboard --logdir="nn_ds_neibs30_graph13-9RNSWLAM0.5SLAM0.1SCLP0.2_nG_nI_HF_CP0.3_S0.03" --port=7001
import os
import sys
import numpy as np
import time
import shutil
from threading import Thread
import qcstereo_network
import qcstereo_losses
import qcstereo_functions as qsf
import tensorflow as tf
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 20 # 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
FGBG_MODE = 1 # 0 - do not filter by single-plane, 1 - remove split-plane tiles, 2 - remove split planes and neighbors
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
LOGFILE="results.txt"
"""
Next gets globals from the config file
"""
globals().update(parameters)
TRAIN_BUFFER_SIZE = TRAIN_BUFFER_GPU * TRAIN_BUFFER_CPU # in merged (quad) batches
#exit(0)
WIDTH = 20 # 324
HEIGHT = 15 # 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 (160 when TWO_TRAINS); each batch has balanced D/S tiles; batches are shuffled, but tiles inside a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
USE_PARTIALS = PARTIALS_WEIGHTS is not None # False - just a single Siamese net, True - partial outputs that use concentric squares of the first level subnets
# Tiff export slice labels
SLICE_LABELS = ["nn_out_ext","hier_out_ext","gt_disparity","gt_strength",
"cutcorn_cost_nw","cutcorn_cost",
"gt-avg_dist","avg8_disp","gt_disp","out-avg"]
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs, files, suffix = SUFFIX)
#copy config to results directory
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except:
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
if TEST_TITLES is None:
TEST_TITLES = qsf.defaultTestTitles(files)
partials = None
partials = qsf.concentricSquares(CLUSTER_RADIUS)
PARTIALS_WEIGHTS = [1.0*pw/sum(PARTIALS_WEIGHTS) for pw in PARTIALS_WEIGHTS]
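# e.g. (a sketch): PARTIALS_WEIGHTS == [2.0, 1.0, 1.0] normalizes to [0.5, 0.25, 0.25]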
if not USE_PARTIALS:
partials = partials[0:1]
PARTIALS_WEIGHTS = [1.0]
qsf.evaluateAllResults(result_files = files['result'],
absolute_disparity = ABSOLUTE_DISPARITY,
cluster_radius = CLUSTER_RADIUS,
labels = SLICE_LABELS,
logpath= LOGPATH)
image_data = qsf.initImageData(
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True)
corr2d_len, target_disparity_len, gtds_len, _ = qsf.get_lengths(CLUSTER_RADIUS, TILE_LAYERS, TILE_SIDE)
train_next, dataset_train, datasets_test= qsf.initTrainTestData(
files = files,
cluster_radius = CLUSTER_RADIUS,
buffer_size = TRAIN_BUFFER_SIZE * BATCH_SIZE, # number of clusters per train
test_titles = TEST_TITLES)
corr2d_train_placeholder = tf.compat.v1.placeholder(dataset_train.dtype, (None,FEATURES_PER_TILE * cluster_size)) # corr2d_train.shape)
target_disparity_train_placeholder = tf.compat.v1.placeholder(dataset_train.dtype, (None,1 * cluster_size)) #target_disparity_train.shape)
gt_ds_train_placeholder = tf.compat.v1.placeholder(dataset_train.dtype, (None,2 * cluster_size)) #gt_ds_train.shape)
dataset_tt = tf.data.Dataset.from_tensor_slices({
"corr2d": corr2d_train_placeholder,
"target_disparity": target_disparity_train_placeholder,
"gt_ds": gt_ds_train_placeholder})
tf_batch_weights = tf.compat.v1.placeholder(shape=(None,), dtype=tf.float32, name = "batch_weights") # way to increase importance of the high variance clusters
feed_batch_weights = np.array(BATCH_WEIGHTS*(BATCH_SIZE//len(BATCH_WEIGHTS)), dtype=np.float32)
feed_batch_weight_1 = np.array([1.0], dtype=np.float32)
dataset_test_size = len(datasets_test[0])
dataset_test_size //= BATCH_SIZE
dataset_img_size = len(image_data[0]['corr2d'])
dataset_img_size //= BATCH_SIZE
dataset_tt = dataset_tt.batch(BATCH_SIZE)
dataset_tt = dataset_tt.prefetch(BATCH_SIZE)
iterator_tt = dataset_tt.make_initializable_iterator()
next_element_tt = iterator_tt.get_next()
result_dir = './attic/result_neibs_'+ SUFFIX+'/'
checkpoint_dir = './attic/result_neibs_'+ SUFFIX+'/'
save_freq = 500
def debug_gt_variance(
indx, # This tile index (0..8)
center_indx, # center tile index
gt_ds_batch # [?:9:2]
):
with tf.name_scope("Debug_GT_Variance"):
d_gt_this = tf.reshape(gt_ds_batch[:,2 * indx],[-1], name = "d_this")
d_gt_center = tf.reshape(gt_ds_batch[:,2 * center_indx],[-1], name = "d_center")
d_gt_diff = tf.subtract(d_gt_this, d_gt_center, name = "d_diff")
d_gt_diff2 = tf.multiply(d_gt_diff, d_gt_diff, name = "d_diff2")
d_gt_var = tf.reduce_mean(d_gt_diff2, name = "d_gt_var")
return d_gt_var
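"""
debug_gt_variance() above is just the mean squared GT-disparity difference between one tile
and the cluster center; a NumPy sketch of the same computation:
    d_gt_var = np.mean((gt_ds_batch[:, 2*indx] - gt_ds_batch[:, 2*center_indx])**2)
"""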
target_disparity_cluster = tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1], name="targdisp_cluster")
corr2d_Nx325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE], name="coor2d_cluster"),
target_disparity_cluster], axis=2, name = "corr2d_Nx325")
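"""
Shape sketch for the concat above (assuming CLUSTER_RADIUS == 2, so cluster_size == 25):
    corr2d          : [batch, 25*324] -> reshape -> [batch, 25, 324]
    target_disparity: [batch, 25]     -> reshape -> [batch, 25, 1]
    corr2d_Nx325    : concat(axis=2)  -> [batch, 25, 325]
"""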
if SPREAD_CONVERGENCE:
outs, inp_weights = qcstereo_network.networks_siam(
input = corr2d_Nx325,
input_global = target_disparity_cluster,
layout1 = NN_LAYOUT1,
layout2 = NN_LAYOUT2,
inter_convergence = INTER_CONVERGENCE,
sym8 = SYM8_SUB,
only_tile = ONLY_TILE, #Remove/put None for normal operation
partials = partials,
use_confidence= USE_CONFIDENCE,
cluster_radius = CLUSTER_RADIUS)
else:
outs, inp_weights = qcstereo_network.networks_siam(
input_tensor= corr2d_Nx325,
input_global = None,
layout1 = NN_LAYOUT1,
layout2 = NN_LAYOUT2,
inter_convergence = False,
sym8 = SYM8_SUB,
only_tile = ONLY_TILE, #Remove/put None for normal operation
partials = partials,
use_confidence= USE_CONFIDENCE,
cluster_radius = CLUSTER_RADIUS)
tf_partial_weights = tf.constant(PARTIALS_WEIGHTS,dtype=tf.float32,name="partial_weights")
G_losses = [0.0]*len(partials)
target_disparity_batch= next_element_tt['target_disparity'][:,center_tile_index:center_tile_index+1]
gt_ds_batch_clust = next_element_tt['gt_ds']
gt_ds_batch = gt_ds_batch_clust[:,2 * center_tile_index: 2 * (center_tile_index +1)]
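"""
gt_ds packs a (disparity, strength) pair per tile, so with CLUSTER_RADIUS == 2
(center_tile_index == 12) the slice above is [:, 24:26] - the center tile's pair.
"""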
G_losses[0], _disp_slice, _d_gt_slice, _out_diff, _out_diff2, _w_norm, _out_wdiff2, _cost1 = qcstereo_losses.batchLoss(
out_batch = outs[0], # [batch_size,(1..2)] tf_result
target_disparity_batch= target_disparity_batch, # next_element_tt['target_disparity'][:,center_tile_index:center_tile_index+1], # target_disparity_batch_center, # next_element_tt['target_disparity'], # target_disparity, ### target_d, # [batch_size] tf placeholder
gt_ds_batch = gt_ds_batch, # next_element_tt['gt_ds'][:,2 * center_tile_index: 2 * (center_tile_index +1)], # gt_ds_batch_center, ## next_element_tt['gt_ds'], # gt_ds, ### gt, # [batch_size,2] tf placeholder
batch_weights = tf_batch_weights,
disp_diff_cap = DISP_DIFF_CAP,
disp_diff_slope= DISP_DIFF_SLOPE,
absolute_disparity = ABSOLUTE_DISPARITY,
use_confidence = USE_CONFIDENCE, # True,
lambda_conf_avg = 0.01,
## lambda_conf_pwr = 0.1,
conf_pwr = 2.0,
gt_conf_offset = 0.08,
gt_conf_pwr = 2.0,
error2_offset = 0, # 0.0025, # (0.05^2)
disp_wmin = 1.0, # minimal disparity to apply weight boosting for small disparities
disp_wmax = 8.0, # maximal disparity to apply weight boosting for small disparities
use_out = False) # use calculated disparity for disparity weight boosting (False - use target disparity)
G_loss = G_losses[0]
for n in range (1,len(partials)):
G_losses[n], _, _, _, _, _, _, _ = qcstereo_losses.batchLoss(
out_batch = outs[n], # [batch_size,(1..2)] tf_result
target_disparity_batch= target_disparity_batch, #next_element_tt['target_disparity'][:,center_tile_index:center_tile_index+1], # target_disparity_batch_center, # next_element_tt['target_disparity'], # target_disparity, ### target_d, # [batch_size] tf placeholder
gt_ds_batch = gt_ds_batch, # next_element_tt['gt_ds'][:,2 * center_tile_index: 2 * (center_tile_index +1)], # gt_ds_batch_center, ## next_element_tt['gt_ds'], # gt_ds, ### gt, # [batch_size,2] tf placeholder
batch_weights = tf_batch_weights,
disp_diff_cap = DISP_DIFF_CAP,
disp_diff_slope= DISP_DIFF_SLOPE,
absolute_disparity = ABSOLUTE_DISPARITY,
use_confidence = USE_CONFIDENCE, # True,
lambda_conf_avg = 0.01,
# lambda_conf_pwr = 0.1,
conf_pwr = 2.0,
gt_conf_offset = 0.08,
gt_conf_pwr = 2.0,
error2_offset = 0, # 0.0025, # (0.05^2)
disp_wmin = 1.0, # minimal disparity to apply weight boosting for small disparities
disp_wmax = 8.0, # maximal disparity to apply weight boosting for small disparities
use_out = False) # use calculated disparity for disparity weight boosting (False - use target disparity)
tf_wlosses = tf.multiply(G_losses, tf_partial_weights, name = "tf_wlosses")
G_losses_sum = tf.reduce_sum(tf_wlosses, name = "G_losses_sum")
if SLOSS_LAMBDA > 0:
S_loss, rslt_cost_nw, rslt_cost_w, rslt_d , rslt_avg_disparity, rslt_gt_disparity, rslt_offs = qcstereo_losses.smoothLoss(
out_batch = outs[0], # [batch_size,(1..2)] tf_result
target_disparity_batch = target_disparity_batch, # [batch_size] tf placeholder
gt_ds_batch_clust = gt_ds_batch_clust, # [batch_size,25,2] tf placeholder
clip = SLOSS_CLIP,
absolute_disparity = ABSOLUTE_DISPARITY, #when false there should be no activation on disparity output !
cluster_radius = CLUSTER_RADIUS)
GS_loss = tf.add(G_losses_sum, SLOSS_LAMBDA * S_loss, name = "GS_loss")
else:
S_loss = tf.constant(0.0, dtype=tf.float32,name = "S_loss")
GS_loss = G_losses_sum # G_loss
if WLOSS_LAMBDA > 0.0:
W_loss = qcstereo_losses.weightsLoss(
inp_weights = inp_weights[0], # inp_weights - list of tensors, currently - just [0]
tile_layers= TILE_LAYERS, # 4
tile_side = TILE_SIDE, # 9
wborders_zero = WBORDERS_ZERO)
GW_loss = tf.add(GS_loss, WLOSS_LAMBDA * W_loss, name = "GW_loss")
else:
GW_loss = GS_loss # G_loss
W_loss = tf.constant(0.0, dtype=tf.float32,name = "W_loss")
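"""
The combined objective assembled above, written out:
    GW_loss = sum_i(PARTIALS_WEIGHTS[i] * G_losses[i]) + SLOSS_LAMBDA * S_loss + WLOSS_LAMBDA * W_loss
with the S/W terms dropped when the corresponding lambda is 0.
"""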
GT_variance = debug_gt_variance(indx = 0, # This tile index (0..8)
center_indx = 4, # center tile index
gt_ds_batch = next_element_tt['gt_ds'])# [?:18]
tf_ph_G_loss = tf.compat.v1.placeholder(tf.float32,shape=None,name='G_loss_avg')
tf_ph_G_losses = tf.compat.v1.placeholder(tf.float32,shape=[len(partials)],name='G_losses_avg')
tf_ph_S_loss = tf.compat.v1.placeholder(tf.float32,shape=None,name='S_loss_avg')
tf_ph_W_loss = tf.compat.v1.placeholder(tf.float32,shape=None,name='W_loss_avg')
tf_ph_GW_loss = tf.compat.v1.placeholder(tf.float32,shape=None,name='GW_loss_avg')
tf_ph_sq_diff = tf.compat.v1.placeholder(tf.float32,shape=None,name='sq_diff_avg')
tf_gtvar_diff = tf.compat.v1.placeholder(tf.float32,shape=None,name='gtvar_diff')
tf_img_test0 = tf.compat.v1.placeholder(tf.float32,shape=None,name='img_test0')
tf_img_test9 = tf.compat.v1.placeholder(tf.float32,shape=None,name='img_test9')
"""
with tf.name_scope('sample'):
tf.compat.v1.summary.scalar("GW_loss", GW_loss)
tf.compat.v1.summary.scalar("G_loss", G_loss)
tf.compat.v1.summary.scalar("S_loss", S_loss)
tf.compat.v1.summary.scalar("W_loss", W_loss)
tf.compat.v1.summary.scalar("sq_diff", _cost1)
tf.compat.v1.summary.scalar("gtvar_diff", GT_variance)
"""
with tf.name_scope('epoch_average'):
for i in range(tf_ph_G_losses.shape[0]):
tf.compat.v1.summary.scalar("G_loss_epoch_"+str(i), tf_ph_G_losses[i])
tf.compat.v1.summary.scalar("GW_loss_epoch", tf_ph_GW_loss)
tf.compat.v1.summary.scalar("G_loss_epoch", tf_ph_G_loss)
tf.compat.v1.summary.scalar("S_loss_epoch", tf_ph_S_loss)
tf.compat.v1.summary.scalar("W_loss_epoch", tf_ph_W_loss)
tf.compat.v1.summary.scalar("sq_diff_epoch", tf_ph_sq_diff)
tf.compat.v1.summary.scalar("gtvar_diff", tf_gtvar_diff)
tf.compat.v1.summary.scalar("img_test0", tf_img_test0)
tf.compat.v1.summary.scalar("img_test9", tf_img_test9)
trainable_vars= tf.trainable_variables()
lr= tf.compat.v1.placeholder(tf.float32)
G_opt= tf.compat.v1.train.AdamOptimizer(learning_rate=lr).minimize(GW_loss)
ROOT_PATH = './attic/nn_ds_neibs30_graph'+SUFFIX+"/" # for tensorboard
TT_SUBDIRS = ['train'] #,'test0','test1','test2','test3']
for i,_ in enumerate(datasets_test):
# TT_SUBDIRS.append('test%d'%(i))
TT_SUBDIRS.append(TEST_TITLES[i].replace(' ','_'))
TT_PATHS = [ROOT_PATH + p for p in TT_SUBDIRS]
# CLEAN OLD STUFF
shutil.rmtree(ROOT_PATH, ignore_errors=True)
#for p in TT_PATHS:
# shutil.rmtree(p, ignore_errors=True)
#seems that runs use directory creation time to order graphs
#for p in TT_PATHS:
# os.makedirs(p)
# time.sleep(1.5) # reduce later
num_train_subs = len(train_next) # number of (different type) merged training sets
dataset_train_size = TRAIN_BUFFER_GPU * num_train_subs # TRAIN_BUFFER_SIZE
tt_summaries = [0.0 for e in TT_SUBDIRS]
tt2_avg = [0.0 for e in TT_SUBDIRS]
tt_gw_avg = [0.0 for e in TT_SUBDIRS]
tt_g_avgs = [[0.0]*len(partials) for e in TT_SUBDIRS]
tt_w_avg = [0.0 for e in TT_SUBDIRS]
tt_s_avg = [0.0 for e in TT_SUBDIRS]
tt_gtvar_avg = [0.0 for e in TT_SUBDIRS]
saver=tf.compat.v1.train.Saver(trainable_vars)
saver_def = saver.as_saver_def()
# The name of the tensor you must feed with a filename when saving/restoring.
print ('saver_def.filename_tensor_name=',saver_def.filename_tensor_name)
# The name of the target operation you must run when restoring.
print ('saver_def.restore_op_name=',saver_def.restore_op_name)
# The name of the target operation you must run when saving.
print ('saver_def.save_tensor_name=',saver_def.save_tensor_name)
try:
os.makedirs(os.path.dirname(files['checkpoints']))
print ("Created directory ",os.path.dirname(files['checkpoints']))
# os.makedirs(files['checkpoints'])
except:
pass
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
merged = tf.summary.merge_all()
tt_writers = []
for p in TT_PATHS:
tt_writers.append(tf.summary.FileWriter(p, sess.graph))
print ("Adding delay to make directory creation time different: "+p)
time.sleep(2.0) # reduce later
loss_gw_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_g_train_hists= [np.empty(dataset_train_size, dtype=np.float32) for p in partials]
loss_s_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_w_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_gw_test_hist= np.empty(dataset_test_size, dtype=np.float32)
loss_g_test_hists= [np.empty(dataset_test_size, dtype=np.float32) for p in partials]
loss_s_test_hist= np.empty(dataset_test_size, dtype=np.float32)
loss_w_test_hist= np.empty(dataset_test_size, dtype=np.float32)
loss2_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss2_test_hist= np.empty(dataset_test_size, dtype=np.float32)
gtvar_train_hist= np.empty(dataset_train_size, dtype=np.float32)
gtvar_test_hist= np.empty(dataset_test_size, dtype=np.float32)
gtvar_train = 0.0
gtvar_test = 0.0
img_gain_test0 = 1.0
img_gain_test9 = 1.0
thr=None
thr_result = None
trains_to_update = [train_next[n_train]['more_files'] for n_train in range(len(train_next))]
for epoch in range (EPOCHS_TO_RUN):
"""
update files after each epoch, all 4.
(the tfrecord loading below already runs in a background Thread)
"""
if (FILE_UPDATE_EPOCHS > 0) and (epoch % FILE_UPDATE_EPOCHS == 0):
if thr is not None:
if thr.is_alive():
qsf.print_time("***WAITING*** until tfrecord gets loaded", end=" ")
else:
qsf.print_time("tfrecord is ***ALREADY LOADED*** ", end=" ")
thr.join()
qsf.print_time("Done")
qsf.print_time("Inserting new data", end=" ")
for n_train in range(len(trains_to_update)):
if trains_to_update[n_train]:
qsf.add_file_to_dataset(dataset = dataset_train,
new_dataset = thr_result[n_train],
train_next = train_next[n_train])
qsf.print_time("Done")
thr_result = []
fpaths = []
for n_train in range(len(trains_to_update)):
if trains_to_update[n_train]:
fpaths.append(files['train'][n_train][train_next[n_train]['file']])
qsf.print_time("Will read in background: "+fpaths[-1])
thr = Thread(target=qsf.getMoreFiles, args=(fpaths,thr_result, CLUSTER_RADIUS, HOR_FLIP, TILE_LAYERS, TILE_SIDE))
thr.start()
train_buf_index = epoch % TRAIN_BUFFER_CPU # GPU memory from CPU memory (now 4)
if epoch >=600:
learning_rate = LR600
elif epoch >=400:
learning_rate = LR400
elif epoch >=200:
learning_rate = LR200
elif epoch >=100:
learning_rate = LR100
else:
learning_rate = LR
if (train_buf_index == 0) and SHUFFLE_FILES:
qsf.print_time("Shuffling how datasets datasets_train_lvar and datasets_train_hvar are zipped together", end="")
qsf.shuffle_in_place(
dataset_data = dataset_train, #alternating clusters from 4 sources.each cluster has all needed data (concatenated)
period = num_train_subs)
qsf.print_time(" Done")
sti = train_buf_index * dataset_train_size * BATCH_SIZE # TRAIN_BUFFER_GPU * num_train_subs
eti = sti+ dataset_train_size * BATCH_SIZE# (train_buf_index +1) * TRAIN_BUFFER_GPU * num_train_subs
sess.run(iterator_tt.initializer, feed_dict={corr2d_train_placeholder: dataset_train[sti:eti,:corr2d_len],
target_disparity_train_placeholder: dataset_train[sti:eti,corr2d_len:corr2d_len+target_disparity_len],
gt_ds_train_placeholder: dataset_train[sti:eti,corr2d_len+target_disparity_len:corr2d_len+target_disparity_len+gtds_len] })
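"""
Each dataset_train row is one cluster packed as [corr2d | target_disparity | gt_ds],
which is what the three column slices above unpack:
    [0 : corr2d_len)                                            -> 2D correlation features
    [corr2d_len : corr2d_len + target_disparity_len)            -> target disparities
    [corr2d_len + target_disparity_len : ... + gtds_len)        -> GT (disparity, strength) pairs
sti/eti select this epoch's TRAIN_BUFFER_GPU-sized slice (train_buf_index == epoch % TRAIN_BUFFER_CPU).
"""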
for i in range(dataset_train_size):
# try:
_, GW_loss_trained, G_losses_trained, S_loss_trained, W_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
[ G_opt,
GW_loss,
G_losses,
S_loss,
W_loss,
outs[0],
_disp_slice,
_d_gt_slice,
_out_diff,
_out_diff2,
_w_norm,
_out_wdiff2,
_cost1,
GT_variance
],
feed_dict={tf_batch_weights: feed_batch_weights,
lr: learning_rate
}) # previous value of *_avg #Fetch argument 0.0 has invalid type , must be a string or Tensor. (Can not convert a float into a Tensor or Operation.)
loss_gw_train_hist[i] = GW_loss_trained
for nn, gl in enumerate(G_losses_trained):
loss_g_train_hists[nn][i] = gl
loss_s_train_hist[i] = S_loss_trained
loss_w_train_hist[i] = W_loss_trained
loss2_train_hist[i] = out_cost1
gtvar_train_hist[i] = gt_variance
# except tf.errors.OutOfRangeError:
# print("****** NO MORE DATA! train done at step %d"%(i))
# break
tt_gw_avg[0] = np.average(loss_gw_train_hist).astype(np.float32)
for nn, lgth in enumerate(loss_g_train_hists):
tt_g_avgs[0][nn] = np.average(lgth).astype(np.float32)
tt_s_avg[0] = np.average(loss_s_train_hist).astype(np.float32)
tt_w_avg[0] = np.average(loss_w_train_hist).astype(np.float32)
tt2_avg[0] = np.average(loss2_train_hist).astype(np.float32)
tt_gtvar_avg[0] = np.average(gtvar_train_hist).astype(np.float32)
for ntest,dataset_test in enumerate(datasets_test):
sess.run(iterator_tt.initializer, feed_dict={corr2d_train_placeholder: dataset_test[:, :corr2d_len], #['corr2d'],
target_disparity_train_placeholder: dataset_test[:, corr2d_len:corr2d_len+target_disparity_len], # ['target_disparity'],
gt_ds_train_placeholder: dataset_test[:, corr2d_len+target_disparity_len:] }) # ['gt_ds']})
"""
TODO: Make it possible to have different length dataset_test arrays to mix different length test files
"""
for i in range(dataset_test_size):
# for i in range(dataset_test.shape[0]):
# try:
GW_loss_tested, G_losses_tested, S_loss_tested, W_loss_tested, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
[GW_loss,
G_losses,
S_loss,
W_loss,
outs[0],
_disp_slice,
_d_gt_slice,
_out_diff,
_out_diff2,
_w_norm,
_out_wdiff2,
_cost1,
GT_variance
],
feed_dict={tf_batch_weights: feed_batch_weight_1 , # feed_batch_weights,
lr: learning_rate
}) # previous value of *_avg
loss_gw_test_hist[i] = GW_loss_tested
for nn, gl in enumerate(G_losses_tested):
loss_g_test_hists[nn][i] = gl
loss_s_test_hist[i] = S_loss_tested
loss_w_test_hist[i] = W_loss_tested
loss2_test_hist[i] = out_cost1
gtvar_test_hist[i] = gt_variance
# except tf.errors.OutOfRangeError:
# print("test done at step %d"%(i))
# break
tt_gw_avg[ntest+1] = np.average(loss_gw_test_hist).astype(np.float32)
for nn, lgth in enumerate(loss_g_test_hists):
tt_g_avgs[ntest+1][nn] = np.average(lgth).astype(np.float32)
tt_s_avg[ntest+1] = np.average(loss_s_test_hist).astype(np.float32)
tt_w_avg[ntest+1] = np.average(loss_w_test_hist).astype(np.float32)
tt2_avg[ntest+1] = np.average(loss2_test_hist).astype(np.float32)
tt_gtvar_avg[ntest+1] = np.average(gtvar_test_hist).astype(np.float32)
if (((epoch + 1) == EPOCHS_TO_RUN) or (((epoch + 1) % EPOCHS_FULL_TEST) == 0)) and (len(image_data) > 0) :
lf = None
if (epoch + 1) == EPOCHS_TO_RUN: # last
print("Last epoch, removing train/test datasets to reduce memory footprint")
del(dataset_train)
del(dataset_test)
if LOGPATH:
lf=open(LOGPATH,"w") # overwrite previous (or change "w" to "a" to append?)
last_epoch = (epoch + 1) == EPOCHS_TO_RUN
ind_img = [0]
if last_epoch:
ind_img = [i for i in range(len(image_data))]
###################################################
# Read the full image
###################################################
## test_summaries_img = [0.0]*len(ind_img) # datasets_img)
disp_out= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_cost_nw= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_cost_w= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_d= np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_avg_disparity = np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_gt_disparity = np.empty((WIDTH*HEIGHT), dtype=np.float32)
dbg_offs = np.empty((WIDTH*HEIGHT), dtype=np.float32)
for ntest in ind_img: # datasets_img):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = ntest,
cluster_radius = CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True)
sess.run(iterator_tt.initializer, feed_dict={corr2d_train_placeholder: dataset_img['corr2d'],
target_disparity_train_placeholder: dataset_img['target_disparity'],
gt_ds_train_placeholder: dataset_img['gt_ds']})
for start_offs in range(0,disp_out.shape[0],BATCH_SIZE):
end_offs = min(start_offs+BATCH_SIZE,disp_out.shape[0])
# try:
output, cost_nw, cost_w, dd, avg_disparity, gt_disparity, offs = sess.run(
[outs[0], # {?,1]
rslt_cost_nw, #[?,]
rslt_cost_w, #[?,]
rslt_d, #[?,]
rslt_avg_disparity,
rslt_gt_disparity,
rslt_offs
],
feed_dict={
tf_batch_weights: feed_batch_weight_1 # feed_batch_weights,
}) # previous value of *_avg
# except tf.errors.OutOfRangeError:
# print("test done at step %d"%(i))
# break
# try:
disp_out[start_offs:end_offs] = output.flatten()
dbg_cost_nw[start_offs:end_offs] = cost_nw.flatten()
dbg_cost_w [start_offs:end_offs] = cost_w.flatten()
dbg_d[start_offs:end_offs] = dd.flatten()
dbg_avg_disparity[start_offs:end_offs] = avg_disparity.flatten()
dbg_gt_disparity[start_offs:end_offs] = gt_disparity.flatten()
dbg_offs[start_offs:end_offs] = offs.flatten()
# except ValueError:
# print("dataset_img_size= %d, i=%d, output.shape[0]=%d "%(dataset_img_size, i, output.shape[0]))
# break;
pass
result_file = files['result'][ntest] # result_files[ntest]
try:
os.makedirs(os.path.dirname(result_file))
except:
pass
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest],
dataset_img['gtruths'], # gtruths[ntest],
dbg_cost_nw.reshape(-1,1),
dbg_cost_w.reshape(-1,1),
dbg_d.reshape(-1,1),
dbg_avg_disparity.reshape(-1,1),
dbg_gt_disparity.reshape(-1,1),
dbg_offs.reshape(-1,1)],1)
np.save(result_file, rslt.reshape(HEIGHT,WIDTH,-1))
rslt = qsf.eval_results(result_file, ABSOLUTE_DISPARITY, radius=CLUSTER_RADIUS, logfile=lf)
img_gain_test0 = rslt[0][0]/rslt[0][1]
img_gain_test9 = rslt[9][0]/rslt[9][1]
if SAVE_TIFFS:
qsf.result_npy_to_tiff(result_file, ABSOLUTE_DISPARITY, fix_nan = True,labels=SLICE_LABELS, logfile=lf)
"""
Remove dataset_img (if it is not [0]) to reduce the memory footprint
"""
if ntest > 0:
image_data[ntest] = None
if lf:
lf.close()
# tensorboard scalars
tt_summaries[0] = sess.run([merged],
feed_dict={ tf_ph_GW_loss: tt_gw_avg[0],
tf_ph_G_loss: tt_g_avgs[0][0], #train_g_avg,
tf_ph_G_losses: tt_g_avgs[0],
tf_ph_S_loss: tt_s_avg[0],
tf_ph_W_loss: tt_w_avg[0],
tf_ph_sq_diff: tt2_avg[0], # train2_avg,
tf_gtvar_diff: tt_gtvar_avg[0],
tf_img_test0: img_gain_test0,
tf_img_test9: img_gain_test9}) # previous value of *_avg #Fetch argument 0.0 has invalid type , must be a string or Tensor. (Can not convert a float into a Tensor or Operation.)
for ntest, _ in enumerate(datasets_test):
tt_summaries[ntest+1] = sess.run([merged],
feed_dict={
tf_ph_GW_loss: tt_gw_avg[ntest+1],
tf_ph_G_loss: tt_g_avgs[ntest+1][0],
tf_ph_G_losses: tt_g_avgs[ntest+1], # train_g_avgs, # temporary, there is no data for test
tf_ph_S_loss: tt_s_avg[ntest+1],
tf_ph_W_loss: tt_w_avg[ntest+1],
tf_ph_sq_diff: tt2_avg[ntest+1], #test2_avg,
tf_gtvar_diff: tt_gtvar_avg[ntest+1],
tf_img_test0: img_gain_test0,
tf_img_test9: img_gain_test9})
for n,tt_writer in enumerate(tt_writers):
## tt_writer.add_summary(tt_summaries[n],epoch)
tt_writer.add_summary(tt_summaries[n][0],epoch)
# if epoch ==0 :
# print ("adding delay to make directory creation time different")
# time.sleep(2.0) # reduce later
qsf.print_time("==== %04d:%03d -> %.4f %.4f %.4f %.4f %.4f (%.4f %.4f %.4f %.4f %.4f) ===="%(
epoch,i,
tt_gw_avg[0], tt_gw_avg[1], tt_gw_avg[2], tt_gw_avg[3], tt_gw_avg[4],
tt2_avg[0], tt2_avg[1], tt2_avg[2], tt2_avg[3], tt2_avg[4]))
if (CHECKPOINT_PERIOD is not None) and (((epoch + 1) % CHECKPOINT_PERIOD) == 0):
print("Saving periodic checkpoint (trained variables only) to %s, global_step = %d"%(os.path.dirname(files['checkpoints']), epoch),end=" => ")
print(saver.save(sess, files['checkpoints'], global_step=epoch, write_meta_graph=False))
# Close writers
for tt_writer in tt_writers:
try:
tt_writer.close()
except:
print ("Could not close tt_writer: ",tt_writer)
print("Saving final checkpoint (trained variables only) to %s"%(files['checkpoints']),end=" => ")
print(saver.save(sess, files["checkpoints"]))
print("All done")
exit (0)
"""
Traceback (most recent call last):
File "nn_ds_neibs30.py", line 721, in
tt2_avg[0], tt2_avg[1], tt2_avg[2], tt2_avg[3], tt2_avg[4]))
ValueError: unsupported format character ' ' (0x20) at index 20
""" lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/nn_ds_neibs31.py 0000664 0000000 0000000 00000112142 13517677053 0023405 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
__copyright__ = "Copyright 2018-2019, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#python3 nn_ds_neibs31.py /data_ssd/lwir_sets/conf/qcstereo_lwir21.xml /data_ssd/lwir_sets/
#tensorboard --logdir="nn_ds_neibs30_graph13-9RNSWLAM0.5SLAM0.1SCLP0.2_nG_nI_HF_CP0.3_S0.03" --port=7001
import os
import sys
import numpy as np
import time
import shutil
from threading import Thread
import qcstereo_network
import qcstereo_losses
import qcstereo_functions as qsf
import tensorflow as tf
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
#IMG_WIDTH = 20 # 324 # tiles per image row Defined in config
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, _ = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
CORR2D_LIMITS = [None, None]
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS = None
FGBG_MODE = 1 # 0 - do not filter by single-plane, 1 - remove split-plane tiles, 2 - remove split planes and neighbors
PARTIALS_WEIGHTS, MAX_IMGS_IN_MEM, MAX_FILES_PER_GROUP, BATCH_WEIGHTS, ONLY_TILE = [None] * 5
USE_CONFIDENCE, WBORDERS_ZERO, EPOCHS_TO_RUN, FILE_UPDATE_EPOCHS = [None] * 4
LR600,LR400,LR200,LR100,LR = [None]*5
SHUFFLE_FILES, EPOCHS_FULL_TEST, SAVE_TIFFS = [None] * 3
CHECKPOINT_PERIOD = None
TRAIN_BUFFER_GPU, TRAIN_BUFFER_CPU = [None]*2
TEST_TITLES = None
LOGFILE="results.txt"
IMG_WIDTH = 20
IMG_HEIGHT = 15
"""
Next gets globals from the config file
"""
globals().update(parameters)
TRAIN_BUFFER_SIZE = TRAIN_BUFFER_GPU * TRAIN_BUFFER_CPU # in merged (quad) batches
qsf.setCorr2Limits(CORR2D_LIMITS) # limit min/max 2d correlation tiles values
#exit(0)
#WIDTH = 20 # 324
#HEIGHT = 15 # 242
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 (160 when TWO_TRAINS); each batch has balanced D/S tiles; batches are shuffled, but tiles inside a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
NN_LAYOUT1 = qcstereo_network.NN_LAYOUTS[NET_ARCH1]
NN_LAYOUT2 = qcstereo_network.NN_LAYOUTS[NET_ARCH2]
USE_PARTIALS = PARTIALS_WEIGHTS is not None # False - just a single Siamese net, True - partial outputs that use concentric squares of the first level subnets
# Tiff export slice labels
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs, files, suffix = SUFFIX)
#copy config to results directory
print ("Copying config files to results directory:\n ('%s' -> '%s')"%(conf_file,dirs['result']))
try:
os.makedirs(dirs['result'])
except:
pass
shutil.copy2(conf_file,dirs['result'])
LOGPATH = os.path.join(dirs['result'],LOGFILE)
if TEST_TITLES is None:
TEST_TITLES = qsf.defaultTestTitles(files)
partials = None
partials = qsf.concentricSquares(CLUSTER_RADIUS)
PARTIALS_WEIGHTS = [1.0*pw/sum(PARTIALS_WEIGHTS) for pw in PARTIALS_WEIGHTS]
if not USE_PARTIALS:
partials = partials[0:1]
PARTIALS_WEIGHTS = [1.0]
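# Sketch of the normalization above: hypothetical weights [4.0, 2.0, 2.0] become
# [0.5, 0.25, 0.25], so the partial losses are always mixed with unit total weight.
assert abs(sum(PARTIALS_WEIGHTS) - 1.0) < 1e-6 # holds in both branches above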
qsf.evaluateAllResults(result_files = files['result'],
absolute_disparity = ABSOLUTE_DISPARITY,
cluster_radius = CLUSTER_RADIUS,
fgbg_mode= FGBG_MODE,
labels = qsf.SLICE_LABELS,
logpath= LOGPATH)
image_data = qsf.initImageData(
files = files,
max_imgs = MAX_IMGS_IN_MEM,
cluster_radius = CLUSTER_RADIUS,
tile_layers = TILE_LAYERS,
tile_side = TILE_SIDE,
width = IMG_WIDTH,
replace_nans = True)
corr2d_len, target_disparity_len, gtds_len, _ = qsf.get_lengths(CLUSTER_RADIUS, TILE_LAYERS, TILE_SIDE)
train_next, dataset_train, datasets_test= qsf.initTrainTestData(
files = files,
cluster_radius = CLUSTER_RADIUS,
buffer_size = TRAIN_BUFFER_SIZE * BATCH_SIZE, # number of clusters per train
test_titles = TEST_TITLES)
corr2d_train_placeholder = tf.compat.v1.placeholder(dataset_train.dtype, (None,FEATURES_PER_TILE * cluster_size)) # corr2d_train.shape)
target_disparity_train_placeholder = tf.compat.v1.placeholder(dataset_train.dtype, (None,1 * cluster_size)) #target_disparity_train.shape)
gt_ds_train_placeholder = tf.compat.v1.placeholder(dataset_train.dtype, (None,2 * cluster_size)) #gt_ds_train.shape)
dataset_tt = tf.data.Dataset.from_tensor_slices({
"corr2d": corr2d_train_placeholder,
"target_disparity": target_disparity_train_placeholder,
"gt_ds": gt_ds_train_placeholder})
tf_batch_weights = tf.compat.v1.placeholder(shape=(None,), dtype=tf.float32, name = "batch_weights") # way to increase importance of the high variance clusters
feed_batch_weights = np.array(BATCH_WEIGHTS*(BATCH_SIZE//len(BATCH_WEIGHTS)), dtype=np.float32)
feed_batch_weight_1 = np.array([1.0], dtype=np.float32)
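# Sketch of the tiling above: with hypothetical BATCH_WEIGHTS = [1.0, 1.0, 4.0, 4.0]
# and BATCH_SIZE = 80 the pattern repeats 20 times - one weight per tile in the
# batch, boosting the importance of the high variance clusters.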
dataset_test_size = len(datasets_test[0])
dataset_test_size //= BATCH_SIZE
dataset_img_size = len(image_data[0]['corr2d'])
dataset_img_size //= BATCH_SIZE
dataset_tt = dataset_tt.batch(BATCH_SIZE)
dataset_tt = dataset_tt.prefetch(BATCH_SIZE)
iterator_tt = dataset_tt.make_initializable_iterator()
next_element_tt = iterator_tt.get_next()
result_dir = './attic/result_neibs_'+ SUFFIX+'/'
checkpoint_dir = './attic/result_neibs_'+ SUFFIX+'/'
save_freq = 500
def debug_gt_variance(
indx, # This tile index (0..8)
center_indx, # center tile index
gt_ds_batch # [?:9:2]
):
with tf.name_scope("Debug_GT_Variance"):
d_gt_this = tf.reshape(gt_ds_batch[:,2 * indx],[-1], name = "d_this")
d_gt_center = tf.reshape(gt_ds_batch[:,2 * center_indx],[-1], name = "d_center")
d_gt_diff = tf.subtract(d_gt_this, d_gt_center, name = "d_diff")
d_gt_diff2 = tf.multiply(d_gt_diff, d_gt_diff, name = "d_diff2")
d_gt_var = tf.reduce_mean(d_gt_diff2, name = "d_gt_var")
return d_gt_var
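# Hedged usage sketch: gt_ds_batch interleaves disparity/strength pairs as
# [d0, s0, d1, s1, ...], so gt_ds_batch[:, 2*indx] selects the disparity of tile
# indx. debug_gt_variance(0, 4, gt_ds) is then mean((d0 - d4)**2) over the batch,
# a scalar proxy for the ground truth depth variance around the center tile.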
target_disparity_cluster = tf.reshape(next_element_tt['target_disparity'], [-1,cluster_size, 1], name="targdisp_cluster")
corr2d_Nx325 = tf.concat([tf.reshape(next_element_tt['corr2d'],[-1,cluster_size,FEATURES_PER_TILE], name="coor2d_cluster"),
target_disparity_cluster], axis=2, name = "corr2d_Nx325")
if SPREAD_CONVERGENCE:
outs, inp_weights = qcstereo_network.networks_siam(
input = corr2d_Nx325,
input_global = target_disparity_cluster,
layout1 = NN_LAYOUT1,
layout2 = NN_LAYOUT2,
inter_convergence = INTER_CONVERGENCE,
sym8 = SYM8_SUB,
only_tile = ONLY_TILE, # set to None for normal operation
partials = partials,
use_confidence= USE_CONFIDENCE,
cluster_radius = CLUSTER_RADIUS)
else:
outs, inp_weights = qcstereo_network.networks_siam(
input_tensor= corr2d_Nx325,
input_global = None,
layout1 = NN_LAYOUT1,
layout2 = NN_LAYOUT2,
inter_convergence = False,
sym8 = SYM8_SUB,
only_tile = ONLY_TILE, # set to None for normal operation
partials = partials,
use_confidence= USE_CONFIDENCE,
cluster_radius = CLUSTER_RADIUS)
tf_partial_weights = tf.constant(PARTIALS_WEIGHTS,dtype=tf.float32,name="partial_weights")
G_losses = [0.0]*len(partials)
target_disparity_batch= next_element_tt['target_disparity'][:,center_tile_index:center_tile_index+1]
gt_ds_batch_clust = next_element_tt['gt_ds']
gt_ds_batch = gt_ds_batch_clust[:,2 * center_tile_index: 2 * (center_tile_index +1)]
G_losses[0], _disp_slice, _d_gt_slice, _out_diff, _out_diff2, _w_norm, _out_wdiff2, _cost1 = qcstereo_losses.batchLoss(
out_batch = outs[0], # [batch_size,(1..2)] tf_result
target_disparity_batch= target_disparity_batch, # [batch_size] tf placeholder (center tile target disparity)
gt_ds_batch = gt_ds_batch, # [batch_size,2] tf placeholder (center tile ground truth disparity/strength)
batch_weights = tf_batch_weights,
disp_diff_cap = DISP_DIFF_CAP,
disp_diff_slope= DISP_DIFF_SLOPE,
absolute_disparity = ABSOLUTE_DISPARITY,
use_confidence = USE_CONFIDENCE, # True,
lambda_conf_avg = 0.01,
## lambda_conf_pwr = 0.1,
conf_pwr = 2.0,
gt_conf_offset = 0.08,
gt_conf_pwr = 2.0,
error2_offset = 0, # 0.0025, # (0.05^2)
disp_wmin = 1.0, # minimal disparity to apply weight boosting for small disparities
disp_wmax = 8.0, # maximal disparity to apply weight boosting for small disparities
use_out = False) # use calculated disparity for disparity weight boosting (False - use target disparity)
G_loss = G_losses[0]
for n in range (1,len(partials)):
G_losses[n], _, _, _, _, _, _, _ = qcstereo_losses.batchLoss(
out_batch = outs[n], # [batch_size,(1..2)] tf_result
target_disparity_batch= target_disparity_batch, # [batch_size] tf placeholder (center tile target disparity)
gt_ds_batch = gt_ds_batch, # [batch_size,2] tf placeholder (center tile ground truth disparity/strength)
batch_weights = tf_batch_weights,
disp_diff_cap = DISP_DIFF_CAP,
disp_diff_slope= DISP_DIFF_SLOPE,
absolute_disparity = ABSOLUTE_DISPARITY,
use_confidence = USE_CONFIDENCE, # True,
lambda_conf_avg = 0.01,
# lambda_conf_pwr = 0.1,
conf_pwr = 2.0,
gt_conf_offset = 0.08,
gt_conf_pwr = 2.0,
error2_offset = 0, # 0.0025, # (0.05^2)
disp_wmin = 1.0, # minimal disparity to apply weight boosting for small disparities
disp_wmax = 8.0, # maximal disparity to apply weight boosting for small disparities
use_out = False) # use calculated disparity for disparity weight boosting (False - use target disparity)
tf_wlosses = tf.multiply(G_losses, tf_partial_weights, name = "tf_wlosses")
G_losses_sum = tf.reduce_sum(tf_wlosses, name = "G_losses_sum")
if SLOSS_LAMBDA > 0:
S_loss, rslt_cost_nw, rslt_cost_w, rslt_d, rslt_avg_disparity, rslt_gt_disparity, rslt_offs = qcstereo_losses.smoothLoss(
out_batch = outs[0], # [batch_size,(1..2)] tf_result
target_disparity_batch = target_disparity_batch, # [batch_size] tf placeholder
gt_ds_batch_clust = gt_ds_batch_clust, # [batch_size,25,2] tf placeholder
clip = SLOSS_CLIP,
absolute_disparity = ABSOLUTE_DISPARITY, #when false there should be no activation on disparity output !
cluster_radius = CLUSTER_RADIUS)
GS_loss = tf.add(G_losses_sum, SLOSS_LAMBDA * S_loss, name = "GS_loss")
else:
S_loss = tf.constant(0.0, dtype=tf.float32,name = "S_loss")
GS_loss = G_losses_sum # G_loss
if WLOSS_LAMBDA > 0.0:
W_loss = qcstereo_losses.weightsLoss(
inp_weights = inp_weights[0], # inp_weights - list of tensors, currently - just [0]
tile_layers= TILE_LAYERS, # 4
tile_side = TILE_SIDE, # 9
wborders_zero = WBORDERS_ZERO)
GW_loss = tf.add(GS_loss, WLOSS_LAMBDA * W_loss, name = "GW_loss")
else:
GW_loss = GS_loss # G_loss
W_loss = tf.constant(0.0, dtype=tf.float32,name = "W_loss")
GT_variance = debug_gt_variance(indx = 0, # This tile index (0..8)
center_indx = 4, # center tile index
gt_ds_batch = next_element_tt['gt_ds'])# [?:18]
tf_ph_G_loss = tf.compat.v1.placeholder(tf.float32,shape=None,name='G_loss_avg')
tf_ph_G_losses = tf.compat.v1.placeholder(tf.float32,shape=[len(partials)],name='G_losses_avg')
tf_ph_S_loss = tf.compat.v1.placeholder(tf.float32,shape=None,name='S_loss_avg')
tf_ph_W_loss = tf.compat.v1.placeholder(tf.float32,shape=None,name='W_loss_avg')
tf_ph_GW_loss = tf.compat.v1.placeholder(tf.float32,shape=None,name='GW_loss_avg')
tf_ph_sq_diff = tf.compat.v1.placeholder(tf.float32,shape=None,name='sq_diff_avg')
tf_gtvar_diff = tf.compat.v1.placeholder(tf.float32,shape=None,name='gtvar_diff')
tf_img_test0 = tf.compat.v1.placeholder(tf.float32,shape=None,name='img_test0')
tf_img_test9 = tf.compat.v1.placeholder(tf.float32,shape=None,name='img_test9')
"""
with tf.name_scope('sample'):
tf.compat.v1.summary.scalar("GW_loss", GW_loss)
tf.compat.v1.summary.scalar("G_loss", G_loss)
tf.compat.v1.summary.scalar("S_loss", S_loss)
tf.compat.v1.summary.scalar("W_loss", W_loss)
tf.compat.v1.summary.scalar("sq_diff", _cost1)
tf.compat.v1.summary.scalar("gtvar_diff", GT_variance)
"""
with tf.name_scope('epoch_average'):
for i in range(tf_ph_G_losses.shape[0]):
tf.compat.v1.summary.scalar("G_loss_epoch_"+str(i), tf_ph_G_losses[i])
tf.compat.v1.summary.scalar("GW_loss_epoch", tf_ph_GW_loss)
tf.compat.v1.summary.scalar("G_loss_epoch", tf_ph_G_loss)
tf.compat.v1.summary.scalar("S_loss_epoch", tf_ph_S_loss)
tf.compat.v1.summary.scalar("W_loss_epoch", tf_ph_W_loss)
tf.compat.v1.summary.scalar("sq_diff_epoch", tf_ph_sq_diff)
tf.compat.v1.summary.scalar("gtvar_diff", tf_gtvar_diff)
tf.compat.v1.summary.scalar("Disparity error", tf_img_test0)
tf.compat.v1.summary.scalar("NN gain over heuristic", tf_img_test9)
trainable_vars= tf.trainable_variables()
lr= tf.compat.v1.placeholder(tf.float32)
G_opt= tf.compat.v1.train.AdamOptimizer(learning_rate=lr).minimize(GW_loss)
ROOT_PATH = './attic/nn_ds_neibs30_graph'+SUFFIX+"/" # for tensorboard
TT_SUBDIRS = ['train'] #,'test0','test1','test2','test3']
for i,_ in enumerate(datasets_test):
# TT_SUBDIRS.append('test%d'%(i))
TT_SUBDIRS.append(TEST_TITLES[i].replace(' ','_'))
TT_PATHS = [ROOT_PATH + p for p in TT_SUBDIRS]
# CLEAN OLD STUFF
shutil.rmtree(ROOT_PATH, ignore_errors=True)
#for p in TT_PATHS:
# shutil.rmtree(p, ignore_errors=True)
#seems that runs use directory creation time to order graphs
#for p in TT_PATHS:
# os.makedirs(p)
# time.sleep(1.5) # reduce later
num_train_subs = len(train_next) # number of (different type) merged training sets
dataset_train_size = TRAIN_BUFFER_GPU * num_train_subs # TRAIN_BUFFER_SIZE
tt_summaries = [0.0 for e in TT_SUBDIRS]
tt2_avg = [0.0 for e in TT_SUBDIRS]
tt_gw_avg = [0.0 for e in TT_SUBDIRS]
tt_g_avgs = [[0.0]*len(partials) for e in TT_SUBDIRS]
tt_w_avg = [0.0 for e in TT_SUBDIRS]
tt_s_avg = [0.0 for e in TT_SUBDIRS]
tt_gtvar_avg = [0.0 for e in TT_SUBDIRS]
saver=tf.compat.v1.train.Saver(trainable_vars)
saver_def = saver.as_saver_def()
# The name of the tensor you must feed with a filename when saving/restoring.
print ('saver_def.filename_tensor_name=',saver_def.filename_tensor_name)
# The name of the target operation you must run when restoring.
print ('saver_def.restore_op_name=',saver_def.restore_op_name)
# The name of the target operation you must run when saving.
print ('saver_def.save_tensor_name=',saver_def.save_tensor_name)
try:
os.makedirs(os.path.dirname(files['checkpoints']))
print ("Created directory ",os.path.dirname(files['checkpoints']))
except OSError:
pass # the directory already exists
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
merged = tf.compat.v1.summary.merge_all()
tt_writers = []
for p in TT_PATHS:
tt_writers.append(tf.summary.FileWriter(p, sess.graph))
print ("Adding delay to make directory creation time different: "+p)
time.sleep(2.0) # reduce later
loss_gw_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_g_train_hists= [np.empty(dataset_train_size, dtype=np.float32) for p in partials]
loss_s_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_w_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss_gw_test_hist= np.empty(dataset_test_size, dtype=np.float32)
loss_g_test_hists= [np.empty(dataset_test_size, dtype=np.float32) for p in partials]
loss_s_test_hist= np.empty(dataset_test_size, dtype=np.float32)
loss_w_test_hist= np.empty(dataset_test_size, dtype=np.float32)
loss2_train_hist= np.empty(dataset_train_size, dtype=np.float32)
loss2_test_hist= np.empty(dataset_test_size, dtype=np.float32)
gtvar_train_hist= np.empty(dataset_train_size, dtype=np.float32)
gtvar_test_hist= np.empty(dataset_test_size, dtype=np.float32)
gtvar_train = 0.0
gtvar_test = 0.0
img_gain_test0 = 0.2
img_gain_test9 = 1.0
thr=None
thr_result = None
trains_to_update = [train_next[n_train]['more_files'] for n_train in range(len(train_next))]
for epoch in range (EPOCHS_TO_RUN):
"""
update files after each epoch, all 4.
Convert to threads after testing
"""
if (FILE_UPDATE_EPOCHS > 0) and (epoch % FILE_UPDATE_EPOCHS == 0):
if not thr is None:
if thr.is_alive():
qsf.print_time("***WAITING*** until tfrecord gets loaded", end=" ")
else:
qsf.print_time("tfrecord is ***ALREADY LOADED*** ", end=" ")
thr.join()
qsf.print_time("Done")
qsf.print_time("Inserting new data", end=" ")
for n_train in range(len(trains_to_update)):
if trains_to_update[n_train]:
qsf.add_file_to_dataset(dataset = dataset_train,
new_dataset = thr_result[n_train],
train_next = train_next[n_train])
qsf.print_time("Done")
thr_result = []
fpaths = []
for n_train in range(len(trains_to_update)):
if trains_to_update[n_train]:
fpaths.append(files['train'][n_train][train_next[n_train]['file']])
qsf.print_time("Will read in background: "+fpaths[-1])
thr = Thread(target=qsf.getMoreFiles, args=(fpaths,thr_result, CLUSTER_RADIUS, HOR_FLIP, TILE_LAYERS, TILE_SIDE))
thr.start()
train_buf_index = epoch % TRAIN_BUFFER_CPU # GPU memory from CPU memory (now 4)
if epoch >=600:
learning_rate = LR600
elif epoch >=400:
learning_rate = LR400
elif epoch >=200:
learning_rate = LR200
elif epoch >=100:
learning_rate = LR100
else:
learning_rate = LR
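# The cascade above is a step schedule: e.g. an epoch counter of 250 falls in
# the [200, 400) band and trains with LR200, while epochs 0..99 use the base LR
# (all rate values come from the config file; none are assumed here).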
if (train_buf_index == 0) and SHUFFLE_FILES:
qsf.print_time("Shuffling how datasets datasets_train_lvar and datasets_train_hvar are zipped together", end="")
qsf.shuffle_in_place(
dataset_data = dataset_train, #alternating clusters from 4 sources.each cluster has all needed data (concatenated)
period = num_train_subs)
qsf.print_time(" Done")
sti = train_buf_index * dataset_train_size * BATCH_SIZE # TRAIN_BUFFER_GPU * num_train_subs
eti = sti+ dataset_train_size * BATCH_SIZE# (train_buf_index +1) * TRAIN_BUFFER_GPU * num_train_subs
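# Buffer-window sketch: train_buf_index cycles through 0..TRAIN_BUFFER_CPU-1,
# and each window spans dataset_train_size * BATCH_SIZE rows; e.g. epoch 5 with
# TRAIN_BUFFER_CPU == 4 selects rows [1 * window, 2 * window) of dataset_train.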
sess.run(iterator_tt.initializer, feed_dict={corr2d_train_placeholder: dataset_train[sti:eti,:corr2d_len],
target_disparity_train_placeholder: dataset_train[sti:eti,corr2d_len:corr2d_len+target_disparity_len],
gt_ds_train_placeholder: dataset_train[sti:eti,corr2d_len+target_disparity_len:corr2d_len+target_disparity_len+gtds_len] })
for i in range(dataset_train_size):
# try:
_, GW_loss_trained, G_losses_trained, S_loss_trained, W_loss_trained, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
[ G_opt,
GW_loss,
G_losses,
S_loss,
W_loss,
outs[0],
_disp_slice,
_d_gt_slice,
_out_diff,
_out_diff2,
_w_norm,
_out_wdiff2,
_cost1,
GT_variance
],
feed_dict={tf_batch_weights: feed_batch_weights,
lr: learning_rate
})
loss_gw_train_hist[i] = GW_loss_trained
for nn, gl in enumerate(G_losses_trained):
loss_g_train_hists[nn][i] = gl
loss_s_train_hist[i] = S_loss_trained
loss_w_train_hist[i] = W_loss_trained
loss2_train_hist[i] = out_cost1
gtvar_train_hist[i] = gt_variance
# except tf.errors.OutOfRangeError:
# print("****** NO MORE DATA! train done at step %d"%(i))
# break
tt_gw_avg[0] = np.average(loss_gw_train_hist).astype(np.float32)
for nn, lgth in enumerate(loss_g_train_hists):
tt_g_avgs[0][nn] = np.average(lgth).astype(np.float32)
tt_s_avg[0] = np.average(loss_s_train_hist).astype(np.float32)
tt_w_avg[0] = np.average(loss_w_train_hist).astype(np.float32)
tt2_avg[0] = np.average(loss2_train_hist).astype(np.float32)
tt_gtvar_avg[0] = np.average(gtvar_train_hist).astype(np.float32)
for ntest,dataset_test in enumerate(datasets_test):
sess.run(iterator_tt.initializer, feed_dict={corr2d_train_placeholder: dataset_test[:, :corr2d_len], #['corr2d'],
target_disparity_train_placeholder: dataset_test[:, corr2d_len:corr2d_len+target_disparity_len], # ['target_disparity'],
gt_ds_train_placeholder: dataset_test[:, corr2d_len+target_disparity_len:] }) # ['gt_ds']})
"""
TODO: Make it possible to have different length dataset_test arrays to mix different length test files
"""
for i in range(dataset_test_size):
# for i in range(dataset_test.shape[0]):
# try:
GW_loss_tested, G_losses_tested, S_loss_tested, W_loss_tested, output, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, out_cost1, gt_variance = sess.run(
[GW_loss,
G_losses,
S_loss,
W_loss,
outs[0],
_disp_slice,
_d_gt_slice,
_out_diff,
_out_diff2,
_w_norm,
_out_wdiff2,
_cost1,
GT_variance
],
feed_dict={tf_batch_weights: feed_batch_weight_1 , # feed_batch_weights,
lr: learning_rate
})
loss_gw_test_hist[i] = GW_loss_tested
for nn, gl in enumerate(G_losses_tested):
loss_g_test_hists[nn][i] = gl
loss_s_test_hist[i] = S_loss_tested
loss_w_test_hist[i] = W_loss_tested
loss2_test_hist[i] = out_cost1
gtvar_test_hist[i] = gt_variance
# except tf.errors.OutOfRangeError:
# print("test done at step %d"%(i))
# break
tt_gw_avg[ntest+1] = np.average(loss_gw_test_hist).astype(np.float32)
for nn, lgth in enumerate(loss_g_test_hists):
tt_g_avgs[ntest+1][nn] = np.average(lgth).astype(np.float32)
tt_s_avg[ntest+1] = np.average(loss_s_test_hist).astype(np.float32)
tt_w_avg[ntest+1] = np.average(loss_w_test_hist).astype(np.float32)
tt2_avg[ntest+1] = np.average(loss2_test_hist).astype(np.float32)
tt_gtvar_avg[ntest+1] = np.average(gtvar_test_hist).astype(np.float32)
if (((epoch + 1) == EPOCHS_TO_RUN) or (((epoch + 1) % EPOCHS_FULL_TEST) == 0)) and (len(image_data) > 0) :
lf = None
if (epoch + 1) == EPOCHS_TO_RUN: # last
print("Last epoch, removing train/test datasets to reduce memory footprint")
del(dataset_train)
del(dataset_test)
if LOGPATH:
lf=open(LOGPATH,"w") #overwrite previous (or make it "a"?
last_epoch = (epoch + 1) == EPOCHS_TO_RUN
ind_img = [0]
if last_epoch:
ind_img = [i for i in range(len(image_data))]
###################################################
# Read the full image
###################################################
## test_summaries_img = [0.0]*len(ind_img) # datasets_img)
disp_out= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_cost_nw= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_cost_w= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_d= np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_avg_disparity = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_gt_disparity = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
dbg_offs = np.empty((IMG_WIDTH * IMG_HEIGHT), dtype=np.float32)
for ntest in ind_img: # datasets_img):
dataset_img = qsf.readImageData(
image_data = image_data,
files = files,
indx = ntest,
cluster_radius = CLUSTER_RADIUS,
tile_layers = TILE_LAYERS, # 4
tile_side = TILE_SIDE, # 9
width = IMG_WIDTH, #160
replace_nans = True)
sess.run(iterator_tt.initializer, feed_dict={corr2d_train_placeholder: dataset_img['corr2d'],
target_disparity_train_placeholder: dataset_img['target_disparity'],
gt_ds_train_placeholder: dataset_img['gt_ds']})
for start_offs in range(0,disp_out.shape[0],BATCH_SIZE):
end_offs = min(start_offs+BATCH_SIZE,disp_out.shape[0])
# try:
output, cost_nw, cost_w, dd, avg_disparity, gt_disparity, offs = sess.run(
[outs[0], # [?,1]
rslt_cost_nw, #[?,]
rslt_cost_w, #[?,]
rslt_d, #[?,]
rslt_avg_disparity,
rslt_gt_disparity,
rslt_offs
],
feed_dict={
tf_batch_weights: feed_batch_weight_1 # feed_batch_weights,
})
# except tf.errors.OutOfRangeError:
# print("test done at step %d"%(i))
# break
# try:
disp_out[start_offs:end_offs] = output.flatten()
dbg_cost_nw[start_offs:end_offs] = cost_nw.flatten()
dbg_cost_w [start_offs:end_offs] = cost_w.flatten()
dbg_d[start_offs:end_offs] = dd.flatten()
dbg_avg_disparity[start_offs:end_offs] = avg_disparity.flatten()
dbg_gt_disparity[start_offs:end_offs] = gt_disparity.flatten()
dbg_offs[start_offs:end_offs] = offs.flatten()
# except ValueError:
# print("dataset_img_size= %d, i=%d, output.shape[0]=%d "%(dataset_img_size, i, output.shape[0]))
# break;
pass
result_file = files['result'][ntest] # result_files[ntest]
os.makedirs(os.path.dirname(result_file), exist_ok=True)
extra = dataset_img['t_extra']
if extra is None:
extra = np.array([dataset_img['gtruths'].shape[0],0])
rslt = np.concatenate(
[disp_out.reshape(-1,1),
dataset_img['t_disps'], #t_disps[ntest], disp_out.shape[0],BATCH_SIZE
dataset_img['gtruths'], # gtruths[ntest],
dbg_cost_nw.reshape(-1,1),
dbg_cost_w.reshape(-1,1),
dbg_d.reshape(-1,1),
dbg_avg_disparity.reshape(-1,1),
dbg_gt_disparity.reshape(-1,1),
dbg_offs.reshape(-1,1),
extra, # len 3..6, #adding extra data layers
],1)
num_slices = rslt.shape[1]
np.save(
result_file,
rslt.reshape(IMG_HEIGHT, IMG_WIDTH,-1))
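# Layout sketch: with the defaults IMG_WIDTH == 20 and IMG_HEIGHT == 15 the saved
# array is (15, 20, num_slices) - one 15x20 slice per column concatenated into
# rslt above (NN disparity, target disparity, ground truth, debug layers, extra).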
eval_rslt = qsf.eval_results(
result_file,
ABSOLUTE_DISPARITY,
radius=0, # CLUSTER_RADIUS,
last_fgbg_mode = 1,
logfile=lf)
# img_gain_test0 = eval_rslt[0][0]/eval_rslt[0][1]
img_gain_test0 = eval_rslt[9][1]
img_gain_test9 = eval_rslt[9][0]/eval_rslt[9][1]
if SAVE_TIFFS:
qsf.result_npy_to_tiff(
result_file,
ABSOLUTE_DISPARITY,
fix_nan = True,
labels=qsf.SLICE_LABELS[0:num_slices],
logfile=lf)
"""
Remove dataset_img (if it is not [0] to reduce memory footprint
"""
if ntest > 0:
image_data[ntest] = None
if lf:
lf.close()
# tensorboard scalars
tt_summaries[0] = sess.run([merged],
feed_dict={ tf_ph_GW_loss: tt_gw_avg[0],
tf_ph_G_loss: tt_g_avgs[0][0], #train_g_avg,
tf_ph_G_losses: tt_g_avgs[0],
tf_ph_S_loss: tt_s_avg[0],
tf_ph_W_loss: tt_w_avg[0],
tf_ph_sq_diff: tt2_avg[0], # train2_avg,
tf_gtvar_diff: tt_gtvar_avg[0],
tf_img_test0: img_gain_test0,
tf_img_test9: img_gain_test9})
for ntest, _ in enumerate(datasets_test):
tt_summaries[ntest+1] = sess.run([merged],
feed_dict={
tf_ph_GW_loss: tt_gw_avg[ntest+1],
tf_ph_G_loss: tt_g_avgs[ntest+1][0],
tf_ph_G_losses: tt_g_avgs[ntest+1], # temporary - there is no data for test
tf_ph_S_loss: tt_s_avg[ntest+1],
tf_ph_W_loss: tt_w_avg[ntest+1],
tf_ph_sq_diff: tt2_avg[ntest+1], #test2_avg,
tf_gtvar_diff: tt_gtvar_avg[ntest+1],
tf_img_test0: img_gain_test0,
tf_img_test9: img_gain_test9})
for n,tt_writer in enumerate(tt_writers):
## tt_writer.add_summary(tt_summaries[n],epoch)
tt_writer.add_summary(tt_summaries[n][0],epoch)
# if epoch ==0 :
# print ("adding delay to make directory creation time different")
# time.sleep(2.0) # reduce later
qsf.print_time("==== %04d:%03d -> %.4f %.4f %.4f %.4f %.4f (%.4f %.4f %.4f %.4f %.4f) ===="%(
epoch,i,
tt_gw_avg[0], tt_gw_avg[1], tt_gw_avg[2], tt_gw_avg[3], tt_gw_avg[4],
tt2_avg[0], tt2_avg[1], tt2_avg[2], tt2_avg[3], tt2_avg[4]))
if (not CHECKPOINT_PERIOD is None) and (((epoch + 1) % CHECKPOINT_PERIOD) == 0):
print("Saving periodic checkpoint (trained variables only) to %s, global_step = %d"%(os.path.dirname(files['checkpoints']), epoch),end=" => ")
print(saver.save(sess, files['checkpoints'], global_step=epoch, write_meta_graph=False))
# Close writers
for tt_writer in tt_writers:
try:
tt_writer.close()
except Exception:
print ("Could not close tt_writer: ",tt_writer)
print("Saving final checkpoint (trained variables only) to %s"%(files['checkpoints']),end=" => ")
print(saver.save(sess, files["checkpoints"]))
print("All done")
exit (0)
"""
""" lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/nn_eval_01.py 0000664 0000000 0000000 00000041661 13517677053 0022711 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#from PIL import Image
import os
import sys
#import glob
#import numpy as np
import imagej_tiffwriter
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import qcstereo_functions as qsf
import numpy as np
#import xml.etree.ElementTree as ET
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 324 # tiles per image row
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
print ("Configuration file: " + conf_file)
parameters, dirs, files, dbg_parameters = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS,ABSOLUTE_DISPARITY = [None]*2
FIGS_EXTENSIONS = ['png','pdf','svg']
EVAL_MODES = ["train","infer"]
FIGS_SAVESHOW = ['save','show']
globals().update(parameters)
try:
FIGS_EXTENSIONS = globals()['FIGS_ESXTENSIONS'] # fixing typo in configs
except KeyError:
pass
#exit(0)
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 for a single train set (160 with TWO_TRAINS). Each batch has balanced D/S tiles; batches are shuffled, but tiles inside a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs, files, suffix = SUFFIX)
#import tensorflow.contrib.slim as slim
NN_DISP = 0
HEUR_DISP = 1
GT_DISP = 2
GT_CONF = 3
NN_NAN = 4
HEUR_NAN = 5
NN_DIFF = 6
HEUR_DIFF = 7
CONF_MAX = 0.7
ERR_AMPL = 0.3
TIGHT_TOP = 0.95
TIGHT_HPAD = 1.0
TIGHT_WPAD = 1.0
FIGSIZE = [8.5,11.0]
WOI_COLOR = "red"
TRANSPARENT = True # for export
#dbg_parameters
def get_fig_params(disparity_ranges):
fig_params = []
for dr in disparity_ranges:
if dr[-1][0]=='-':
fig_params.append(None)
else:
subs = []
for s in dr[:-1]:
mm = s[:2]
try:
lims = s[2]
except IndexError:
lims = None
subs.append({'lim_val':mm, 'lim_xy':lims})
fig_params.append({'name':dr[-1],'ranges':subs})
return fig_params
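# Hypothetical input (illustrative only, not from a real config) showing the
# structure this parser expects and what it returns:
#   disparity_ranges = [
#       [[0.0, 2.0, [4, 16, 3, 12]], [0.5, 3.0], "scene-a"], # two sub-ranges
#       [[0.0, 5.0], "-disabled"], # a leading '-' in the name skips the figure
#   ]
#   get_fig_params(disparity_ranges) ->
#   [{'name': 'scene-a',
#     'ranges': [{'lim_val': [0.0, 2.0], 'lim_xy': [4, 16, 3, 12]},
#                {'lim_val': [0.5, 3.0], 'lim_xy': None}]},
#    None]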
#try:
fig_params = get_fig_params(dbg_parameters['disparity_ranges'])
pass
#temporary:
TIFF_ONLY = False # True
max_bad = 2.5 # excludes only directly bad tiles
max_diff = 1.5 # maximal 3x3 max-min disparity difference
max_target_err = 1.0 # maximal heuristic (target) disparity error for a tile to be counted
max_disp = 5.0
min_strength = 0.18 # ignore tiles with ground truth confidence below this
min_neibs = 1
max_log_to_mm = 0.5 # difference between center average and center should be under this fraction of max-min (0 - disables feature)
num_bins = 15 # number of histogram bins
use_gt_weights = True # False # True
index_gt = 2
index_gt_weight = 3
index_heur_err = 7
index_nn_err = 6
index_mm = 8 # max-min
index_log = 9
index_bad = 10
index_num_neibs = 11
"""
Debugging high 9-tile variations, removing error for all tiles with lower difference between max and min
"""
#min_diff = 0.25 # remove all flat tiles with spread less than this (do not show on the heuristic/network disparity error subplots)
min_diff = 0 # remove all flat tiles with spread less than this
max_target_err2 = max_target_err * max_target_err
if not 'show' in FIGS_SAVESHOW:
plt.ioff()
#for mode in ['train','infer']:
for mode in ['infer']:
figs = []
ffiles = [] # no ext
def setlimsxy(lim_xy):
if not lim_xy is None:
plt.xlim(min(lim_xy[:2]),max(lim_xy[:2]))
plt.ylim(max(lim_xy[2:]),min(lim_xy[2:]))
cumul_weights = None
for nfile, fpars in enumerate(fig_params):
if not fpars is None:
img_file = files['result'][nfile]
if mode == 'infer':
img_file = img_file.replace('.npy','-infer.npy')
"""
try:
# data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=True)
# data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
except:
print ("Image file does not exist:", img_file)
continue
"""
pass
data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
if True: #TIFF_ONLY:
tiff_path = img_file.replace('.npy','-test.tiff')
data = data.transpose(2,0,1)
print("Saving results to TIFF: "+tiff_path)
imagej_tiffwriter.save(tiff_path,data,labels=labels)
"""
Calculate histograms
"""
err_heur2 = data[index_heur_err]*data[index_heur_err]
err_nn2 = data[index_nn_err]* data[index_nn_err]
diff_log2 = data[index_log]* data[index_log]
weights = (
(data[index_gt] < max_disp) &
(err_heur2 < max_target_err2) &
(data[index_bad] < max_bad) &
(data[index_gt_weight] >= min_strength) &
(data[index_num_neibs] >= min_neibs)&
(data[index_log] < max_log_to_mm * np.sqrt(data[index_mm]) )
).astype(data.dtype) # 0.0 or 1.0
#max_disp
#max_target_err
if use_gt_weights:
weights *= data[index_gt_weight]
mm = data[index_mm]
weh = np.nan_to_num(weights*err_heur2)
wen = np.nan_to_num(weights*err_nn2)
wel = np.nan_to_num(weights*diff_log2)
hist_weights,bin_vals = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = weights, density = False)
hist_err_heur2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = weh, density = False)
hist_err_nn2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = wen, density = False)
hist_diff_log2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = wel, density = False)
if cumul_weights is None:
cumul_weights = hist_weights
cumul_err_heur2 = hist_err_heur2
cumul_err_nn2 = hist_err_nn2
cumul_diff_log2 = hist_diff_log2
else:
cumul_weights += hist_weights
cumul_err_heur2 += hist_err_heur2
cumul_err_nn2 += hist_err_nn2
cumul_diff_log2 += hist_diff_log2
hist_err_heur2 = np.nan_to_num(hist_err_heur2/hist_weights)
hist_err_nn2 = np.nan_to_num(hist_err_nn2/hist_weights)
hist_gain2 = np.nan_to_num(hist_err_heur2/hist_err_nn2)
hist_gain = np.sqrt(hist_gain2)
hist_diff_log2 = np.nan_to_num(hist_diff_log2/hist_weights)
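# Reading the gain: hist_gain > 1.0 in a bin means the network beats the
# heuristic there; e.g. per-bin weighted MSEs of 0.09 and 0.04 pix^2 give
# hist_gain = sqrt(0.09 / 0.04) = 1.5, i.e. a 1.5x RMSE improvement.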
print("hist_err_heur2", end = " ")
print(np.sqrt(hist_err_heur2))
print("hist_err_nn2", end = " ")
print(np.sqrt(hist_err_nn2))
print("hist_gain", end = " ")
print(hist_gain)
print("hist_diff_log2", end = " ")
print(np.sqrt(hist_diff_log2))
if min_diff> 0.0:
pass
good = (mm > min_diff).astype(mm.dtype)
good /= good # good -> 1.0, bad -> NaN
data[index_heur_err] *= good
data[index_nn_err] *= good
data = data.transpose(1,2,0)
if TIFF_ONLY:
continue
for subindex, rng in enumerate(fpars['ranges']):
lim_val = rng['lim_val']
lim_xy = rng['lim_xy']
fig = plt.figure(figsize=FIGSIZE)
fig.canvas.set_window_title(fpars['name'])
fig.suptitle(fpars['name'])
ax_conf=plt.subplot(322)
ax_conf.set_title("Ground truth confidence")
# fig.suptitle("Groud truth confidence")
plt.imshow(data[...,GT_CONF], vmin=0, vmax=CONF_MAX, cmap='gray')
if not lim_xy is None:
pass # show frame
xdata=[min(lim_xy[:2]),max(lim_xy[:2]),max(lim_xy[:2]),min(lim_xy[:2]),min(lim_xy[:2])]
ydata=[min(lim_xy[2:]),min(lim_xy[2:]),max(lim_xy[2:]),max(lim_xy[2:]),min(lim_xy[2:])]
plt.plot(xdata,ydata,color=WOI_COLOR)
# setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_gtd=plt.subplot(321)
ax_gtd.set_title("Ground truth disparity map")
plt.imshow(data[...,GT_DISP], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_hed=plt.subplot(323)
ax_hed.set_title("Heuristic disparity map")
plt.imshow(data[...,HEUR_NAN], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_nnd=plt.subplot(325)
ax_nnd.set_title("Network disparity output")
plt.imshow(data[...,NN_NAN], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_hee=plt.subplot(324)
ax_hee.set_title("Heuristic disparity error")
plt.imshow(data[...,HEUR_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL)
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_nne=plt.subplot(326)
ax_nne.set_title("Network disparity error")
plt.imshow(data[...,NN_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL)
setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
plt.tight_layout(rect =[0,0,1,TIGHT_TOP], h_pad = TIGHT_HPAD, w_pad = TIGHT_WPAD)
figs.append(fig)
fb_noext = os.path.splitext(os.path.basename(img_file))[0]#
if subindex > 0:
if subindex < 10:
fb_noext+="abcdefghi"[subindex-1]
else:
fb_noext+="-"+str(subindex)
ffiles.append(fb_noext)
pass
if True:
cumul_err_heur2 = np.nan_to_num(cumul_err_heur2/cumul_weights)
cumul_err_nn2 = np.nan_to_num(cumul_err_nn2/cumul_weights)
cumul_gain2 = np.nan_to_num(cumul_err_heur2/cumul_err_nn2)
cumul_gain = np.sqrt(cumul_gain2)
cumul_diff_log2 = np.nan_to_num(cumul_diff_log2/cumul_weights)
print("cumul_weights", end = " ")
print(cumul_weights)
print("cumul_err_heur", end = " ")
print(np.sqrt(cumul_err_heur2))
print("cumul_err_nn", end = " ")
print(np.sqrt(cumul_err_nn2))
print("cumul_gain", end = " ")
print(cumul_gain)
print("cumul_diff_log2", end = " ")
print(np.sqrt(cumul_diff_log2))
fig, ax1 = plt.subplots()
ax1.set_xlabel('3x3 tiles ground truth disparity max-min (pix)')
ax1.set_ylabel('RMSE\n(pix)', color='black', rotation='horizontal')
ax1.yaxis.set_label_coords(-0.045,0.92)
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_err_nn2), 'tab:red',label="network disparity RMSE")
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2), 'tab:green',label="heuristic disparity RMSE")
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_diff_log2), 'tab:cyan',label="ground truth LoG")
ax1.tick_params(axis='y', labelcolor='black')
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel('weight', color='black', rotation='horizontal') # we already handled the x-label with ax1
ax2.yaxis.set_label_coords(1.06,1.0)
ax2.plot(bin_vals[0:-1], cumul_weights,color='grey',dashes=[6, 2],label='weights = n_tiles * gt_confidence')
ax1.legend(loc="upper left", bbox_to_anchor=(0.2,1.0))
ax2.legend(loc="lower right", bbox_to_anchor=(1.0,0.1))
"""
fig = plt.figure(figsize=FIGSIZE)
fig.canvas.set_window_title('Cumulative')
fig.suptitle('Difference to GT')
# ax_conf=plt.subplot(322)
ax_conf=plt.subplot(211)
ax_conf.set_title("RMS vs max9-min9")
plt.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2),'red',
bin_vals[0:-1], np.sqrt(cumul_err_nn2),'green',
bin_vals[0:-1], np.sqrt(cumul_diff_log2),'blue')
figs.append(fig)
ffiles.append('cumulative')
ax_conf=plt.subplot(212)
ax_conf.set_title("weights vs max9-min9")
plt.plot(bin_vals[0:-1], cumul_weights,'black')
"""
figs.append(fig)
ffiles.append('cumulative')
pass
# how to allow adjustment before applying tight_layout?
pass
for fig in figs:
fig.tight_layout(rect =[0,0,1,TIGHT_TOP], h_pad = TIGHT_HPAD, w_pad = TIGHT_WPAD)
if FIGS_EXTENSIONS and figs and 'save' in FIGS_SAVESHOW:
try:
print ("Creating output directory for figures: ",dirs['figures'])
os.makedirs(dirs['figures'])
except OSError:
pass # the directory already exists
pp=None
if 'pdf' in FIGS_EXTENSIONS:
if mode == 'infer':
pdf_path = os.path.join(dirs['figures'],"figures-infer%s.pdf"%str(min_diff))
else:
pdf_path = os.path.join(dirs['figures'],"figures-train%s.pdf"%str(min_diff))
pp= PdfPages(pdf_path)
for fb_noext, fig in zip(ffiles,figs):
for ext in FIGS_EXTENSIONS:
if ext == 'pdf':
pass
fig.savefig(pp,format='pdf')
else:
if mode == 'infer':
noext = fb_noext+'-infer'
else:
noext = fb_noext+'-train'
fig.savefig(
fname = os.path.join(dirs['figures'],noext+"."+ext),
transparent = TRANSPARENT,
)
pass
if pp:
pp.close()
if 'show' in FIGS_SAVESHOW:
plt.show()
#qsf.evaluateAllResults(result_files = files['result'],
# absolute_disparity = ABSOLUTE_DISPARITY,
# cluster_radius = CLUSTER_RADIUS)
print("All done")
exit (0)
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/nn_eval_lwir.py 0000664 0000000 0000000 00000032504 13517677053 0023442 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#from PIL import Image
import os
import sys
#import glob
#import numpy as np
import imagej_tiffwriter
import time
import imagej_tiff as ijt
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import qcstereo_functions as qsf
import numpy as np
#import xml.etree.ElementTree as ET
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
#IMG_WIDTH = 20 # 324 # tiles per image row Defined in config
#IMG_HEIGHT = 15 # 324 # tiles per image row Defined in config
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
try:
modes = [sys.argv[3]] # train, infer
except IndexError:
modes = ['train']
print ("Configuration file: " + conf_file)
parameters, dirs, files, dbg_parameters = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
IMG_WIDTH = None # 20 # 324 # tiles per image row Defined in config
IMG_HEIGHT = None # 15 # 324 # tiles per image row Defined in config
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS,ABSOLUTE_DISPARITY = [None]*2
FGBG_MODE = 1 # 0 - do not filter by single-plane, 1 - remove split-plane tiles, 2 - remove split planes and neighbors
FIGS_EXTENSIONS = ['png','pdf','svg']
EVAL_MODES = ["train","infer"]
FIGS_SAVESHOW = ['save','show']
globals().update(parameters)
try:
FIGS_EXTENSIONS = globals()['FIGS_ESXTENSIONS'] # fixing typo in configs
except KeyError:
pass
#exit(0)
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 for a single train set (160 with TWO_TRAINS). Each batch has balanced D/S tiles; batches are shuffled, but tiles inside a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs, files, suffix = SUFFIX)
#import tensorflow.contrib.slim as slim
#NN_DISP = 0
#HEUR_DISP = 1
#GT_DISP = 2
#GT_CONF = 3
#NN_NAN = 4
#HEUR_NAN = 5
#NN_DIFF = 6
#HEUR_DIFF = 7
# Now - more layers
CONF_MAX = 0.7
ERR_AMPL = 0.4 # 0.3
TIGHT_TOP = 0.95
TIGHT_HPAD = 1.0
TIGHT_WPAD = 1.0
FIGSIZE = [8.5,11.0]
WOI_COLOR = "red"
X_COLOR = "grey"
X_NEIBS = False
TRANSPARENT = True # for export
#dbg_parameters
def get_fig_params(disparity_ranges):
fig_params = []
for dr in disparity_ranges:
if dr[-1][0]=='-':
fig_params.append(None)
else:
subs = []
for s in dr[:-1]:
mm = s[:2]
try:
lims = s[2]
except IndexError:
lims = None
subs.append({'lim_val':mm, 'lim_xy':lims})
fig_params.append({'name':dr[-1],'ranges':subs})
return fig_params
#try:
#fig_params = get_fig_params(dbg_parameters['disparity_ranges'])
extra_path = os.path.join(root_dir, dbg_parameters['extra'])
eo_width = dbg_parameters['eo_params']['width']
eo_height = dbg_parameters['eo_params']['height']
eo_woi = dbg_parameters['eo_params']['woi'] # (x,y,width, height)
eo_disparity_scale = 1.0/dbg_parameters['eo_params']['disparity_scale'] # 14.2
image_sets = dbg_parameters['extra_paths'] # list of dictionaries
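# Hypothetical image_sets entry (illustrative only; keys as used below):
#   {'title': 'Scene 1', 'range': [0.0, 4.0],
#    'dsi_path': 'scene1-DSI.tiff', 'dsi_slice': 0}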
pass
#temporary:
TIFF_ONLY = False # True
max_bad = 2.5 # excludes only directly bad tiles
max_diff = 1.5 # maximal 3x3 max-min disparity difference
max_target_err = 1.0 # maximal heuristic (target) disparity error for a tile to be counted
max_disp = 5.0
min_strength = 0.18 # ignore tiles with ground truth confidence below this
min_neibs = 1
max_log_to_mm = 0.5 # difference between center average and center should be under this fraction of max-min (0 - disables feature)
num_bins = 15 # number of histogram bins
use_gt_weights = True # False # True
index_gt = 2
index_gt_weight = 3
index_heur_err = 7
index_nn_err = 6
index_fgbg_sngl = 10
index_fgbg_neib = 11
index_mm = 23 # 8 # max-min
index_log = 24 # 9
index_bad = 25 # 10
index_num_neibs = 26 # 11
index_fgbg = [index_fgbg_sngl,index_fgbg_neib][X_NEIBS]
"""
Debugging high 9-tile variations, removing error for all tiles with lower difference between max and min
"""
#min_diff = 0.25 # remove all flat tiles with spread less than this (do not show on the heuristic/network disparity error subplots)
min_diff = 0 # remove all flat tiles with spread less than this
max_target_err2 = max_target_err * max_target_err
if not 'show' in FIGS_SAVESHOW:
plt.ioff()
#for mode in ['train','infer']:
#for mode in ['infer']:
def cross_out(
plt,
cross_out_mask):
height = cross_out_mask.shape[0]
width = cross_out_mask.shape[1]
for row in range (height):
for col in range(width):
if cross_out_mask[row,col]:
xdata = [col-0.3, col+0.3]
ydata = [row-0.3, row+0.3]
plt.plot(xdata,ydata,color=X_COLOR)
ydata = [row+0.3, row-0.3]
plt.plot(xdata,ydata,color=X_COLOR)
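# Sketch: cross_out() overlays a grey 'X' (two diagonal strokes spanning +/-0.3
# tiles) on every masked tile; e.g. a mask that is True only at (row=0, col=3)
# adds exactly two line segments centered on that tile.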
for mode in modes: # ['train']:
figs = []
ffiles = [] # no ext
def setlimsxy(lim_xy):
if not lim_xy is None:
plt.xlim(min(lim_xy[:2]),max(lim_xy[:2]))
plt.ylim(max(lim_xy[2:]),min(lim_xy[2:]))
cumul_weights = None
cmap_disp=plt.get_cmap('viridis') # ('cividis')
cmap_diff=plt.get_cmap('coolwarm') #('seismic') # ('viridis')
for nfile, img_pars in enumerate(image_sets):
if not img_pars is None:
img_file = files['result'][nfile]
if mode == 'infer':
img_file = img_file.replace('.npy','-infer.npy')
print ("Processing image set: "+img_file)
data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
cross_out_mask = data[...,index_fgbg] < 0.5 #data.shape = (15,20,27)
# for subindex, rng in enumerate(fpars['ranges']):
lim_val = img_pars['range'] # rng['lim_val']
lim_val[0] -= ERR_AMPL
lim_xy = [-0.5, IMG_WIDTH - 0.5, -0.5, IMG_HEIGHT - 0.5] # rng['lim_xy']
#start new image page
fig = plt.figure(figsize=FIGSIZE)
fig.canvas.set_window_title(img_pars['title'])
fig.suptitle(img_pars['title'])
# Create EO DSI image
# load tiff image
img_ds_main = ijt.imagej_tiff(os.path.join(extra_path,img_pars['dsi_path'] ))
ds_main = img_ds_main.image[...,img_pars['dsi_slice']] * eo_disparity_scale
ds_main = np.maximum(ds_main, lim_val[0])
ds_main = np.minimum(ds_main, lim_val[1])
ax_conf=plt.subplot(322)
ax_conf.set_title("Hi-res camera disparity map")
plt.imshow(ds_main, vmin=lim_val[0], vmax=lim_val[1], cmap=cmap_disp)
setlimsxy([-0.5, eo_width-0.5, -0.5, eo_height - 0.5])
if not eo_woi is None:
pass # show frame
xdata=[eo_woi['x'], eo_woi['x'] + eo_woi['width'], eo_woi['x'] + eo_woi['width'], eo_woi['x'], eo_woi['x']]
ydata=[eo_woi['y'], eo_woi['y'], eo_woi['y'] + eo_woi['height'], eo_woi['y'] + eo_woi['height'], eo_woi['y']]
plt.plot(xdata,ydata,color=WOI_COLOR)
plt.colorbar(orientation='vertical') # location='bottom')
'''
# Ground truth confidence - to be replaced
ax_conf=plt.subplot(322)
ax_conf.set_title("Ground truth confidence")
plt.imshow(data[...,qsf.GT_CONF], vmin=0, vmax=CONF_MAX, cmap='gray')
if not lim_xy is None:
pass # show frame
xdata=[min(lim_xy[:2]),max(lim_xy[:2]),max(lim_xy[:2]),min(lim_xy[:2]),min(lim_xy[:2])]
ydata=[min(lim_xy[2:]),min(lim_xy[2:]),max(lim_xy[2:]),max(lim_xy[2:]),min(lim_xy[2:])]
plt.plot(xdata,ydata,color=WOI_COLOR)
plt.colorbar(orientation='vertical') # location='bottom')
'''
ax_gtd=plt.subplot(321)
ax_gtd.set_title("Ground truth disparity map")
plt.imshow(data[...,qsf.GT_DISP], vmin=lim_val[0], vmax=lim_val[1], cmap=cmap_disp)
setlimsxy(lim_xy)
cross_out(plt, cross_out_mask)
plt.colorbar(orientation='vertical') # location='bottom')
ax_hed=plt.subplot(323)
ax_hed.set_title("Heuristic disparity map")
plt.imshow(data[...,qsf.HEUR_NAN], vmin=lim_val[0], vmax=lim_val[1], cmap=cmap_disp)
setlimsxy(lim_xy)
cross_out(plt, cross_out_mask)
plt.colorbar(orientation='vertical') # location='bottom')
ax_nnd=plt.subplot(325)
ax_nnd.set_title("Network disparity output")
plt.imshow(data[...,qsf.NN_NAN], vmin=lim_val[0], vmax=lim_val[1], cmap=cmap_disp)
setlimsxy(lim_xy)
cross_out(plt, cross_out_mask)
plt.colorbar(orientation='vertical') # location='bottom')
ax_hee=plt.subplot(324)
ax_hee.set_title("Heuristic disparity error")
cross_out(plt, cross_out_mask)
plt.imshow(data[...,qsf.HEUR_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL, cmap=cmap_diff)
setlimsxy(lim_xy)
cross_out(plt, cross_out_mask)
plt.colorbar(orientation='vertical') # location='bottom')
ax_nne=plt.subplot(326)
ax_nne.set_title("Network disparity error")
plt.imshow(data[...,qsf.NN_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL, cmap=cmap_diff)
setlimsxy(lim_xy)
cross_out(plt, cross_out_mask)
plt.colorbar(orientation='vertical') # location='bottom')
plt.tight_layout(rect =[0,0,1,TIGHT_TOP], h_pad = TIGHT_HPAD, w_pad = TIGHT_WPAD)
figs.append(fig)
fb_noext = os.path.splitext(os.path.basename(img_file))[0]#
# if subindex > 0:
# if subindex < 10:
# fb_noext+="abcdefghi"[subindex-1]
# else:
# fb_noext+="-"+str(subindex)
ffiles.append(fb_noext)
pass
#
#how to allow adjustment before applying tight_layout?
pass
for fig in figs:
fig.tight_layout(rect =[0,0,1,TIGHT_TOP], h_pad = TIGHT_HPAD, w_pad = TIGHT_WPAD)
if FIGS_EXTENSIONS and figs and 'save' in FIGS_SAVESHOW:
try:
print ("Creating output directory for figures: ",dirs['figures'])
os.makedirs(dirs['figures'])
except OSError:
pass # the directory already exists
pp=None
if 'pdf' in FIGS_EXTENSIONS:
if mode == 'infer':
pdf_path = os.path.join(dirs['figures'],"figures-infer%s.pdf"%str(min_diff))
else:
pdf_path = os.path.join(dirs['figures'],"figures-train%s.pdf"%str(min_diff))
pp= PdfPages(pdf_path)
for fb_noext, fig in zip(ffiles,figs):
for ext in FIGS_EXTENSIONS:
if ext == 'pdf':
pass
fig.savefig(pp,format='pdf')
else:
if mode == 'infer':
noext = fb_noext+'-infer'
else:
noext = fb_noext+'-train'
fig.savefig(
fname = os.path.join(dirs['figures'],noext+"."+ext),
transparent = TRANSPARENT,
)
pass
if pp:
pp.close()
if 'show' in FIGS_SAVESHOW:
plt.show()
#qsf.evaluateAllResults(result_files = files['result'],
# absolute_disparity = ABSOLUTE_DISPARITY,
# cluster_radius = CLUSTER_RADIUS)
print("All done")
exit (0)
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/nn_eval_lwir_00.py 0000664 0000000 0000000 00000044414 13517677053 0023744 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#from PIL import Image
import os
import sys
#import glob
#import numpy as np
import imagej_tiffwriter
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import qcstereo_functions as qsf
import numpy as np
#import xml.etree.ElementTree as ET
qsf.TIME_START = time.time()
qsf.TIME_LAST = qsf.TIME_START
IMG_WIDTH = 20 # 324 # tiles per image row Defined in config
DEBUG_LEVEL= 1
try:
conf_file = sys.argv[1]
except IndexError:
print("Configuration path is required as a first argument. Optional second argument specifies root directory for data files")
exit(1)
try:
root_dir = sys.argv[2]
except IndexError:
root_dir = os.path.dirname(conf_file)
try:
modes = [sys.argv[3]] # train, infer
except IndexError:
modes = ['train']
print ("Configuration file: " + conf_file)
parameters, dirs, files, dbg_parameters = qsf.parseXmlConfig(conf_file, root_dir)
"""
Temporarily for backward compatibility
"""
if not "SLOSS_CLIP" in parameters:
parameters['SLOSS_CLIP'] = 0.5
print ("Old config, setting SLOSS_CLIP=", parameters['SLOSS_CLIP'])
"""
Defined in config file
"""
TILE_SIDE, TILE_LAYERS, TWO_TRAINS, NET_ARCH1, NET_ARCH2 = [None]*5
ABSOLUTE_DISPARITY,SYM8_SUB, WLOSS_LAMBDA, SLOSS_LAMBDA, SLOSS_CLIP = [None]*5
SPREAD_CONVERGENCE, INTER_CONVERGENCE, HOR_FLIP, DISP_DIFF_CAP, DISP_DIFF_SLOPE = [None]*5
CLUSTER_RADIUS,ABSOLUTE_DISPARITY = [None]*2
FGBG_MODE = 1 # 0 - do not filter by single-plane, 1 - remove split-plane tiles, 2 - remove split planes and neighbors
FIGS_EXTENSIONS = ['png','pdf','svg']
EVAL_MODES = ["train","infer"]
FIGS_SAVESHOW = ['save','show']
globals().update(parameters)
try:
FIGS_EXTENSIONS = globals()['FIGS_ESXTENSIONS'] # fixing typo in configs
except KeyError:
pass
#exit(0)
TILE_SIZE = TILE_SIDE* TILE_SIDE # == 81
FEATURES_PER_TILE = TILE_LAYERS * TILE_SIZE# == 324
BATCH_SIZE = ([1,2][TWO_TRAINS])*2*1000//25 # == 80 for a single train set (160 with TWO_TRAINS). Each batch has balanced D/S tiles; batches are shuffled, but tiles inside a batch are not
SUFFIX=(str(NET_ARCH1)+'-'+str(NET_ARCH2)+
(["R","A"][ABSOLUTE_DISPARITY]) +
(["NS","S8"][SYM8_SUB])+
"WLAM"+str(WLOSS_LAMBDA)+
"SLAM"+str(SLOSS_LAMBDA)+
"SCLP"+str(SLOSS_CLIP)+
(['_nG','_G'][SPREAD_CONVERGENCE])+
(['_nI','_I'][INTER_CONVERGENCE]) +
(['_nHF',"_HF"][HOR_FLIP]) +
('_CP'+str(DISP_DIFF_CAP)) +
('_S'+str(DISP_DIFF_SLOPE))
)
##############################################################################
cluster_size = (2 * CLUSTER_RADIUS + 1) * (2 * CLUSTER_RADIUS + 1)
center_tile_index = 2 * CLUSTER_RADIUS * (CLUSTER_RADIUS + 1)
qsf.prepareFiles(dirs, files, suffix = SUFFIX)
#import tensorflow.contrib.slim as slim
#NN_DISP = 0
#HEUR_DISP = 1
#GT_DISP = 2
#GT_CONF = 3
#NN_NAN = 4
#HEUR_NAN = 5
#NN_DIFF = 6
#HEUR_DIFF = 7
# Now - more layers
CONF_MAX = 0.7
ERR_AMPL = 0.3
TIGHT_TOP = 0.95
TIGHT_HPAD = 1.0
TIGHT_WPAD = 1.0
FIGSIZE = [8.5,11.0]
WOI_COLOR = "red"
X_COLOR = "grey"
X_NEIBS = False
TRANSPARENT = True # for export
#dbg_parameters
def get_fig_params(disparity_ranges):
fig_params = []
for dr in disparity_ranges:
if dr[-1][0]=='-':
fig_params.append(None)
else:
subs = []
for s in dr[:-1]:
mm = s[:2]
try:
lims = s[2]
except IndexError:
lims = None
subs.append({'lim_val':mm, 'lim_xy':lims})
fig_params.append({'name':dr[-1],'ranges':subs})
return fig_params
#try:
fig_params = get_fig_params(dbg_parameters['disparity_ranges'])
pass
#temporary:
TIFF_ONLY = False # True
max_bad = 2.5 # excludes only directly bad tiles
max_diff = 1.5 # maximal 3x3 max-min disparity difference
max_target_err = 1.0 # maximal heuristic (target) disparity error for a tile to be counted
max_disp = 5.0
min_strength = 0.18 # ignore tiles with ground truth confidence below this
min_neibs = 1
max_log_to_mm = 0.5 # difference between center average and center should be under this fraction of max-min (0 - disables feature)
num_bins = 15 # number of histogram bins
use_gt_weights = True # False # True
index_gt = 2
index_gt_weight = 3
index_heur_err = 7
index_nn_err = 6
index_fgbg_sngl = 10
index_fgbg_neib = 11
index_mm = 23 # 8 # max-min
index_log = 24 # 9
index_bad = 25 # 10
index_num_neibs = 26 # 11
index_fgbg = [index_fgbg_sngl,index_fgbg_neib][X_NEIBS]
"""
Debugging high 9-tile variations, removing error for all tiles with lower difference between max and min
"""
#min_diff = 0.25 # remove all flat tiles with spread less than this (do not show on the heuristic/network disparity error subplots)
min_diff = 0 # remove all flat tiles with spread less than this
max_target_err2 = max_target_err * max_target_err
if not 'show' in FIGS_SAVESHOW:
plt.ioff()
#for mode in ['train','infer']:
#for mode in ['infer']:
def cross_out(
plt,
cross_out_mask):
height = cross_out_mask.shape[0]
width = cross_out_mask.shape[1]
for row in range (height):
for col in range(width):
if cross_out_mask[row,col]:
xdata = [col-0.3, col+0.3]
ydata = [row-0.3, row+0.3]
plt.plot(xdata,ydata,color=X_COLOR)
ydata = [row+0.3, row-0.3]
plt.plot(xdata,ydata,color=X_COLOR)
for mode in modes: # ['train']:
figs = []
ffiles = [] # no ext
def setlimsxy(lim_xy):
if not lim_xy is None:
plt.xlim(min(lim_xy[:2]),max(lim_xy[:2]))
plt.ylim(max(lim_xy[2:]),min(lim_xy[2:]))
cumul_weights = None
for nfile, fpars in enumerate(fig_params):
if not fpars is None:
img_file = files['result'][nfile]
if mode == 'infer':
img_file = img_file.replace('.npy','-infer.npy')
"""
try:
# data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=True)
# data,_ = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
except:
print ("Image file does not exist:", img_file)
continue
"""
pass
data,labels = qsf.result_npy_prepare(img_file, ABSOLUTE_DISPARITY, fix_nan=True, insert_deltas=3)
if True: #TIFF_ONLY:
tiff_path = img_file.replace('.npy','-test.tiff')
data = data.transpose(2,0,1)
print("Saving results to TIFF: "+tiff_path)
imagej_tiffwriter.save(tiff_path,data,labels=labels)
"""
Calculate histograms
"""
err_heur2 = data[index_heur_err]*data[index_heur_err]
err_nn2 = data[index_nn_err]* data[index_nn_err]
diff_log2 = data[index_log]* data[index_log]
weights = (
(data[index_gt] < max_disp) &
(err_heur2 < max_target_err2) &
(data[index_bad] < max_bad) &
(data[index_gt_weight] >= min_strength) &
(data[index_num_neibs] >= min_neibs)&
(data[index_log] < max_log_to_mm * np.sqrt(data[index_mm]) )
).astype(data.dtype) # 0.0 or 1.0
#max_disp
#max_target_err
if use_gt_weights:
weights *= data[index_gt_weight]
mm = data[index_mm]
weh = np.nan_to_num(weights*err_heur2)
wen = np.nan_to_num(weights*err_nn2)
wel = np.nan_to_num(weights*diff_log2)
hist_weights,bin_vals = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = weights, density = False)
hist_err_heur2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = weh, density = False)
hist_err_nn2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = wen, density = False)
hist_diff_log2,_ = np.histogram(a=mm, bins = num_bins, range = (0.0, max_diff), weights = wel, density = False)
if cumul_weights is None:
cumul_weights = hist_weights
cumul_err_heur2 = hist_err_heur2
cumul_err_nn2 = hist_err_nn2
cumul_diff_log2 = hist_diff_log2
else:
cumul_weights += hist_weights
cumul_err_heur2 += hist_err_heur2
cumul_err_nn2 += hist_err_nn2
cumul_diff_log2 += hist_diff_log2
hist_err_heur2 = np.nan_to_num(hist_err_heur2/hist_weights)
hist_err_nn2 = np.nan_to_num(hist_err_nn2/hist_weights)
hist_gain2 = np.nan_to_num(hist_err_heur2/hist_err_nn2)
hist_gain = np.sqrt(hist_gain2)
hist_diff_log2 = np.nan_to_num(hist_diff_log2/hist_weights)
print("hist_err_heur2", end = " ")
print(np.sqrt(hist_err_heur2))
print("hist_err_nn2", end = " ")
print(np.sqrt(hist_err_nn2))
print("hist_gain", end = " ")
print(hist_gain)
print("hist_diff_log2", end = " ")
print(np.sqrt(hist_diff_log2))
if min_diff> 0.0:
pass
good = (mm > min_diff).astype(mm.dtype)
good /= good # good -> 1.0, bad -> NaN
data[index_heur_err] *= good
data[index_nn_err] *= good
data = data.transpose(1,2,0)
if TIFF_ONLY:
continue
cross_out_mask = data[...,index_fgbg] < 0.5 #data.shape = (15,20,27)
for subindex, rng in enumerate(fpars['ranges']):
lim_val = rng['lim_val']
lim_xy = rng['lim_xy']
fig = plt.figure(figsize=FIGSIZE)
fig.canvas.set_window_title(fpars['name'])
fig.suptitle(fpars['name'])
ax_conf=plt.subplot(322)
ax_conf.set_title("Ground truth confidence")
# fig.suptitle("Groud truth confidence")
plt.imshow(data[...,qsf.GT_CONF], vmin=0, vmax=CONF_MAX, cmap='gray')
if not lim_xy is None:
pass # show frame
xdata=[min(lim_xy[:2]),max(lim_xy[:2]),max(lim_xy[:2]),min(lim_xy[:2]),min(lim_xy[:2])]
ydata=[min(lim_xy[2:]),min(lim_xy[2:]),max(lim_xy[2:]),max(lim_xy[2:]),min(lim_xy[2:])]
plt.plot(xdata,ydata,color=WOI_COLOR)
# setlimsxy(lim_xy)
plt.colorbar(orientation='vertical') # location='bottom')
ax_gtd=plt.subplot(321)
ax_gtd.set_title("Ground truth disparity map")
plt.imshow(data[...,qsf.GT_DISP], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
cross_out(plt, cross_out_mask)
plt.colorbar(orientation='vertical') # location='bottom')
ax_hed=plt.subplot(323)
ax_hed.set_title("Heuristic disparity map")
plt.imshow(data[...,qsf.HEUR_NAN], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
cross_out(plt, cross_out_mask)
plt.colorbar(orientation='vertical') # location='bottom')
ax_nnd=plt.subplot(325)
ax_nnd.set_title("Network disparity output")
plt.imshow(data[...,qsf.NN_NAN], vmin=lim_val[0], vmax=lim_val[1])
setlimsxy(lim_xy)
cross_out(plt, cross_out_mask)
plt.colorbar(orientation='vertical') # location='bottom')
ax_hee=plt.subplot(324)
ax_hee.set_title("Heuristic disparity error")
cross_out(plt, cross_out_mask)
plt.imshow(data[...,qsf.HEUR_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL)
setlimsxy(lim_xy)
cross_out(plt, cross_out_mask)
plt.colorbar(orientation='vertical') # location='bottom')
ax_nne=plt.subplot(326)
ax_nne.set_title("Network disparity error")
plt.imshow(data[...,qsf.NN_DIFF], vmin=-ERR_AMPL, vmax=ERR_AMPL)
setlimsxy(lim_xy)
cross_out(plt, cross_out_mask)
plt.colorbar(orientation='vertical') # location='bottom')
plt.tight_layout(rect =[0,0,1,TIGHT_TOP], h_pad = TIGHT_HPAD, w_pad = TIGHT_WPAD)
figs.append(fig)
fb_noext = os.path.splitext(os.path.basename(img_file))[0]#
if subindex > 0:
if subindex < 10:
fb_noext+="abcdefghi"[subindex-1]
else:
fb_noext+="-"+str(subindex)
ffiles.append(fb_noext)
pass
if False: # True:
cumul_err_heur2 = np.nan_to_num(cumul_err_heur2/cumul_weights)
cumul_err_nn2 = np.nan_to_num(cumul_err_nn2/cumul_weights)
cumul_gain2 = np.nan_to_num(cumul_err_heur2/cumul_err_nn2)
cumul_gain = np.sqrt(cumul_gain2)
cumul_diff_log2 = np.nan_to_num(cumul_diff_log2/cumul_weights)
print("cumul_weights", end = " ")
print(cumul_weights)
print("cumul_err_heur", end = " ")
print(np.sqrt(cumul_err_heur2))
print("cumul_err_nn", end = " ")
print(np.sqrt(cumul_err_nn2))
print("cumul_gain", end = " ")
print(cumul_gain)
print("cumul_diff_log2", end = " ")
print(np.sqrt(cumul_diff_log2))
fig, ax1 = plt.subplots()
ax1.set_xlabel('3x3 tiles ground truth disparity max-min (pix)')
ax1.set_ylabel('RMSE\n(pix)', color='black', rotation='horizontal')
ax1.yaxis.set_label_coords(-0.045,0.92)
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_err_nn2), 'tab:red',label="network disparity RMSE")
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2), 'tab:green',label="heuristic disparity RMSE")
ax1.plot(bin_vals[0:-1], np.sqrt(cumul_diff_log2), 'tab:cyan',label="ground truth LoG")
ax1.tick_params(axis='y', labelcolor='black')
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel('weight', color='black', rotation='horizontal') # we already handled the x-label with ax1
ax2.yaxis.set_label_coords(1.06,1.0)
ax2.plot(bin_vals[0:-1], cumul_weights,color='grey',dashes=[6, 2],label='weights = n_tiles * gt_confidence')
ax1.legend(loc="upper left", bbox_to_anchor=(0.2,1.0))
ax2.legend(loc="lower right", bbox_to_anchor=(1.0,0.1))
"""
fig = plt.figure(figsize=FIGSIZE)
fig.canvas.set_window_title('Cumulative')
fig.suptitle('Difference to GT')
# ax_conf=plt.subplot(322)
ax_conf=plt.subplot(211)
ax_conf.set_title("RMS vs max9-min9")
plt.plot(bin_vals[0:-1], np.sqrt(cumul_err_heur2),'red',
bin_vals[0:-1], np.sqrt(cumul_err_nn2),'green',
bin_vals[0:-1], np.sqrt(cumul_diff_log2),'blue')
figs.append(fig)
ffiles.append('cumulative')
ax_conf=plt.subplot(212)
ax_conf.set_title("weights vs max9-min9")
plt.plot(bin_vals[0:-1], cumul_weights,'black')
"""
figs.append(fig)
ffiles.append('cumulative')
pass
# how to allow adjustment before applying tight_layout?
pass
for fig in figs:
fig.tight_layout(rect =[0,0,1,TIGHT_TOP], h_pad = TIGHT_HPAD, w_pad = TIGHT_WPAD)
if FIGS_EXTENSIONS and figs and 'save' in FIGS_SAVESHOW:
try:
print ("Creating output directory for figures: ",dirs['figures'])
os.makedirs(dirs['figures'])
except:
pass
pp=None
if 'pdf' in FIGS_EXTENSIONS:
if mode == 'infer':
pdf_path = os.path.join(dirs['figures'],"figures-infer%s.pdf"%str(min_diff))
else:
pdf_path = os.path.join(dirs['figures'],"figures-train%s.pdf"%str(min_diff))
pp= PdfPages(pdf_path)
for fb_noext, fig in zip(ffiles,figs):
for ext in FIGS_EXTENSIONS:
if ext == 'pdf':
pass
fig.savefig(pp,format='pdf')
else:
if mode == 'infer':
noext = fb_noext+'-infer'
else:
noext = fb_noext+'-train'
fig.savefig(
fname = os.path.join(dirs['figures'],noext+"."+ext),
transparent = TRANSPARENT,
)
pass
if pp:
pp.close()
if 'show' in FIGS_SAVESHOW:
plt.show()
#FIGS_EXTENSIONS
#qsf.evaluateAllResults(result_files = files['result'],
# absolute_disparity = ABSOLUTE_DISPARITY,
# cluster_radius = CLUSTER_RADIUS)
print("All done")
exit (0)
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/qcstereo_functions.py 0000664 0000000 0000000 00000131172 13517677053 0024701 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import numpy as np
import tensorflow as tf
import xml.etree.ElementTree as ET
import time
import imagej_tiffwriter
TIME_LAST = 0
TIME_START = 0
corr2_limits = None
MARGINS = 2 # disregard errors outside
NN_DISP = 0
#HEUR_DISP = 1
TARGET_DISP = 1
GT_DISP = 2
GT_CONF = 3
NN_NAN = 4 #first inserted layer
HEUR_NAN = 5
NN_DIFF = 6
HEUR_DIFF = 7
NN_ERR_SNGL = 8
NN_ERR_SNGL_NEIB = 9
FGBG_SNGL = 10
FGBG_SNGL_NEIB = 11 #last inserted layer
CUTCORN_COST_NW = 12
CUTCORN_COST = 13
GT_AVG_DIST = 14
AVG8_DISP = 15
GT_DISP1 = 16
OUT_AVG = 17
AUX_DISP = 18
FG_DISP = 19
BG_DISP = 20
GT_RMS = 21
GT_RMS_SPLIT = 22
EXTEND = CUTCORN_COST_NW - NN_NAN # insert this many layers (8)
SLICE_LABELS = ["nn_out_ext","target_disp","gt_disparity","gt_strength",
"cutcorn_cost_nw","cutcorn_cost",
"gt_avg_dist","avg8_disp","gt_disp","out_avg",
"aux_disp","fg_disp","bg_disp","gt_rms","gt_rms_split"]
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[38;5;214m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
def print_time(txt="",end="\n"):
global TIME_LAST
t = time.time()
if txt:
txt +=" "
print(("%s"+bcolors.BOLDWHITE+"at %.4fs (+%.4fs)"+bcolors.ENDC)%(txt,t-TIME_START,t-TIME_LAST), end = end, flush=True)
TIME_LAST = t
DEFAULT_TITLES = [
['test_lvar', 'Test_flat_heuristic'],
['test_hvar', 'Test_edge_heuristic'],
['test_lvar1', 'Test_flat_random'],
['test_hvar1', 'Test_edge_random'],
['fake_lvar', 'Fake_flat_heuristic'],
['fake_hvar', 'Fake_edge_heuristic'],
['fake_lvar1', 'Fake_flat_random'],
['fake_hvar1', 'Fake_edge_random']]
def parseXmlConfig(conf_file, root_dir):
tree = ET.parse(conf_file)
root = tree.getroot()
parameters = {}
for p in root.find('parameters'):
## print ("p.tag=%s, p.text.stri[p()=%s"%(p.tag,p.text.strip()))
parameters[p.tag]=eval(p.text.strip())
# globals
dirs={}
for p in root.find('directories'):
dirs[p.tag]=eval(p.text.strip())
if not os.path.isabs(dirs[p.tag]):
dirs[p.tag] = os.path.join(root_dir, dirs[p.tag])
files={}
for p in root.find('files'):
files[p.tag]=eval(p.text.strip())
dbg_parameters = {}
for p in root.find('dbg_parameters'):
try:
dbg_parameters[p.tag]=eval(p.text.strip())
except:
print("Error in xml - p.tag = %s, p.text.strip()=%s"%(p.tag, p.text.strip()))
continue
return parameters, dirs, files, dbg_parameters
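# Sketch of a config file parseXmlConfig() accepts (hypothetical tag names and
# values; the root tag name is arbitrary, and each element's text goes through
# eval(), so string values must be quoted):
# <properties>
#   <parameters>
#     <ABSOLUTE_DISPARITY>False</ABSOLUTE_DISPARITY>
#     <CLUSTER_RADIUS>2</CLUSTER_RADIUS>
#   </parameters>
#   <directories>
#     <train_lvar>"tf_data/train_lvar"</train_lvar>
#     <result>"results"</result>
#   </directories>
#   <files>
#     <train_lvar>["train000.tfrecords", "train001.tfrecords"]</train_lvar>
#   </files>
#   <dbg_parameters>
#     <disparity_ranges>[[0.0, 5.0]]</disparity_ranges>
#   </dbg_parameters>
# </properties>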
def defaultTestTitles(files):
test_titles = []
for f, n in DEFAULT_TITLES:
if f in files:
test_titles.append(n)
else:
test_titles.append(None)
return test_titles
def prepareFiles(dirs, files, suffix):
#MAX_FILES_PER_GROUP
for i, path in enumerate(files['train_lvar']):
files['train_lvar'][i]=os.path.join(dirs['train_lvar'], path)
for i, path in enumerate(files['train_hvar']):
files['train_hvar'][i]=os.path.join(dirs['train_hvar'], path)
for i, path in enumerate(files['train_lvar1']):
files['train_lvar1'][i]=os.path.join(dirs['train_lvar1'], path)
for i, path in enumerate(files['train_hvar1']):
files['train_hvar1'][i]=os.path.join(dirs['train_hvar1'], path)
for i, path in enumerate(files['test_lvar']):
files['test_lvar'][i]=os.path.join(dirs['test_lvar'], path)
for i, path in enumerate(files['test_hvar']):
files['test_hvar'][i]=os.path.join(dirs['test_hvar'], path)
if ('test_lvar1' in files) and ('test_lvar1' in dirs):
for i, path in enumerate(files['test_lvar1']):
files['test_lvar1'][i]=os.path.join(dirs['test_lvar1'], path)
if ('test_hvar1' in files) and ('test_hvar1' in dirs):
for i, path in enumerate(files['test_hvar1']):
files['test_hvar1'][i]=os.path.join(dirs['test_hvar1'], path)
if ('fake_lvar' in files) and ('fake_lvar' in dirs):
for i, path in enumerate(files['fake_lvar']):
files['fake_lvar'][i]=os.path.join(dirs['fake_lvar'], path)
if ('fake_hvar' in files) and ('fake_hvar' in dirs):
for i, path in enumerate(files['fake_hvar']):
files['fake_hvar'][i]=os.path.join(dirs['fake_hvar'], path)
if ('fake_lvar1' in files) and ('fake_lvar1' in dirs):
for i, path in enumerate(files['fake_lvar1']):
files['fake_lvar1'][i]=os.path.join(dirs['fake_lvar1'], path)
if ('fake_hvar' in files) and ('fake_hvar' in dirs):
for i, path in enumerate(files['fake_hvar1']):
files['fake_hvar1'][i]=os.path.join(dirs['fake_hvar1'], path)
result_files=[]
for i, path in enumerate(files['images']):
result_files.append(os.path.join(dirs['result'], path+"_"+suffix+'.npy'))
files['result'] = result_files
if not 'checkpoints' in files:
files['checkpoints'] = 'checkpoints'
if not 'checkpoints' in dirs:
dirs['checkpoints'] = dirs['result']
files['checkpoints'] = os.path.join(dirs['checkpoints'], files['checkpoints'])
if not 'inference' in files:
files['inference'] = 'inference'
if not 'inference' in dirs:
dirs['inference'] = dirs['result']
files['inference'] = os.path.join(dirs['inference'], files['inference'])
if not 'exportdir' in files:
files['exportdir'] = 'exportdir'
if not 'exportdir' in dirs:
dirs['exportdir'] = dirs['result']
files['exportdir'] = os.path.join(dirs['exportdir'], files['exportdir'])
if not 'figures' in dirs:
dirs['figures'] = os.path.join(dirs['result'],"figs")
files['train'] = [files['train_lvar'],files['train_hvar'], files['train_lvar1'], files['train_hvar1']]
files['test'] = [files['test_lvar'], files['test_hvar']]
if 'test_lvar1' in files:
files['test'].append(files['test_lvar1'])
if 'test_hvar1' in files:
files['test'].append(files['test_hvar1'])
# should be after result files
for i, path in enumerate(files['images']):
files['images'][i] = os.path.join(dirs['images'], path+'.tfrecords')
def readTFRewcordsEpoch(train_filename, cluster_radius):
if not '.tfrecords' in train_filename:
train_filename += '.tfrecords'
npy_dir_name = "npy"
dirname = os.path.dirname(train_filename)
npy_dir = os.path.join(dirname, npy_dir_name)
filebasename, _ = os.path.splitext(train_filename)
filebasename = os.path.basename(filebasename)
file_all = os.path.join(npy_dir,filebasename + '.npy')
if os.path.exists(file_all):
data = np.load (file_all)
else:
record_iterator = tf.python_io.tf_record_iterator(path=train_filename)
corr2d_list=[]
target_disparity_list=[]
gt_ds_list = []
extra_list = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
corr2d_list.append (np.array(example.features.feature['corr2d'].float_list.value, dtype=np.float32))
target_disparity_list.append (np.array(example.features.feature['target_disparity'].float_list.value, dtype=np.float32))
gt_ds_list.append (np.array(example.features.feature['gt_ds'].float_list.value, dtype= np.float32))
try:
extra_list.append (np.array(example.features.feature['extra'].float_list.value, dtype= np.float32))
except:
pass
corr2d= np.array(corr2d_list)
target_disparity = np.array(target_disparity_list)
gt_ds = np.array(gt_ds_list)
if len(extra_list):
extra = np.array(extra_list)
else:
extra = None
try:
os.makedirs(os.path.dirname(file_all))
except:
pass
if cluster_radius > 0:
reformat_to_clusters(
corr2d,
target_disparity,
gt_ds,
extra,
cluster_radius)
if not extra is None:
data = np.concatenate(
[corr2d, target_disparity, gt_ds, extra],
axis = 1)
else:
data = np.concatenate(
[corr2d, target_disparity, gt_ds],
axis = 1)
np.save(file_all, data)
return data
def getMoreFiles(fpaths,rslt, cluster_radius, hor_flip, tile_layers, tile_side):
for fpath in fpaths:
dataset = readTFRewcordsEpoch(fpath, cluster_radius)
if hor_flip:
if np.random.randint(2):
print_time("Performing horizontal flip", end=" ")
flip_horizontal(dataset, cluster_radius, tile_layers, tile_side)
print_time("Done")
rslt.append(dataset)
#from http://warmspringwinds.github.io/tensorflow/tf-slim/2016/12/21/tfrecords-guide/
def read_and_decode(filename_queue, featrures_per_tile):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'corr2d': tf.FixedLenFeature([featrures_per_tile],tf.float32), #string),
'target_disparity': tf.FixedLenFeature([1], tf.float32), #.string),
'gt_ds': tf.FixedLenFeature([2], tf.float32) #.string)
})
corr2d = features['corr2d'] # tf.decode_raw(features['corr2d'], tf.float32)
target_disparity = features['target_disparity'] # tf.decode_raw(features['target_disparity'], tf.float32)
gt_ds = tf.cast(features['gt_ds'], tf.float32) # tf.decode_raw(features['gt_ds'], tf.float32)
in_features = tf.concat([corr2d,target_disparity],0)
corr2d_out, target_disparity_out, gt_ds_out = tf.train.shuffle_batch( [in_features, target_disparity, gt_ds],
batch_size=1000, # 2,
capacity=30,
num_threads=2,
min_after_dequeue=10)
return corr2d_out, target_disparity_out, gt_ds_out
def add_margins(npa,radius, val = np.nan):
npa_ext = np.empty((npa.shape[0]+2*radius, npa.shape[1]+2*radius, npa.shape[2]), dtype = npa.dtype)
npa_ext[radius:radius + npa.shape[0],radius:radius + npa.shape[1]] = npa
npa_ext[0:radius,:,:] = val
npa_ext[radius + npa.shape[0]:,:,:] = val
npa_ext[:,0:radius,:] = val
npa_ext[:, radius + npa.shape[1]:,:] = val
return npa_ext
def add_neibs(npa_ext,radius):
height = npa_ext.shape[0]-2*radius
width = npa_ext.shape[1]-2*radius
side = 2 * radius + 1
# size = side * side
npa_neib = np.empty((height, width, side, side, npa_ext.shape[2]), dtype = npa_ext.dtype)
for dy in range (side):
for dx in range (side):
npa_neib[:,:,dy, dx,:]= npa_ext[dy:dy+height, dx:dx+width]
return npa_neib.reshape(height, width, -1)
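def _demo_margins_neibs():
    """
    Illustrative sketch (toy shapes, not part of the pipeline): add_margins()
    pads a (H,W,C) tile grid with NaN, and add_neibs() then concatenates each
    tile with its (2*radius+1)^2 neighborhood, turning (H,W,C) into
    (H, W, side*side*C).
    """
    a = np.arange(12, dtype=np.float32).reshape(3, 4, 1)  # 3x4 grid, 1 value per tile
    neib = add_neibs(add_margins(a, 1, np.nan), 1)        # -> shape (3, 4, 9)
    assert neib.shape == (3, 4, 9)
    # the center tile (1,1) now carries its full 3x3 neighborhood, row-major
    assert list(neib[1, 1]) == [0, 1, 2, 4, 5, 6, 8, 9, 10]
    return neib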
def extend_img_to_clusters(datasets_img,radius, width): # = 324):
# side = 2 * radius + 1
# size = side * side
if len(datasets_img) ==0:
return
num_tiles = datasets_img[0]['corr2d'].shape[0]
height = num_tiles // width
for rec in datasets_img:
if not rec is None:
rec['corr2d'] = add_neibs(add_margins(rec['corr2d'].reshape((height,width,-1)), radius, np.nan), radius).reshape((num_tiles,-1))
rec['target_disparity'] = add_neibs(add_margins(rec['target_disparity'].reshape((height,width,-1)), radius, np.nan), radius).reshape((num_tiles,-1))
rec['gt_ds'] = add_neibs(add_margins(rec['gt_ds'].reshape((height,width,-1)), radius, np.nan), radius).reshape((num_tiles,-1))
try:
rec['extra'] = add_neibs(add_margins(rec['extra'].reshape((height,width,-1)), radius, np.nan), radius).reshape((num_tiles,-1))
except:
pass
pass
def reformat_to_clusters_rec(datasets_data, cluster_radius):
cluster_size = (2 * cluster_radius + 1) * (2 * cluster_radius + 1)
# Reformat input data
for rec in datasets_data:
rec['corr2d'] = rec['corr2d'].reshape( (rec['corr2d'].shape[0]//cluster_size, rec['corr2d'].shape[1] * cluster_size))
rec['target_disparity'] = rec['target_disparity'].reshape((rec['target_disparity'].shape[0]//cluster_size, rec['target_disparity'].shape[1] * cluster_size))
rec['gt_ds'] = rec['gt_ds'].reshape( (rec['gt_ds'].shape[0]//cluster_size, rec['gt_ds'].shape[1] * cluster_size))
try:
rec['extra'] = rec['extra'].reshape( (rec['extra'].shape[0]//cluster_size, rec['extra'].shape[1] * cluster_size))
except:
pass
def reformat_to_clusters(
corr2d,
target_disparity,
gt_ds,
extra, # may be None
cluster_radius):
cluster_size = (2 * cluster_radius + 1) * (2 * cluster_radius + 1)
# Reformat input data
corr2d.shape = ((corr2d.shape[0]//cluster_size, corr2d.shape[1] * cluster_size))
target_disparity.shape = ((target_disparity.shape[0]//cluster_size, target_disparity.shape[1] * cluster_size))
gt_ds.shape = ((gt_ds.shape[0]//cluster_size, gt_ds.shape[1] * cluster_size))
if not extra is None:
extra.shape = ((extra.shape[0]//cluster_size, extra.shape[1] * cluster_size))
def get_lengths(
cluster_radius,
tile_layers,
tile_side):
cluster_side = 2 * cluster_radius + 1
cl = cluster_side * cluster_side * tile_layers * tile_side * tile_side
tl = cluster_side * cluster_side
gl = cluster_side * cluster_side * 2 # disparity+strength, rest goes to extra
return cl, tl, gl, cluster_side
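# Worked example (plausible LWIR-style values; the exact numbers are an
# assumption, not taken from a config): cluster_radius=2, tile_layers=4,
# tile_side=9 give cluster_side=5, so cl = 25*4*81 = 8100 correlation values,
# tl = 25 target disparities and gl = 50 gt disparity/strength values per
# merged dataset row.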
def flip_horizontal(dataset, cluster_radius, tile_layers, tile_side):
cl, tl, gl, cluster_side = get_lengths(cluster_radius, tile_layers, tile_side)
corr2d = dataset[:,:cl] .reshape([dataset.shape[0], cluster_side, cluster_side, tile_layers, tile_side, tile_side])
target_disparity = dataset[:,cl:cl+tl].reshape([dataset.shape[0], cluster_side, cluster_side, -1])
gt_ds = dataset[:,cl+tl:cl+tl+gl] .reshape([dataset.shape[0], cluster_side, cluster_side, -1])
# no extra here !
"""
Horizontal flip of tiles
"""
corr2d = corr2d[:,:,::-1,...]
target_disparity = target_disparity[:,:,::-1,...]
gt_ds = gt_ds[:,:,::-1,...]
corr2d[:,:,:,0,:,:] = corr2d[:,:,:,0,::-1,:] # flip vertical layer0 (hor)
corr2d[:,:,:,1,:,:] = corr2d[:,:,:,1,:,::-1] # flip horizontal layer1 (vert)
corr2d_2 = corr2d[:,:,:,3,::-1,:].copy() # flip vertical layer3 (diago)
corr2d[:,:,:,3,:,:] = corr2d[:,:,:,2,::-1,:] # flip vertical layer2 (diago)
corr2d[:,:,:,2,:,:] = corr2d_2
"""
pack back into a single (input)array
"""
dataset[:,:cl] = corr2d.reshape((corr2d.shape[0],-1))
dataset[:,cl:cl+tl] = target_disparity.reshape((target_disparity.shape[0],-1))
dataset[:,cl+tl:] = gt_ds.reshape((gt_ds.shape[0],-1))
def replace_nan(datasets_data): # , cluster_radius):
# Reformat input data
for rec in datasets_data:
if not rec is None:
np.nan_to_num(rec['corr2d'], copy = False)
np.nan_to_num(rec['target_disparity'], copy = False)
if 'gt_ds' in rec:
np.nan_to_num(rec['gt_ds'], copy = False)
if 'extra' in rec:
np.nan_to_num(rec['extra'], copy = False)
def permute_to_swaps(perm):
pairs = []
for i in range(len(perm)):
w = np.where(perm == i)[0][0]
if w != i:
pairs.append([i,w])
perm[w] = perm[i]
perm[i] = i
return pairs
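def _demo_permute_to_swaps():
    # Sketch with a hypothetical 3-element permutation (permute_to_swaps()
    # mutates its argument, hence the copy): applying the returned
    # transpositions in order moves the element at index i to index perm[i].
    perm = np.array([2, 0, 1])
    pairs = permute_to_swaps(perm.copy())  # -> [[0, 1], [1, 2]]
    a = ['a', 'b', 'c']
    for i, w in pairs:
        a[i], a[w] = a[w], a[i]
    return a  # ['b', 'c', 'a']: 'a' ended at index 2 == perm[0], etc.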
def shuffle_in_place(dataset_data, #alternating clusters from 4 sources.each cluster has all needed data (concatenated)
period):
for i in range (period):
np.random.shuffle(dataset_data[i::period])
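def _demo_shuffle_in_place():
    # Sketch (toy data): the merged train buffer interleaves clusters from 4
    # sources, so entries i, i+4, i+8, ... all belong to source i. Shuffling
    # each stride separately randomizes order without mixing the sources.
    data = np.arange(16)
    shuffle_in_place(data, 4)
    assert set(data[0::4]) == {0, 4, 8, 12}  # source-0 slots still hold source-0 entries
    return data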
def add_file_to_dataset(dataset, new_dataset, train_next):
train_next['file'] = (train_next['file']+1)%train_next['files']
l = new_dataset.shape[0] * train_next['step']
if (train_next['entry'] + l) < (train_next['entries']+train_next['step']):
dataset[train_next['entry']:train_next['entry']+l:train_next['step']] = new_dataset
train_next['entry'] += l
if (train_next['entry'] >= train_next['entries']):
train_next['entry'] -= train_next['entries']
return True
else:
return False
else: # split it two parts
l = (train_next['entries'] - train_next['entry'] + (train_next['step']-1)) // train_next['step']
dataset[train_next['entry']::train_next['step']] = new_dataset[:l]
train_next['entry'] = (train_next['entry'] + l * train_next['step']) % train_next['entries'] #0,1,2,3
l1 = new_dataset.shape[0] - l # remainder
ln = train_next['entry'] + l1 * train_next['step']
dataset[train_next['entry']:ln:train_next['step']] = new_dataset[l:]
train_next['entry'] = ln
return True
"""
train_next[n_train]
Read as many files as needed, possibly repeating, until each buffer is full
"""
def initTrainTestData(
files,
cluster_radius,
buffer_size, # number of clusters per train
test_titles = None
):
"""
Generates a single np array for training with concatenated cluster of corr2d,
cluster of target_disparity, and cluster of gt_ds for convenient shuffling
"""
num_trains = len(files['train'])
num_entries = num_trains * buffer_size
dataset_train_merged = None
train_next = [None]*num_trains
for n_train, f_train in enumerate(files['train']):
train_next[n_train] = {'file':0, 'entry':n_train, 'files':len(f_train), 'entries': num_entries, 'step':num_trains, 'more_files':False}
buffer_full = False
while not buffer_full:
for fpath in f_train:
print_time("Importing train data "+(["low variance","high variance", "low variance1","high variance1"][n_train]) +" from "+fpath, end="")
new_dataset = readTFRewcordsEpoch(fpath, cluster_radius)
if dataset_train_merged is None:
dataset_train_merged = np.empty([num_entries,new_dataset.shape[1]], dtype =new_dataset.dtype)
## print("\nbefore add_file_to_dataset: train_next["+str(n_train)+"]=",train_next[n_train])
rollover = add_file_to_dataset(
dataset = dataset_train_merged,
new_dataset = new_dataset,
train_next = train_next[n_train])
## print("after add_file_to_dataset: train_next["+str(n_train)+"]=",train_next[n_train])
print_time(" Done")
if rollover:
buffer_full = True
train_next[n_train][ 'more_files'] = train_next[n_train][ 'file'] < train_next[n_train][ 'files'] # Not all files used, need to load during training
break
if test_titles is None:
test_titles = defaultTestTitles(files)
datasets_test = []
for t,v in zip(test_titles,DEFAULT_TITLES):
if not t is None:
grp = v[0]
for fpath in files[grp]:
print_time("Importing test data ("+grp+") from "+fpath, end="")
new_dataset = readTFRewcordsEpoch(fpath, cluster_radius)
datasets_test.append(new_dataset)
print_time(" Done")
"""
for grp in ['test_lvar','test_hvar','test_lvar1','test_hvar1']:
if grp in files:
for fpath in files[grp]:
print_time("Importing test data ("+grp+") from "+fpath, end="")
new_dataset = readTFRewcordsEpoch(fpath, cluster_radius)
datasets_test.append(new_dataset)
print_time(" Done")
"""
return train_next, dataset_train_merged, datasets_test
def get_full_tile_indices2d(height,width):
a = np.empty([height,width,2], dtype=np.int32)
a[...,0] = np.arange(height).reshape([height,1])
a[...,1] = np.arange(width)
return a.reshape(-1,2)
def get_full_tile_indices(height,width):
return np.arange(height*width).reshape(-1,1)
def readImageData(image_data,
files,
indx,
cluster_radius,
tile_layers,
tile_side,
width,
replace_nans,
infer = False,
keep_gt = False):
cl, tl, gl, _ = get_lengths(0, tile_layers, tile_side)
if image_data[indx] is None:
dataset = readTFRewcordsEpoch(
files['images'][indx],
cluster_radius = 0)
corr2d = dataset[:,:cl]
target_disparity = dataset[:,cl:cl+tl]
if infer:
image_data[indx] = {
'corr2d': corr2d,
'target_disparity': target_disparity,
'xy': get_full_tile_indices2d(corr2d.shape[0]//width, width),
'ntile': get_full_tile_indices(corr2d.shape[0]//width, width)}
if keep_gt:
gt_ds = dataset[:,cl+tl:cl+tl+gl]
extra = dataset[:,cl+tl+gl:]
image_data[indx]["gt_ds"] = gt_ds
image_data[indx]["gtruths"]= gt_ds.copy()
image_data[indx]["t_disps"]= target_disparity.reshape([-1,1]).copy()
image_data[indx]["extra"] = extra
image_data[indx]["t_extra"] = extra.copy()
else:
gt_ds = dataset[:,cl+tl:cl+tl+gl]
extra = dataset[:,cl+tl+gl:]
image_data[indx] = {
'corr2d': corr2d,
'target_disparity': target_disparity,
"gt_ds": gt_ds,
"gtruths": gt_ds.copy(), # never used?
"t_disps": target_disparity.reshape([-1,1]).copy(),
"extra": extra, # will be increased by 25
"t_extra": extra.copy() } #will still be (ntiles,3)
if cluster_radius > 0:
extend_img_to_clusters(
[image_data[indx]],
cluster_radius,
width)
if replace_nans:
replace_nan([image_data[indx]])
if not (corr2_limits is None):
image_data[indx]['corr2d'] = np.clip(image_data[indx]['corr2d'], corr2_limits[0], corr2_limits[1])
return image_data[indx]
def setCorr2Limits(limits):
if not (limits is None) and ((not limits[0] is None) or (not limits[1] is None)):
globals()['corr2_limits'] = limits
else:
globals()['corr2_limits'] = None
def initImageData(files,
max_imgs,
cluster_radius,
tile_layers,
tile_side,
width,
replace_nans,
infer = False,
keep_gt = False):
# no_train = False):
num_imgs = len(files['images'])
img_data = [None] * num_imgs
for nfile in range(min(num_imgs, max_imgs)):
print_time("Importing test image data from "+ files['images'][nfile], end="")
readImageData(img_data,
files,
nfile,
cluster_radius,
tile_layers,
tile_side,
width,
replace_nans,
infer = infer,
keep_gt = keep_gt)
print_time(" Done")
return img_data
def evaluateAllResults(result_files, absolute_disparity, cluster_radius, labels=None, logpath=None, fgbg_mode=1):
if logpath:
lf=open(logpath,"w")
else:
lf = None
for result_file in result_files:
try:
print_time("Reading resuts from "+result_file, end=" ")
eval_results(result_file, absolute_disparity, radius=cluster_radius, last_fgbg_mode = fgbg_mode, logfile=lf)
except:
print_time(" - does not exist")
continue
print_time("Done")
print_time("Saving resuts to tiff", end=" ")
result_npy_to_tiff(result_file, absolute_disparity, fix_nan = True, labels=labels)
print_time("Done")
if lf:
lf.close()
def result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas=True,labels=None):
"""
@param npy_path full path to the npy file with 4-layer data (242,324,4) - nn_disparity(offset), target_disparity, gt disparity, gt strength
data will be written as 4-layer tiff, extension '.npy' replaced with '.tiff'
@param absolute - True - the first layer contains absolute disparity, False - difference from target_disparity
@param fix_nan - replace nan in target_disparity with 0 to apply offset, target_disparity will still contain nan
@param insert_deltas: +1 - add delta layers, +2 - add variance (max - min of this and 8 neighbors)
with lwir data.shape = (15, 20, 15)
"""
data = np.load(npy_path) #(324,242,4) [nn_disp, target_disp,gt_disp, gt_conf]
if labels is None:
labels = ["chn%d"%(i) for i in range(data.shape[2])]
# labels = ["nn_out","hier_out","gt_disparity","gt_strength"]
# extend = 8 # inserted extend slices
# nn_out = 0
# target_disparity = 1
# gt_disparity = 2
# gt_strength = 3
# nn_out1 = 4
# heur_out = 5
# nn_err = 6
# heur_err = 7
# nn_err_sngl = 8
# nn_err_sngl_neib = 9
# fgbg_sngl = 10
# fgbg_sngl_neib = 11
# cutcorn_cost_nw = 12
# aux_disp = 18
# fg_disp = 19
# bg_disp = 20
# gt_rms = 21
# gt_rms_split = 22
min_heur_err = 0.001
height = data.shape[0]
width = data.shape[1]
nocenter9 = np.array([[[1,1,1,1,np.nan,1,1,1,1]]], dtype = data.dtype)
if not absolute:
if fix_nan:
data[...,NN_DISP] += np.nan_to_num(data[...,1], copy=True)
else:
data[...,NN_DISP] += data[...,1]
if (insert_deltas & 1):
np.nan_to_num(data[...,GT_CONF], copy=False)
data = np.concatenate(
[data[...,0:4],
data[...,NN_DISP: NN_DISP+1],
data[...,AUX_DISP-EXTEND:AUX_DISP-EXTEND+1], #data[...,0:2],
data[...,NN_DISP: NN_DISP+1],
data[...,AUX_DISP-EXTEND:AUX_DISP-EXTEND+1], #data[...,0:2],
np.empty_like(data[...,0:4]),
data[...,4:]],
axis = 2) # data[...,4:] may be empty
labels = labels[:4]+["nn_out","heur_out","nn_err","heur_err", "nn_err_sngl", "nn_err_sngl_neib", "fgbg_sngl", "fgbg_sngl_neib"]+labels[4:]
data[..., NN_DIFF] -= data[...,GT_DISP] # 6
data[..., HEUR_DIFF] -= data[...,GT_DISP] # 7
#replace data with NaN where gt_strength == 0 in selected layers
for l in [GT_DISP, NN_NAN, HEUR_NAN, NN_DIFF, HEUR_DIFF]: # 0, 4, 5, 6, 7
if l < data.shape[2]:
data[...,l] = np.select([data[...,GT_CONF]==0.0, data[...,GT_CONF]>0.0], [np.nan,data[...,l]])
# Mask all other layers the same way
# for l in range(8,data.shape[2]):
for l in range(CUTCORN_COST_NW, AUX_DISP):
data[...,l] = np.select([data[...,GT_CONF]==0.0, data[...,GT_CONF]>0.0], [np.nan,data[...,l]])
# Filter NN errors by excluding margins and using only single-plane (no FG+BG) tiles, and tiles that do not have split FG/BG neighbors
fgbg_single = data[...,GT_RMS] <= data[...,GT_RMS_SPLIT]
fgbg_ext = 1
fgbg_single_ext = np.ones((height + 2 * fgbg_ext, width + 2 * fgbg_ext),dtype=np.bool)
fgbg_single_ext[fgbg_ext:-fgbg_ext, fgbg_ext:-fgbg_ext] = fgbg_single
for dy in range(2*fgbg_ext+1):
for dx in range(2*fgbg_ext+1):
fgbg_single_ext[dy:dy+fgbg_single.shape[0], dx:dx+fgbg_single.shape[1]] &= fgbg_single
fgbg_single2 = fgbg_single_ext[fgbg_ext:-fgbg_ext,fgbg_ext:-fgbg_ext] #
#create margins array
if MARGINS > 0:
wo_margins = np.zeros((height, width), dtype=bool)
wo_margins[MARGINS:-MARGINS, MARGINS:-MARGINS] = True
fgbg_single &= wo_margins
fgbg_single2 &= wo_margins
data[..., NN_ERR_SNGL] = fgbg_single * data[..., NN_DIFF]
data[..., NN_ERR_SNGL_NEIB] = fgbg_single2 * data[..., NN_DIFF]
data[..., FGBG_SNGL] = fgbg_single * 1.0
data[..., FGBG_SNGL_NEIB] = fgbg_single2 * 1.0
"""
Calculate bad tiles where gt was used as a master, to remove them from the results (later versions add random error)
"""
bad1 = abs(data[...,HEUR_DIFF]) < min_heur_err
bad1_ext = np.concatenate([bad1 [0:1,:], bad1 [0:1,:], bad1[:,:], bad1 [-1:height,:], bad1 [-1:height,:]],axis = 0)
bad1_ext = np.concatenate([bad1_ext[:,0:1], bad1_ext[:,0:1], bad1_ext[:,:], bad1_ext[:,-1:width], bad1_ext[:,-1:width]], axis = 1)
bad25 = np.empty(shape=[height, width, 25], dtype=bad1.dtype)
bm25=np.array([[[1,1,1,1,1, 1,1,1,1,1, 1,1,1,1,1, 1,1,1,1,1, 1,1,1,1,1]]])
bm09=np.array([[[0,0,0,0,0, 0,1,1,1,0, 0,1,1,1,0, 0,1,1,1,0, 0,0,0,0,0]]])
bm01=np.array([[[0,0,0,0,0, 0,0,0,0,0, 0,0,1,0,0, 0,0,0,0,0, 0,0,0,0,0]]])
for row in range(5):
for col in range(5):
pass
bad25 [...,row*5+col]= bad1_ext[row:height+row, col:width+col]
bad_num1=(np.sum(bad25*bm25,axis=2) > 0).astype(data.dtype)
bad_num2=(np.sum(bad25*bm09,axis=2) > 0).astype(data.dtype)
bad_num3=(np.sum(bad25*bm01,axis=2) > 0).astype(data.dtype)
bad_num = bad_num1 + bad_num2 + bad_num3
if (insert_deltas & 2):
wo = 0.7 # ortho
wc = 0.5 #corner
w8=np.array([wc,wo,wc,wo,0.0,wo,wc,wo,wc], dtype=data.dtype)
w8/=np.sum(w8) #normalize
gt_ext = np.concatenate([data[0:1,:,GT_DISP], data[:,:,GT_DISP], data[-1:height,:,GT_DISP]],axis = 0)
gt_ext = np.concatenate([gt_ext[:,0:1], gt_ext[:,:], gt_ext[:,-1:width]], axis = 1)
gs_ext = np.concatenate([data[0:1,:,GT_CONF], data[:,:,GT_CONF], data[-1:height,:,GT_CONF]],axis = 0)
gs_ext = np.concatenate([gs_ext[:,0:1], gs_ext[:,:], gs_ext[:,-1:width]], axis = 1)
data9 = np.empty(shape=[height, width, 9], dtype=data.dtype)
weight9 = np.empty(shape=[height, width, 9], dtype=data.dtype)
for row in range(3):
for col in range(3):
pass
data9 [...,row*3+col]= gt_ext[row:height+row, col:width+col]
weight9[...,row*3+col]= gs_ext[row:height+row, col:width+col]
data9 *= weight9/weight9 # make data = NaN where weight is 0
# data = np.concatenate([data[...],np.empty_like(data[...,-1])], axis = 2) # data[...,4:] may be empty
data = np.concatenate([data[...],np.empty(shape=[height,width,4],dtype=data.dtype)], axis = 2) # data[...,4:] may be empty
data[...,-4] = np.nanmax(data9*nocenter9, axis=2)-np.nanmin(data9*nocenter9,axis=2)# will ignore nan
np.nan_to_num(data9,copy=False) # replace all nan in data9 with 0.
weight9 *= w8
w_center = np.sum(weight9, axis=2)
dw_center = np.sum(data9*weight9, axis=2)
dw_center /= w_center # now dw_center - weighted average in the center
data[...,-3] = np.abs(data[...,GT_DISP]- dw_center)
# data[...,-2] = data[...,gt_disparity]- dw_center
#data[...,-3] *= (data[...,-4] < 1.0) # just temporary
#data[...,-3] *= (data[...,gt_disparity] < 5) #just temporary
data[...,-2] =bad_num.astype(data.dtype)
data [...,-1]= np.sum(np.nan_to_num(weight9/weight9),axis=2).astype(data.dtype)
# data[...,-1] = dw_center
labels +=["max-min","abs-center","badness","neibs"]
#neib = np.concatenate([gt_ext[:height,:width,:],],axis = )
pass
return data, labels
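# Typical use (sketch; the path is hypothetical): insert_deltas=3 adds both the
# error/delta layers (bit 0) and the 3x3 max-min / abs-center statistics (bit 1)
# that explore_data10.py later bins into histograms:
#   data, labels = result_npy_prepare("results/model_out.npy", absolute=True, fix_nan=True, insert_deltas=3)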
def result_npy_to_tiff(npy_path,
absolute,
fix_nan,
insert_deltas=True,
labels = None,
logfile = None):
"""
@param npy_path full path to the npy file with 4-layer data (242,324,4) - nn_disparity(offset), target_disparity, gt disparity, gt strength
data will be written as 4-layer tiff, extension '.npy' replaced with '.tiff'
@param absolute - True - the first layer contains absolute disparity, False - difference from target_disparity
@param fix_nan - replace nan in target_disparity with 0 to apply offset, target_disparity will still contain nan
"""
data,labels = result_npy_prepare(npy_path, absolute, fix_nan, insert_deltas, labels=labels)
tiff_path = npy_path.replace('.npy','.tiff')
data = data.transpose(2,0,1)
print("Saving results to TIFF: "+tiff_path)
if (logfile):
print("Saving results to TIFF: "+tiff_path,file=logfile)
imagej_tiffwriter.save(tiff_path,data,labels=labels)
def eval_results(rslt_path, absolute,
min_disp = -0.1, #minimal GT disparity
max_disp = 20.0, # maximal GT disparity
max_ofst_target = 1.0,
max_ofst_result = 1.0,
str_pow = 1.0,
last_fgbg_mode = 1, # 0 - no fgbg filter, 1 exclude tiles with fg/bg, 2 exclude fg/bg tiles and neighbors
radius = 0,
logfile = None):
variants = [[ -0.1, 3.0, 0.5, 0.5, 0.0, 1],
[ -0.1, 3.0, 0.5, 0.5, 1.0, 1],
[ -0.1, 3.0, 0.5, 0.5, 0.0, 2],
[ -0.1, 3.0, 0.5, 0.5, 1.0, 2],
[ -0.1, 3.0, 0.5, 0.5, 1.0, 0],
[ -0.1, 10.0, 5.0, 5.0, 0.0, 1],
[ -0.1, 10.0, 5.0, 5.0, 1.0, 1],
[ -0.1, 10.0, 5.0, 5.0, 1.0, 2],
[ -0.1, 10.0, 5.0, 5.0, 1.0, 0],
[ min_disp, max_disp, max_ofst_target, max_ofst_result, str_pow, last_fgbg_mode]]
stack = np.load(rslt_path)
layers = {
"nn_out_ext": stack[..., 0],
"target_disp": stack[..., 1], # used as target disparity, it is not heuristic data!
"gt_disparity": stack[..., 2],
"gt_strength": stack[..., 3],
"cutcorn_cost_nw": stack[..., 4],
"cutcorn_cost": stack[..., 5],
"gt_avg_dist": stack[..., 6],
"avg8_disp": stack[..., 7],
"gt_disp": stack[..., 8],
"out_avg": stack[..., 9],
"aux_disp": stack[...,10],
"fg_disp": stack[...,11],
"bg_disp": stack[...,12],
"gt_rms": stack[...,13],
"gt_rms_split": stack[...,14],
}
'''
SLICE_LABELS = ["nn_out_ext","target_disp","gt_disparity","gt_strength",
"cutcorn_cost_nw","cutcorn_cost",
"gt_avg_dist","avg8_disp","gt_disp","out_avg",
"aux_disp","fg_disp","bg_disp","gt_rms","gt_rms_split"]
MARGINS = 2 # disregard errors outside
'''
fgbg_single = layers["gt_rms"] <= layers["gt_rms_split"]
fgbg_ext = 1
fgbg_single_ext = np.ones((stack.shape[0] + 2 * fgbg_ext, stack.shape[1] + 2 * fgbg_ext),dtype=np.bool)
fgbg_single_ext[fgbg_ext:-fgbg_ext, fgbg_ext:-fgbg_ext] = fgbg_single
for dy in range(2*fgbg_ext+1):
for dx in range(2*fgbg_ext+1):
fgbg_single_ext[dy:dy+fgbg_single.shape[0], dx:dx+fgbg_single.shape[1]] &= fgbg_single
fgbg_single2 = fgbg_single_ext[fgbg_ext:-fgbg_ext,fgbg_ext:-fgbg_ext] #
not_nan = ~np.isnan(layers["nn_out_ext"]) # nn_out_ext
not_nan &= ~np.isnan(layers["target_disp"]) # target_disp
not_nan &= ~np.isnan(layers["gt_disparity"]) # gt_disparity
not_nan &= ~np.isnan(layers["gt_strength"]) # gt_strength
# pessimistic - make not_nan to have no NaN-s in 5x5 clusters. Maybe too strict for LWIR - nothing will remain
if radius > 0:
not_nan_ext = np.zeros((stack.shape[0] + 2*radius,stack.shape[1] + 2 * radius),dtype=np.bool)
not_nan_ext[radius:-radius,radius:-radius] = not_nan
for dy in range(2*radius+1):
for dx in range(2*radius+1):
not_nan_ext[dy:dy+not_nan.shape[0], dx:dx+not_nan.shape[1]] &= not_nan
not_nan = not_nan_ext[radius:-radius,radius:-radius]
if MARGINS > 0:
wo_margins = np.zeros((stack.shape[0],stack.shape[1]), dtype=bool)
wo_margins[MARGINS:-MARGINS, MARGINS:-MARGINS] = True
not_nan &= wo_margins
if not absolute:
stack[...,0] += stack[...,1]
nn_disparity = np.nan_to_num(stack[...,0], copy = False)
target_disparity = np.nan_to_num(stack[...,1], copy = False)
heurist_disparity = np.nan_to_num(layers["aux_disp"], copy = False)
gt_disparity = np.nan_to_num(stack[...,2], copy = False)
gt_strength = np.nan_to_num(stack[...,3], copy = False)
rrslt = []
print ("--------------- %s ---------------"%(rslt_path))
if logfile:
print ("--------------- %s ---------------"%(rslt_path), file=logfile)
for min_disparity, max_disparity, max_offset_target, max_offset_result, strength_pow, fgbg_mode in variants:
good_tiles = not_nan.copy();
if fgbg_mode == 1:
good_tiles &= fgbg_single
elif fgbg_mode == 2:
good_tiles &= fgbg_single2
good_tiles &= (gt_disparity >= min_disparity)
good_tiles &= (gt_disparity <= max_disparity)
# good_tiles &= (target_disparity != gt_disparity)
good_tiles &= (np.abs(heurist_disparity - gt_disparity) <= max_offset_target)
good_tiles &= (np.abs(target_disparity - gt_disparity) <= max_offset_target)
good_tiles &= (np.abs(target_disparity - nn_disparity) <= max_offset_result)
gt_w = gt_strength * good_tiles
if strength_pow > 0: # power (0,0) = 1.0
gt_w = np.power(gt_w,strength_pow)
else:
gt_w = good_tiles * 1.0
sw = gt_w.sum()
diff0 = heurist_disparity - gt_disparity
diff1 = nn_disparity - gt_disparity
diff0_2w = gt_w*diff0*diff0
diff1_2w = gt_w*diff1*diff1
rms0 = np.sqrt(diff0_2w.sum()/sw)
rms1 = np.sqrt(diff1_2w.sum()/sw)
print ("%7.3f= min_disparity)
good_tiles &= (gt_disparity <= max_disparity)
good_tiles &= (target_disparity != gt_disparity)
good_tiles &= (np.abs(target_disparity - gt_disparity) <= max_offset_target)
good_tiles &= (np.abs(target_disparity - nn_disparity) <= max_offset_result)
gt_w = gt_strength * good_tiles
gt_w = np.power(gt_w,strength_pow)
sw = gt_w.sum()
diff0 = target_disparity - gt_disparity
diff1 = nn_disparity - gt_disparity
diff0_2w = gt_w*diff0*diff0
diff1_2w = gt_w*diff1*diff1
rms0 = np.sqrt(diff0_2w.sum()/sw)
rms1 = np.sqrt(diff1_2w.sum()/sw)
print ("%7.3f= var) and
((i // side) < (side - var)) and
((i % side) >= var) and
((i % side) < (side - var)) for i in range (side*side) ] for var in range(radius+1)]
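# Example: concentricSquares(1) -> two 9-element masks over a 3x3 cluster: all
# nine tiles True for var=0, only the center tile True for var=1. Lists like
# these can be passed as 'partials' to networks_siam() to train on reduced
# neighborhoods.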
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/qcstereo_losses.py 0000664 0000000 0000000 00000037274 13517677053 0024211 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#from numpy import float64
#import numpy as np
import tensorflow as tf
def smoothLoss(out_batch, # [batch_size,(1..2)] tf_result
target_disparity_batch, # [batch_size] tf placeholder
gt_ds_batch_clust, # [batch_size,25,2] tf placeholder
clip, # limit punishment for cutting corners (disparity pix)
absolute_disparity = False, #when false there should be no activation on disparity output !
cluster_radius = 2):
with tf.name_scope("SmoothLoss"):
center_tile_index = 2 * cluster_radius * (cluster_radius + 1)
cluster_side = 2 * cluster_radius + 1
cluster_size = cluster_side * cluster_side
w_corner = 0.7
w8 = [w_corner,1.0,w_corner,1.0,1.0,w_corner,1.0,w_corner]
w8 = [w/sum(w8) for w in w8]
tf_w8=tf.reshape(tf.constant(w8, dtype=tf.float32, name="w8_"), shape=[1,-1], name="w8")
i8 = []
for dy in [-1,0,1]:
for dx in [-1,0,1]:
if (dy != 0) or (dx != 0):
i8.append(center_tile_index+(dy*cluster_side)+dx)
tf_clip = tf.constant(clip, dtype=tf.float32, name = "clip")
tf_gt_ds_all = tf.reshape(gt_ds_batch_clust,[-1,cluster_size,gt_ds_batch_clust.shape[1]//cluster_size], name = "gt_ds_all")
tf_neibs8 = tf.gather(tf_gt_ds_all, indices = i8, axis = 1, name = "neibs8")
tf_gt_disparity8 = tf.reshape(tf_neibs8[:,:,0], [-1,8], name = "gt8_disparity") # (?,8)
tf_gt_strength8 = tf.reshape(tf_neibs8[:,:,1], [-1,8], name = "gt8_strength") # (?,8)
tf_w = tf.multiply(tf_gt_strength8, tf_w8, name = "w")
tf_dw = tf.multiply(tf_gt_disparity8, tf_w, name = "dw")
tf_sum_w = tf.reduce_sum(tf_w, axis = 1, name = "sum_w")
tf_sum_dw = tf.reduce_sum(tf_dw, axis = 1, name = "sum_dw")
tf_avg_disparity = tf.divide(tf_sum_dw, tf_sum_w, name = "avg_disparity") # (?,)
tf_gt_disparity = tf.reshape(tf_gt_ds_all[:,center_tile_index,0], [-1], name = "gt_disparity") # (?,)
"""
It is good to limit tf_gt_disparity by min/max (+margin) via tf.reduce_min(tf_gt_disparity8, axis=1, ...), but there could be zeros caused by undefined GT for the tile
"""
tf_gt_strength = tf.reshape(tf_gt_ds_all[:,center_tile_index,1], [-1], name = "gt_strength") # (?,)
tf_d0 = tf.abs(tf_gt_disparity - tf_avg_disparity, name = "tf_d0")
tf_d = tf.maximum(tf_d0, 0.001, name = "tf_d")
## tf_d2 = tf.multiply(tf_d, tf_d, name = "tf_d2")
tf_out = tf.reshape(out_batch[:,0],[-1], name = "tf_out")
if absolute_disparity:
tf_out_disparity = tf_out
else:
tf_out_disparity = tf.add(tf_out, tf.reshape(target_disparity_batch,[-1]),name = "out_disparity")
tf_offs = tf.subtract(tf_out_disparity, tf_avg_disparity, name = "offs")
tf_offs2 = tf.multiply(tf_offs, tf_offs, name = "offs2")
tf_offs2_d = tf.divide(tf_offs2, tf_d, name = "offs2_d")
tf_cost0 = tf.maximum(tf_d - tf_offs2_d, 0.0, name = "cost0")
tf_cost_nw = tf.minimum(tf_cost0, tf_clip, name = "cost_nw")
## tf_cost_nw = tf.maximum(tf_d2 - tf_offs2, 0.0, name = "cost_nw")
tf_cost_w = tf.multiply(tf_cost_nw, tf_gt_strength, name = "cost_w")
tf_sum_wc = tf.reduce_sum(tf_gt_strength, name = "sum_wc")
tf_sum_costw = tf.reduce_sum(tf_cost_w, name = "sum_costw")
tf_cost = tf.divide(tf_sum_costw, tf_sum_wc, name = "cost")
return tf_cost, tf_cost_nw, tf_cost_w, tf_d , tf_avg_disparity, tf_gt_disparity, tf_offs
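# The per-tile cost above, restated (sketch): with d = max(|gt - avg8|, 0.001)
# and offs = out - avg8, cost_nw = min(max(d - offs^2/d, 0), clip). It peaks at
# d when the output sits exactly on the 8-neighbor average despite the ground
# truth being d away (classic corner cutting), and reaches zero once |offs| >= d.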
def batchLoss(out_batch, # [batch_size,(1..2)] tf_result
target_disparity_batch, # [batch_size] tf placeholder
gt_ds_batch, # [batch_size,2] tf placeholder
batch_weights, # [batch_size] now batch index % 4 - different sources, even - low variance, odd - high variance
disp_diff_cap = 10.0, # cap disparity difference to this value (give up on large errors)
disp_diff_slope= 0.0, #allow squared error to grow above disp_diff_cap
absolute_disparity = False, #when false there should be no activation on disparity output !
use_confidence = False,
lambda_conf_avg = 0.01,
## lambda_conf_pwr = 0.1,
conf_pwr = 2.0,
gt_conf_offset = 0.08,
gt_conf_pwr = 1.0,
error2_offset = 0.0025, # 0.0, # 0.0025, # (0.05^2) ~= coring
disp_wmin = 1.0, # minimal disparity to apply weight boosting for small disparities
disp_wmax = 8.0, # maximal disparity to apply weight boosting for small disparities
use_out = False): # use calculated disparity for disparity weight boosting (False - use target disparity)
with tf.name_scope("BatchLoss"):
"""
Here confidence should be after ReLU. Disparity may be too, if absolute, but there should be no activation if the output is residual disparity
"""
tf_lambda_conf_avg = tf.constant(lambda_conf_avg, dtype=tf.float32, name="tf_lambda_conf_avg")
## tf_lambda_conf_pwr = tf.constant(lambda_conf_pwr, dtype=tf.float32, name="tf_lambda_conf_pwr")
## tf_conf_pwr = tf.constant(conf_pwr, dtype=tf.float32, name="tf_conf_pwr")
tf_gt_conf_offset = tf.constant(gt_conf_offset, dtype=tf.float32, name="tf_gt_conf_offset")
tf_gt_conf_pwr = tf.constant(gt_conf_pwr, dtype=tf.float32, name="tf_gt_conf_pwr")
tf_num_tiles = tf.shape(gt_ds_batch)[0]
tf_0f = tf.constant(0.0, dtype=tf.float32, name="tf_0f")
tf_1f = tf.constant(1.0, dtype=tf.float32, name="tf_1f")
## tf_maxw = tf.constant(1.0, dtype=tf.float32, name="tf_maxw")
tf_disp_diff_cap2= tf.constant(disp_diff_cap*disp_diff_cap, dtype=tf.float32, name="disp_diff_cap2")
tf_disp_diff_slope= tf.constant(disp_diff_slope, dtype=tf.float32, name="disp_diff_slope")
if gt_conf_pwr == 0:
w = tf.ones((out_batch.shape[0]), dtype=tf.float32,name="w_ones")
else:
w_slice = tf.reshape(gt_ds_batch[:,1],[-1], name = "w_gt_slice")
w_sub = tf.subtract (w_slice, tf_gt_conf_offset, name = "w_sub")
w_clip = tf.maximum(w_sub, tf_0f, name = "w_clip")
if gt_conf_pwr == 1.0:
w = w_clip
else:
w=tf.pow(w_clip, tf_gt_conf_pwr, name = "w_pow")
if use_confidence:
tf_num_tilesf = tf.cast(tf_num_tiles, dtype=tf.float32, name="tf_num_tilesf")
conf_slice = tf.reshape(out_batch[:,1],[-1], name = "conf_slice")
conf_sum = tf.reduce_sum(conf_slice, name = "conf_sum")
conf_avg = tf.divide(conf_sum, tf_num_tilesf, name = "conf_avg")
conf_avg1 = tf.subtract(conf_avg, tf_1f, name = "conf_avg1")
conf_avg2 = tf.square(conf_avg1, name = "conf_avg2")
cost2 = tf.multiply (conf_avg2, tf_lambda_conf_avg, name = "cost2")
iconf_avg = tf.divide(tf_1f, conf_avg, name = "iconf_avg")
nconf = tf.multiply (conf_slice, iconf_avg, name = "nconf") #normalized confidence
nconf_pwr = tf.pow(nconf, conf_pwr, name = "nconf_pwr")
nconf_pwr_sum = tf.reduce_sum(nconf_pwr, name = "nconf_pwr_sum")
nconf_pwr_offs = tf.subtract(nconf_pwr_sum, tf_1f, name = "nconf_pwr_offs")
cost3 = tf.multiply (conf_avg2, nconf_pwr_offs, name = "cost3")
w_all = tf.multiply (w, nconf, name = "w_all")
else:
w_all = w
# cost2 = 0.0
# cost3 = 0.0
# normalize weights
w_sum = tf.reduce_sum(w_all, name = "w_sum")
iw_sum = tf.divide(tf_1f, w_sum, name = "iw_sum")
w_norm = tf.multiply (w_all, iw_sum, name = "w_norm")
disp_slice = tf.reshape(out_batch[:,0],[-1], name = "disp_slice")
d_gt_slice = tf.reshape(gt_ds_batch[:,0],[-1], name = "d_gt_slice")
td_flat = tf.reshape(target_disparity_batch,[-1], name = "td_flat")
if absolute_disparity:
adisp = disp_slice
else:
adisp = tf.add(disp_slice, td_flat, name = "adisp")
out_diff = tf.subtract(adisp, d_gt_slice, name = "out_diff")
out_diff2 = tf.square(out_diff, name = "out_diff2")
pre_cap0 = tf.abs(out_diff, name = "pre_cap0")
pre_cap = tf.multiply(pre_cap0, tf_disp_diff_slope, name = "pre_cap")
diff_cap = tf.add(pre_cap, tf_disp_diff_cap2, name = "diff_cap")
out_diff2_capped = tf.minimum(out_diff2, diff_cap, name = "out_diff2_capped")
out_wdiff2 = tf.multiply (out_diff2_capped, w_norm, name = "out_wdiff2")
cost1 = tf.reduce_sum(out_wdiff2, name = "cost1")
out_diff2_offset = tf.subtract(out_diff2, error2_offset, name = "out_diff2_offset")
out_diff2_biased = tf.maximum(out_diff2_offset, 0.0, name = "out_diff2_biased")
# calculate disparity-based weight boost
if use_out:
dispw = tf.clip_by_value(adisp, disp_wmin, disp_wmax, name = "dispw")
else:
dispw = tf.clip_by_value(td_flat, disp_wmin, disp_wmax, name = "dispw")
dispw_boost = tf.divide(disp_wmax, dispw, name = "dispw_boost")
dispw_comp = tf.multiply (dispw_boost, w_norm, name = "dispw_comp") #HERE??
if batch_weights.shape[0] > 1:
dispw_batch = tf.multiply (dispw_comp, batch_weights, name = "dispw_batch")# apply weights for high/low variance and sources
else:
dispw_batch = tf.multiply (dispw_comp, tf_1f, name = "dispw_batch")# apply weights for high/low variance and sources
dispw_sum = tf.reduce_sum(dispw_batch, name = "dispw_sum")
idispw_sum = tf.divide(tf_1f, dispw_sum, name = "idispw_sum")
dispw_norm = tf.multiply (dispw_batch, idispw_sum, name = "dispw_norm")
out_diff2_wbiased = tf.multiply(out_diff2_biased, dispw_norm, name = "out_diff2_wbiased")
# out_diff2_wbiased = tf.multiply(out_diff2_biased, w_norm, name = "out_diff2_wbiased")
cost1b = tf.reduce_sum(out_diff2_wbiased, name = "cost1b")
if use_confidence:
cost12 = tf.add(cost1b, cost2, name = "cost12")
cost123 = tf.add(cost12, cost3, name = "cost123")
return cost123, disp_slice, d_gt_slice, out_diff,out_diff2, w_norm, out_wdiff2, cost1
else:
return cost1b, disp_slice, d_gt_slice, out_diff,out_diff2, w_norm, out_wdiff2, cost1
def weightsLoss(inp_weights,
tile_layers,
tile_side,
wborders_zero):
# [batch_size,(1..2)] tf_result
# weights_lambdas): # single lambda or same length as inp_weights.shape[1]
"""
Enforcing 'smooth' weights for the input 2d correlation tiles
@return mean squared difference for each weight and average of 8 neighbors divided by mean squared weights
"""
weight_ortho = 1.0
weight_diag = 0.7
sw = 4.0 * (weight_ortho + weight_diag)
weight_ortho /= sw
weight_diag /= sw
# w_neib = tf.const([[weight_diag, weight_ortho, weight_diag],
# [weight_ortho, -1.0, weight_ortho],
# [weight_diag, weight_ortho, weight_diag]])
#WBORDERS_ZERO
with tf.name_scope("WeightsLoss"):
# Adding 1 tile border
# tf_inp = tf.reshape(inp_weights[:TILE_LAYERS * TILE_SIZE,:], [TILE_LAYERS, FILE_TILE_SIDE, FILE_TILE_SIDE, inp_weights.shape[1]], name = "tf_inp")
tf_inp = tf.reshape(inp_weights[:tile_layers * tile_side * tile_side,:], [tile_layers, tile_side, tile_side, inp_weights.shape[1]], name = "tf_inp")
if wborders_zero:
tf_zero_col = tf.constant(0.0, dtype=tf.float32, shape=[tf_inp.shape[0], tf_inp.shape[1], 1, tf_inp.shape[3]], name = "tf_zero_col")
tf_zero_row = tf.constant(0.0, dtype=tf.float32, shape=[tf_inp.shape[0], 1 , tf_inp.shape[2] + 2, tf_inp.shape[3]], name = "tf_zero_row")
tf_inp_ext_h = tf.concat([tf_zero_col, tf_inp, tf_zero_col ], axis = 2, name ="tf_inp_ext_h")
tf_inp_ext = tf.concat([tf_zero_row, tf_inp_ext_h, tf_zero_row ], axis = 1, name ="tf_inp_ext")
else:
tf_inp_ext_h = tf.concat([tf_inp [:, :, :1, :], tf_inp, tf_inp [:, :, -1:, :]], axis = 2, name ="tf_inp_ext_h")
tf_inp_ext = tf.concat([tf_inp_ext_h [:, :1, :, :], tf_inp_ext_h, tf_inp_ext_h[:, -1:, :, :]], axis = 1, name ="tf_inp_ext")
s_ortho = tf_inp_ext[:,1:-1,:-2,:] + tf_inp_ext[:,1:-1, 2:,:] + tf_inp_ext[:, :-2,1:-1,:] + tf_inp_ext[:,2:, 1:-1, :] # left + right + up + down neighbors
s_corn = tf_inp_ext[:, :-2,:-2,:] + tf_inp_ext[:, :-2, 2:,:] + tf_inp_ext[:,2:, :-2,:] + tf_inp_ext[:,2: , 2:, :]
w_diff = tf.subtract(tf_inp, s_ortho * weight_ortho + s_corn * weight_diag, name="w_diff")
w_diff2 = tf.multiply(w_diff, w_diff, name="w_diff2")
w_var = tf.reduce_mean(w_diff2, name="w_var")
w2_mean = tf.reduce_mean(inp_weights * inp_weights, name="w2_mean")
w_rel = tf.divide(w_var, w2_mean, name= "w_rel")
return w_rel # scalar, cost for weights non-smoothness in 2d
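def _demo_weights_smoothness():
    # Numpy sketch of the cost above for a hypothetical flat 3x3 single-layer
    # weight map (replicate padding, as in the wborders_zero=False branch):
    # each weight is compared to the 0.7/1.0-weighted average of its 8 neighbors.
    import numpy as np
    w = np.ones((3, 3))
    ext = np.pad(w, 1, mode='edge')
    s_ortho = ext[1:-1, :-2] + ext[1:-1, 2:] + ext[:-2, 1:-1] + ext[2:, 1:-1]
    s_corn  = ext[:-2, :-2] + ext[:-2, 2:] + ext[2:, :-2] + ext[2:, 2:]
    wo, wd = 1.0 / 6.8, 0.7 / 6.8  # normalized so the 8 neighbor weights sum to 1
    diff = w - (s_ortho * wo + s_corn * wd)
    return np.mean(diff ** 2) / np.mean(w ** 2)  # 0.0: a flat map is perfectly smooth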
lwir-nn-4b02b28381749fc6d3eff15ceb0dccaa4f2e606b/qcstereo_network.py 0000664 0000000 0000000 00000026653 13517677053 0024371 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
#from numpy import float64
#import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
NN_LAYOUTS = {0:[0, 0, 0, 32, 20, 16],
1:[0, 0, 0, 256, 128, 64],
2:[0, 128, 32, 32, 32, 16],
3:[0, 0, 40, 32, 20, 16],
4:[0, 0, 0, 0, 16, 16],
5:[0, 0, 64, 32, 32, 16],
6:[0, 0, 32, 16, 16, 16],
7:[0, 0, 64, 16, 16, 16],
8:[0, 0, 0, 64, 20, 16],
9:[0, 0, 256, 64, 32, 16],
10:[0, 256, 128, 64, 32, 16],
11:[0, 0, 0, 0, 64, 32],
12:[0, 0, 256, 128, 64, 32],
13:[0, 0, 0, 256, 128, 32],
}
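# Each layout lists fully-connected layer widths for the six possible stages;
# zeros mean "skip that stage". E.g. layout 0 yields a 3-layer 32->20->16
# sub-network when consumed by network_sub()/network_inter() below.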
def lrelu(x):
return tf.maximum(x*0.2,x)
# return tf.nn.relu(x)
def sym_inputs8(inp, cluster_radius = 2):
"""
get input vector [?:4*9*9+1] (last being target_disparity) and reorder for horizontal flip,
vertical flip and transpose (8 variants, mode + 1 - hor, +2 - vert, +4 - transpose)
return same length, reordered
"""
tile_side = 2 * cluster_radius + 1
with tf.name_scope("sym_inputs8"):
td = inp[:,-1:] # tf.reshape(inp,[-1], name = "td")[-1]
inp_corr = tf.reshape(inp[:,:-1],[-1,4,tile_side,tile_side], name = "inp_corr")
inp_corr_h = tf.stack([-inp_corr [:,0,:,-1::-1], inp_corr [:,1,:,-1::-1], -inp_corr [:,3,:,-1::-1], -inp_corr [:,2,:,-1::-1]], axis=1, name = "inp_corr_h")
inp_corr_v = tf.stack([ inp_corr [:,0,-1::-1,:],-inp_corr [:,1,-1::-1,:], inp_corr [:,3,-1::-1,:], inp_corr [:,2,-1::-1,:]], axis=1, name = "inp_corr_v")
inp_corr_hv = tf.stack([ inp_corr_h[:,0,-1::-1,:],-inp_corr_h[:,1,-1::-1,:], inp_corr_h[:,3,-1::-1,:], inp_corr_h[:,2,-1::-1,:]], axis=1, name = "inp_corr_hv")
inp_corr_t = tf.stack([tf.transpose(inp_corr [:,1], perm=[0,2,1]),
tf.transpose(inp_corr [:,0], perm=[0,2,1]),
tf.transpose(inp_corr [:,2], perm=[0,2,1]),
-tf.transpose(inp_corr [:,3], perm=[0,2,1])], axis=1, name = "inp_corr_t")
inp_corr_ht = tf.stack([tf.transpose(inp_corr_h [:,1], perm=[0,2,1]),
tf.transpose(inp_corr_h [:,0], perm=[0,2,1]),
tf.transpose(inp_corr_h [:,2], perm=[0,2,1]),
-tf.transpose(inp_corr_h [:,3], perm=[0,2,1])], axis=1, name = "inp_corr_ht")
inp_corr_vt = tf.stack([tf.transpose(inp_corr_v [:,1], perm=[0,2,1]),
tf.transpose(inp_corr_v [:,0], perm=[0,2,1]),
tf.transpose(inp_corr_v [:,2], perm=[0,2,1]),
-tf.transpose(inp_corr_v [:,3], perm=[0,2,1])], axis=1, name = "inp_corr_vt")
inp_corr_hvt = tf.stack([tf.transpose(inp_corr_hv[:,1], perm=[0,2,1]),
tf.transpose(inp_corr_hv[:,0], perm=[0,2,1]),
tf.transpose(inp_corr_hv[:,2], perm=[0,2,1]),
-tf.transpose(inp_corr_hv[:,3], perm=[0,2,1])], axis=1, name = "inp_corr_hvt")
# return td, [inp_corr, inp_corr_h, inp_corr_v, inp_corr_hv, inp_corr_t, inp_corr_ht, inp_corr_vt, inp_corr_hvt]
"""
return [tf.concat([tf.reshape(inp_corr, [inp_corr.shape[0],-1]),td], axis=1,name = "out_corr"),
tf.concat([tf.reshape(inp_corr_h, [inp_corr.shape[0],-1]),td], axis=1,name = "out_corr_h"),
tf.concat([tf.reshape(inp_corr_v, [inp_corr.shape[0],-1]),td], axis=1,name = "out_corr_v"),
tf.concat([tf.reshape(inp_corr_hv, [inp_corr.shape[0],-1]),td], axis=1,name = "out_corr_hv"),
tf.concat([tf.reshape(inp_corr_t, [inp_corr.shape[0],-1]),td], axis=1,name = "out_corr_t"),
tf.concat([tf.reshape(inp_corr_ht, [inp_corr.shape[0],-1]),td], axis=1,name = "out_corr_ht"),
tf.concat([tf.reshape(inp_corr_vt, [inp_corr.shape[0],-1]),td], axis=1,name = "out_corr_vt"),
tf.concat([tf.reshape(inp_corr_hvt,[inp_corr.shape[0],-1]),td], axis=1,name = "out_corr_hvt")]
"""
cl = 4 * tile_side * tile_side
return [tf.concat([tf.reshape(inp_corr, [-1,cl]),td], axis=1,name = "out_corr"),
tf.concat([tf.reshape(inp_corr_h, [-1,cl]),td], axis=1,name = "out_corr_h"),
tf.concat([tf.reshape(inp_corr_v, [-1,cl]),td], axis=1,name = "out_corr_v"),
tf.concat([tf.reshape(inp_corr_hv, [-1,cl]),td], axis=1,name = "out_corr_hv"),
tf.concat([tf.reshape(inp_corr_t, [-1,cl]),td], axis=1,name = "out_corr_t"),
tf.concat([tf.reshape(inp_corr_ht, [-1,cl]),td], axis=1,name = "out_corr_ht"),
tf.concat([tf.reshape(inp_corr_vt, [-1,cl]),td], axis=1,name = "out_corr_vt"),
tf.concat([tf.reshape(inp_corr_hvt,[-1,cl]),td], axis=1,name = "out_corr_hvt")]
# inp_corr_h, inp_corr_v, inp_corr_hv, inp_corr_t, inp_corr_ht, inp_corr_vt, inp_corr_hvt]
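def _demo_sym_h():
    # Numpy mirror of the inp_corr_h branch of sym_inputs8() above (sketch):
    # a horizontal flip reverses tile columns, negates layer 0, and swaps and
    # negates the two diagonal layers, exactly as in the tf.stack call.
    import numpy as np
    def sym_h(corr):  # corr: (4, side, side)
        return np.stack([-corr[0, :, ::-1],
                          corr[1, :, ::-1],
                         -corr[3, :, ::-1],
                         -corr[2, :, ::-1]])
    corr = np.random.rand(4, 9, 9).astype(np.float32)
    assert np.allclose(sym_h(sym_h(corr)), corr)  # the flip is an involution
    return sym_h(corr)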
def network_sub(input_tensor,
input_global, #add to all layers (but first) if not None
layout,
reuse,
sym8 = False,
cluster_radius = 2):
# last_indx = None;
fc = []
inp_weights = []
for i, num_outs in enumerate (layout):
if num_outs:
if fc:
if input_global is None:
inp = fc[-1]
else:
inp = tf.concat([fc[-1], input_global], axis = 1)
fc.append(slim.fully_connected(inp, num_outs, activation_fn=lrelu, scope='g_fc_sub'+str(i), reuse = reuse))
else:
inp = input_tensor
if sym8:
inp8 = sym_inputs8(inp, cluster_radius)
num_non_sum = num_outs % len(inp8) # if number of first layer outputs is not multiple of 8
num_sym8 = num_outs // len(inp8) # number of symmetrical groups
fc_sym = []
for j in range (len(inp8)): # ==8
reuse_this = reuse | (j > 0)
scp = 'g_fc_sub'+str(i)
fc_sym.append(slim.fully_connected(inp8[j], num_sym8, activation_fn=lrelu, scope= scp, reuse = reuse_this))
if not reuse_this:
with tf.compat.v1.variable_scope(scp,reuse=True) : # tf.AUTO_REUSE):
inp_weights.append(tf.compat.v1.get_variable('weights')) # ,shape=[inp.shape[1],num_outs]))
if num_non_sum > 0:
reuse_this = reuse
scp = 'g_fc_sub'+str(i)+"r"
fc_sym.append(slim.fully_connected(inp, num_non_sum, activation_fn=lrelu, scope=scp, reuse = reuse_this))
if not reuse_this:
with tf.compat.v1.variable_scope(scp,reuse=True) : # tf.AUTO_REUSE):
inp_weights.append(tf.compat.v1.get_variable('weights')) # ,shape=[inp.shape[1],num_outs]))
fc.append(tf.concat(fc_sym, 1, name='sym_input_layer'))
else:
scp = 'g_fc_sub'+str(i)
fc.append(slim.fully_connected(inp, num_outs, activation_fn=lrelu, scope= scp, reuse = reuse))
if not reuse:
with tf.compat.v1.variable_scope(scp, reuse=True) : # tf.AUTO_REUSE):
inp_weights.append(tf.compat.v1.get_variable('weights')) # ,shape=[inp.shape[1],num_outs]))
return fc[-1], inp_weights
def network_inter(input_tensor,
input_global, #add to all layers (but first) if not None
layout,
reuse=False,
use_confidence=False):
#last_indx = None;
fc = []
for i, num_outs in enumerate (layout):
if num_outs:
if fc:
if input_global is None:
inp = fc[-1]
else:
inp = tf.concat([fc[-1], input_global], axis = 1)
else:
inp = input_tensor
fc.append(slim.fully_connected(inp, num_outs, activation_fn=lrelu, scope='g_fc_inter'+str(i), reuse = reuse))
if use_confidence:
fc_out = slim.fully_connected(fc[-1], 2, activation_fn=lrelu, scope='g_fc_inter_out', reuse = reuse)
else:
fc_out = slim.fully_connected(fc[-1], 1, activation_fn=None, scope='g_fc_inter_out', reuse = reuse)
#If using residual disparity, split last layer into 2 or remove activation and add rectifier to confidence only
return fc_out
def networks_siam(input_tensor, # now [?,9,325]-> [?,25,325]
input_global, # add to all layers (but first) if not None
layout1,
layout2,
inter_convergence,
sym8 = False,
only_tile = None, # just for debugging - feed only data from the center sub-network
partials = None,
use_confidence=False,
cluster_radius = 2):
center_index = (input_tensor.shape[1] - 1) // 2
with tf.name_scope("Siam_net"):
inp_weights = []
num_legs = input_tensor.shape[1] # == 25
if partials is None:
partials = [[True] * num_legs]
inter_lists = [[] for _ in partials]
reuse = False
for i in range (num_legs):
if ((only_tile is None) or (i == only_tile)) and any([p[i] for p in partials]) :
if input_global is None:
ig = None
else:
ig =input_global[:,i,:]
ns, ns_weights = network_sub(input_tensor[:,i,:],
ig, # input_global[:,i,:],
layout= layout1,
reuse= reuse,
sym8 = sym8,
cluster_radius = cluster_radius)
for n, partial in enumerate(partials):
if partial[i]:
inter_lists[n].append(ns)
else:
inter_lists[n].append(tf.zeros_like(ns))
inp_weights += ns_weights
reuse = True
outs = []
for n, _ in enumerate(partials):
if input_global is None:
ig = None
else:
ig =input_global[:,center_index,:]
outs.append(network_inter (input_tensor = tf.concat(inter_lists[n],
axis=1,
name='inter_tensor'+str(n)),
input_global = [None, ig][inter_convergence], # optionally feed all convergence values (from each tile of a cluster)
layout = layout2,
reuse = (n > 0),
use_confidence = use_confidence))
return outs, inp_weights