pax_global_header 0000666 0000000 0000000 00000000064 13514453450 0014516 g ustar 00root root 0000000 0000000 52 comment=2c44046de3b521c386ae33bec9c0eb5075f4477f
lwir-nn-2c44046de3b521c386ae33bec9c0eb5075f4477f/ 0000775 0000000 0000000 00000000000 13514453450 0020102 5 ustar 00root root 0000000 0000000 lwir-nn-2c44046de3b521c386ae33bec9c0eb5075f4477f/.gitignore 0000664 0000000 0000000 00000000060 13514453450 0022066 0 ustar 00root root 0000000 0000000 __pycache__
/.project
/.pydevproject
attic
*.log lwir-nn-2c44046de3b521c386ae33bec9c0eb5075f4477f/README.md 0000664 0000000 0000000 00000000036 13514453450 0021360 0 ustar 00root root 0000000 0000000 # lwir-nn
NN for LWIR 3D data lwir-nn-2c44046de3b521c386ae33bec9c0eb5075f4477f/eclipse_project_setup/ 0000775 0000000 0000000 00000000000 13514453450 0024474 5 ustar 00root root 0000000 0000000 lwir-nn-2c44046de3b521c386ae33bec9c0eb5075f4477f/eclipse_project_setup/.project 0000664 0000000 0000000 00000000551 13514453450 0026144 0 ustar 00root root 0000000 0000000
lwir-nn
org.python.pydev.PyDevBuilder
org.python.pydev.pythonNature
lwir-nn-2c44046de3b521c386ae33bec9c0eb5075f4477f/eclipse_project_setup/.pydevproject 0000664 0000000 0000000 00000001132 13514453450 0027210 0 ustar 00root root 0000000 0000000
Default
python interpreter
/${PROJECT_DIR_NAME}
/usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/
lwir-nn-2c44046de3b521c386ae33bec9c0eb5075f4477f/explore_data6.py 0000664 0000000 0000000 00000310423 13514453450 0023214 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
#from numpy import float64
from tensorflow.contrib.image.ops.gen_distort_image_ops import adjust_hsv_in_yiq
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "andrey@elphel.com"
import os
import sys
import glob
import imagej_tiff as ijt
import numpy as np
import resource
import re
#import timeit
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import time
import tensorflow as tf
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
    """ANSI escape sequences for colored/styled terminal output.

    Source of the idea is referenced in the comment above
    (stackoverflow.com/questions/287871).
    """
    HEADER = '\033[95m'         # light magenta
    OKBLUE = '\033[94m'         # light blue
    OKGREEN = '\033[92m'        # light green
    WARNING = '\033[38;5;214m'  # orange (256-color palette index 214)
    FAIL = '\033[91m'           # light red
    ENDC = '\033[0m'            # reset all attributes
    BOLD = '\033[1m'
    BOLDWHITE = '\033[1;37m'    # bold + white foreground (used by print_time)
    UNDERLINE = '\033[4m'
# Module-load timestamp: the zero point for all print_time() reports.
TIME_START = time.time()
# Timestamp of the most recent print_time() call (updated by print_time()).
TIME_LAST = TIME_START
def print_time(txt="", end="\n"):
    """Print elapsed time since module start and since the previous call.

    If txt is non-empty it is printed (with a trailing space) before the
    highlighted timing info. Updates the module-level TIME_LAST timestamp.
    """
    global TIME_LAST
    now = time.time()
    prefix = txt + " " if txt else txt
    fmt = "%s" + bcolors.BOLDWHITE + "at %.4fs (+%.4fs)" + bcolors.ENDC
    print(fmt % (prefix, now - TIME_START, now - TIME_LAST), end=end)
    TIME_LAST = now
def _dtype_feature(ndarray):
"""match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64:
return lambda array: tf.train.Feature(int64_list=tf.train.Int64List(value=array))
else:
raise ValueError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
def readTFRewcordsEpoch(train_filename):
    """Read one epoch of tile records from a .tfrecords file.

    Appends the '.tfrecords' extension when missing. Returns three numpy
    arrays, one row per record: corr2d, target_disparity (scalar per
    record) and gt_ds.
    """
    if '.tfrecords' not in train_filename:
        train_filename += '.tfrecords'
    corr2d_rows = []
    target_rows = []
    gt_rows = []
    for serialized in tf.python_io.tf_record_iterator(path=train_filename):
        example = tf.train.Example()
        example.ParseFromString(serialized)
        feature = example.features.feature
        corr2d_rows.append(np.array(feature['corr2d'].float_list.value))
        target_rows.append(np.array(feature['target_disparity'].float_list.value[0]))
        gt_rows.append(np.array(feature['gt_ds'].float_list.value))
    return np.array(corr2d_rows), np.array(target_rows), np.array(gt_rows)
#"/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
# PATTERN_CORRD = "-D*.tiff"
#1562390086_121105-DSI_GT-AUX.tiff
def writeTFRecordsFromImageSet(
        model_ml_path, # model/version/ml_dir
        export_mode,   # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
        random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
        pathTFR        #TFR directory
        ):
    """Assemble a synthetic tile set from a disparity-sweep image set and save as .tfrecords.

    Loads the scene's GT/AUX tiff, selects a disparity/strength pair
    according to export_mode, fills gaps with fillGapsByLaplacian(), then
    for every tile picks correlation data from the sweep file whose target
    disparity best matches (optionally randomized within +/-random_offset)
    and writes one tf.train.Example per tile with features
    'corr2d', 'target_disparity' and 'gt_ds'.
    """
    debug = 1  # 0 - no plots, 1 - final debug images, >1 - also intermediate images
    # Directory layout is <scene>/<scene_version>/<ml dir>
    scene = os.path.basename(os.path.dirname(os.path.dirname(model_ml_path))) #'1562390086_121105'
    scene_version = os.path.basename(os.path.dirname(model_ml_path)) #'v01'
    # Encode mode and offset in the output name; '.' replaced so the name keeps a single extension
    fname = scene+'-'+scene_version+ ('-M%d-R%1.3f'%(export_mode,random_offset)).replace('.','_')
    img_filenameTFR = os.path.join(pathTFR,'img',fname)
    dsi_list = glob.glob(os.path.join(model_ml_path, ExploreData.PATTERN_CORRD))
    if not dsi_list:
        print ("DSI list is empty, nothing to do ...")
        return
    dsi_list.sort()
    gt_aux=glob.glob(os.path.join(os.path.dirname(model_ml_path), ExploreData.PATTERN_GTAUX))[0]
    corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
    #Get tiles data from the GT_AUX file
    img_gt_aux = ijt.imagej_tiff(gt_aux,ijt.IJFGBG.DSI_NAMES) #["disparity","strength","rms","rms-split","fg-disp","fg-str","bg-disp","bg-str","aux-disp","aux-str"]
    num_tiles = img_gt_aux.image.shape[0]*img_gt_aux.image.shape[1]
    all_image_tiles = np.array(range(num_tiles))
    #now read in all scanned files
    indx = 0
    dsis = np.empty((0))
    dsis_other = np.empty((0))
    for img_path in dsi_list:
        tiff = ijt.imagej_tiff(img_path, corr_layers,all_image_tiles)
        corr2d = tiff.corr2d.reshape((num_tiles,-1)) # [300][4*81]
        payloads = tiff.payload # [300][11]
        if not indx: # Create arrays once dimensions are known from the first file
            dsis = np.empty((len(dsi_list), corr2d.shape[0], corr2d.shape[1]), corr2d.dtype)
            dsis_other = np.empty((len(dsi_list), payloads.shape[0], payloads.shape[1]), payloads.dtype)
        dsis[indx] = corr2d
        dsis_other[indx] = payloads
        indx += 1
        pass
    '''
    Prepare target disparity from the gt_aux file, filling the gaps in GT data
    '''
    # if export_mode == 0 (default):
    disparity = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
    strength = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
    if export_mode == 1:
        disparity = img_gt_aux.image[...,ijt.IJFGBG.FG_DISP]
        strength = img_gt_aux.image[...,ijt.IJFGBG.FG_STR]
    elif export_mode == 2:
        disparity = img_gt_aux.image[...,ijt.IJFGBG.BG_DISP]
        strength = img_gt_aux.image[...,ijt.IJFGBG.BG_STR]
    elif export_mode == 3:
        disparity = img_gt_aux.image[...,ijt.IJFGBG.AUX_DISP]
        strength = img_gt_aux.image[...,ijt.IJFGBG.AUX_STR]
    # d_gt/s_gt - the ground-truth pair saved with each tile
    # (in mode 3 it is the GT average, otherwise the same as the selected pair)
    if export_mode == 3:
        d_gt = img_gt_aux.image[...,ijt.IJFGBG.DISPARITY]
        s_gt = img_gt_aux.image[...,ijt.IJFGBG.STRENGTH]
    else:
        d_gt = disparity
        s_gt = strength
    #next values may be modified to fill gaps, so copy them before
    if debug > 1:
        mytitle = "Disparity with gaps"
        fig = plt.figure()
        fig.canvas.set_window_title(scene+mytitle)
        fig.suptitle(mytitle)
        plt.imshow(d_gt)# d_gt.flatten)
        plt.colorbar()
        mytitle = "Strength with gaps"
        fig = plt.figure()
        fig.canvas.set_window_title(scene+mytitle)
        fig.suptitle(mytitle)
        plt.imshow(s_gt) # s_gt.flatten)
        plt.colorbar()
    d_gt = np.copy(d_gt)
    s_gt = np.copy(s_gt)
    '''
    fill gaps: up,down,right,left until done
    '''
    fillGapsByLaplacian(
        d_gt, # val, # will be modified in place
        s_gt, # wght, # will be modified in place
        w_diag = 0.7,
        w_reduce = 0.7,
        num_pass = 10,
        eps = 1E-6)
    if debug > 1:
        mytitle = "Disparity w/o gaps"
        fig = plt.figure()
        fig.canvas.set_window_title(scene+mytitle)
        fig.suptitle(mytitle)
        plt.imshow(d_gt)
        plt.colorbar()
        mytitle = "Strength w/o gaps"
        fig = plt.figure()
        fig.canvas.set_window_title(scene+mytitle)
        fig.suptitle(mytitle)
        plt.imshow(s_gt)
        plt.colorbar()
    # switch to flat per-tile indexing
    disparity = disparity.flatten()
    strength = strength.flatten()
    d_gt = d_gt.flatten()
    s_gt = s_gt.flatten()
    '''
    Assemble synthetic image, selecting each tile from the nearest available disparity sweep file
    '''
    corr2d = np.zeros((dsis.shape[1],dsis.shape[2]),dsis.dtype)
    target_disparity = np.zeros((dsis.shape[1], 1),dsis.dtype)
    gt_ds = np.zeros((dsis.shape[1], 2),dsis.dtype)
    for nt in range(num_tiles):
        d = disparity[nt]
        add_random = (export_mode != 3)
        if strength[nt] <= 0.0:
            # no data for the selected mode - fall back to (gap-filled) GT
            d = d_gt[nt]
            add_random = True
        best_indx = 0
        dmn = d
        dmx = d
        if add_random:
            dmn -= random_offset
            dmx += random_offset
        fit_list = []
        for indx in range (dsis_other.shape[0]):
            dsi_d = dsis_other[indx][nt][ijt.IJML.TARGET]
            # keep track of the sweep file closest to d as a fallback
            if abs (dsi_d - d) < abs (dsis_other[best_indx][nt][ijt.IJML.TARGET] - d):
                best_indx = indx
            if (dsi_d >= dmn) and (dsi_d <= dmx):
                fit_list.append(indx)
        if not len(fit_list):
            fit_list.append(best_indx)
        #select random index from the list - even if no random (it will just be a 1-element list then)
        indx = np.random.choice(fit_list) # possible to add weights
        target_disparity[nt][0] = dsis_other[indx][nt][ijt.IJML.TARGET]
        gt_ds[nt][0] = d_gt[nt]
        gt_ds[nt][1] = s_gt[nt]
        corr2d[nt] = dsis[indx][nt]
    if debug > 0:
        tilesX = img_gt_aux.image.shape[1]
        tilesY = img_gt_aux.image.shape[0]
        tileH = tiff.tileH
        tileW = tiff.tileW
        ncorr2_layers = corr2d.shape[1]//(tileH * tileW)
        mytitle = "Target Disparity"
        fig = plt.figure()
        fig.canvas.set_window_title(scene+": "+mytitle)
        fig.suptitle(mytitle)
        plt.imshow(target_disparity.reshape((tilesY, tilesX)))
        plt.colorbar()
        # unfold each tile's packed correlation vector into a full-size image per layer
        dbg_corr2d = np.zeros((tilesY * tileH, tilesX*tileW, ncorr2_layers), corr2d.dtype)
        for tileY in range(tilesY):
            for tileX in range(tilesX):
                for nl in range(ncorr2_layers):
                    dbg_corr2d[tileY * tileH : (tileY + 1) * tileH, tileX * tileW : (tileX + 1) * tileW, nl] = (
                        corr2d[tileY * tilesX + tileX].reshape((ncorr2_layers, tileH * tileW))[nl].reshape((tileH, tileW)))
                pass
        for nl in range(ncorr2_layers):
            corr2d_layer =dbg_corr2d[:,:,nl]
            mytitle = "Corr2D-"+str(nl)
            fig = plt.figure()
            fig.canvas.set_window_title(scene+": "+mytitle)
            fig.suptitle(mytitle)
            plt.imshow(corr2d_layer)
            plt.colorbar()
        #end of debug output
    if not '.tfrecords' in img_filenameTFR:
        img_filenameTFR += '.tfrecords'
    tfr_filename=img_filenameTFR.replace(' ','_')
    print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
    try:
        os.makedirs(os.path.dirname(tfr_filename))
    except:
        pass
    ### writer = tf.python_io.TFRecordWriter(tfr_filename)
    writer = tf.io.TFRecordWriter(tfr_filename)
    dtype_feature_corr2d = _dtype_feature(corr2d)
    dtype_target_disparity = _dtype_feature(target_disparity)
    dtype_feature_gt_ds = _dtype_feature(gt_ds)
    for i in range(num_tiles):
        x = corr2d[i].astype(np.float32)
        y = target_disparity[i].astype(np.float32)
        z = gt_ds[i].astype(np.float32)
        d_feature = {'corr2d': dtype_feature_corr2d(x),
                     'target_disparity':dtype_target_disparity(y),
                     'gt_ds': dtype_feature_gt_ds(z)}
        example = tf.train.Example(features=tf.train.Features(feature=d_feature))
        writer.write(example.SerializeToString())
        pass
    writer.close()
    sys.stdout.flush()
def fillGapsByLaplacian(
        val, # will be modified in place
        wght, # will be modified in place
        w_diag = 0.7,
        w_reduce = 0.7,
        num_pass = 10,
        eps = 1E-6):
    """Iteratively fill zero-weight gaps in val/wght by neighbor averaging.

    Tiles with wght <= 0 are treated as gaps. Each pass replaces every gap
    tile with the weighted average of its (up to 8) neighbors, diagonals
    weighted by w_diag, and assigns it a reduced weight
    (w_reduce * sum_w / sum_wneib). Both arrays are modified in place.

    Args:
        val:      2-d array of values; gap entries are overwritten.
        wght:     2-d array of weights, same shape as val; <= 0 marks a gap.
        w_diag:   relative weight of diagonal neighbors.
        w_reduce: factor reducing the weight assigned to filled tiles.
        num_pass: maximal number of passes.
        eps:      relative weight-change threshold for early termination.
    """
    dirs = ((-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1))
    wneib = ( 1.0, w_diag, 1.0, w_diag, 1.0, w_diag, 1.0, w_diag)
    gap_tiles = []
    gap_neibs = []
    rows = val.shape[0]
    cols = val.shape[1]  # was wght.shape[1]; arrays must share one shape anyway
    # Collect gap tiles and their in-bounds neighbors once, up front
    for row in range(rows):
        for col in range(cols):
            if wght[row][col] <= 0.0:
                neibs = []
                for dr, neib in enumerate(dirs):
                    nrow = row + neib[0]
                    ncol = col + neib[1]
                    if (nrow >= 0) and (ncol >= 0) and (nrow < rows) and (ncol < cols):
                        neibs.append((nrow, ncol, dr))
                gap_tiles.append((row, col))
                gap_neibs.append(neibs)
    if not len(gap_tiles):
        return # no gaps to fill
    valn = np.copy(val)
    wghtn = np.copy(wght)
    achange = eps * np.max(wght)  # absolute convergence threshold
    for _ in range(num_pass):
        # was initialized to 1, which made the convergence break below unreachable
        num_new = 0
        max_diff = 0.0
        for tile, neibs in zip(gap_tiles, gap_neibs):
            swn = 0.0   # sum of neighbor pattern weights (for normalization)
            sw = 0.0    # sum of actual neighbor weights
            swd = 0.0   # weighted sum of neighbor values
            for neib in neibs: # (row, col, direction)
                w = wght[neib[0]][neib[1]] * wneib[neib[2]]
                sw += w
                if w > 0:
                    swd += w * val[neib[0]][neib[1]]
                    swn += wneib[neib[2]]
            if (sw > 0):
                valn[tile[0]][tile[1]] = swd/sw
                wghtn[tile[0]][tile[1]] = w_reduce * sw/swn
                if (wght[tile[0]][tile[1]]) <= 0:
                    num_new += 1  # this gap tile got data for the first time
            wdiff = abs(wghtn[tile[0]][tile[1]] - wght[tile[0]][tile[1]])
            max_diff = max(max_diff, wdiff)
        np.copyto(val, valn)
        np.copyto(wght, wghtn)
        if (num_new == 0) and (max_diff < achange):
            break  # no newly filled tiles and weights converged
def writeTFRewcordsImageTiles(img_path, tfr_filename): # test_set=False):
    """Convert one ML tiff image into a .tfrecords file of per-tile records.

    Each record carries 'corr2d', 'target_disparity' and 'gt_ds' float32
    features read from the correlation tiff at img_path.
    """
    num_tiles = 242*324 # fixme: hard-coded tile grid of the source images
    all_image_tiles = np.array(range(num_tiles))
    corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
    img = ijt.imagej_tiff(img_path, corr_layers, all_image_tiles)
    """
    Values read from correlation file, it now may differ from the COMBO-DSI:
    1) The target disparities used for correlations are replaced if they are too far from the rig (GT) values and
    replaced by interpolation from available neighbors. If there are no suitable neighbors, target disparity is
    derived from the rig data by adding a random offset (specified in ImageJ plugin configuration ML section)
    2) correlation is performed around the defined tiles extrapolating disparity. rig data may be 0 disparity,
    0 strength if there is no rig data for those tiles. That means that such tiles can only be used as peripherals
    i (now 5x5) clusters, not for the cluster centers where GT is needed.
    """
    corr2d = img.corr2d.reshape((num_tiles,-1))
    target_disparity = img.target_disparity.reshape((num_tiles,-1))
    gt_ds = img.gt_ds.reshape((num_tiles,-1))
    """
    Replace GT data with zero strength with nan, zero strength
    nan2 = np.array((np.nan,0), dtype=np.float32)
    gt_ds[np.where(gt_ds[:,1]==0)] = nan2
    """
    if '.tfrecords' not in tfr_filename:
        tfr_filename += '.tfrecords'
    tfr_filename = tfr_filename.replace(' ','_')
    # Create the output directory if needed. Was a bare try/except around
    # os.makedirs() that also silenced unrelated OS errors (e.g. permissions).
    dirname = os.path.dirname(tfr_filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    writer = tf.io.TFRecordWriter(tfr_filename)
    dtype_feature_corr2d = _dtype_feature(corr2d)
    dtype_target_disparity = _dtype_feature(target_disparity)
    dtype_feature_gt_ds = _dtype_feature(gt_ds)
    for i in range(num_tiles):
        x = corr2d[i].astype(np.float32)
        y = target_disparity[i].astype(np.float32)
        z = gt_ds[i].astype(np.float32)
        d_feature = {'corr2d': dtype_feature_corr2d(x),
                     'target_disparity': dtype_target_disparity(y),
                     'gt_ds': dtype_feature_gt_ds(z)}
        example = tf.train.Example(features=tf.train.Features(feature=d_feature))
        writer.write(example.SerializeToString())
    writer.close()
    sys.stdout.flush()
class ExploreData:
    """Scan train/test trees for DSI / GT-AUX tiff files, build
    disparity/strength histograms and batch-bin assignments used to
    generate balanced TFRecords datasets.

    TODO: add to constructor parameters
    """
    # Glob patterns for the tiff files produced upstream
    PATTERN = "*-DSI_COMBO.tiff"         # combined DSI file
    PATTERN_GTAUX = "*-DSI_GT-AUX.tiff"  # ground truth + aux camera DSI
    PATTERN_CORRD = "*-D*.tiff"          # per-target-disparity sweep files
    # Earlier ML file patterns, kept for reference:
    # ML_DIR = "ml"
    # ML_PATTERN = "*-ML_DATA*OFFS*.tiff"
    # ML_PATTERN = "*-ML_DATA*MAIN*.tiff"
    # ML_PATTERN = "*-ML_DATA*MAIN.tiff"
    # ML_PATTERN = "*-ML_DATA*MAIN_RND*.tiff"
    ## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
    # ML_PATTERN = "*-ML_DATA*OFFS-0.20000_0.20000.tiff"
    # Example source file names:
    """
    1527182801_296892-ML_DATARND-32B-O-FZ0.05-OFFS-0.20000_0.20000.tiff
    1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
    """
    #1562390086_121105-DSI_GT-AUX.tiff
def getComboList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir#) ,'**', patt) # works
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def loadComboFiles(self, tlist):
indx = 0
images = []
if (self.debug_level>2):
print(str(resource.getrusage(resource.RUSAGE_SELF)))
layers = ['disparity_rig','strength_rig','disparity_main']
for combo_file in tlist:
tiff = ijt.imagej_tiff(combo_file,layers)
if not indx:
images = np.empty((len(tlist), tiff.image.shape[0],tiff.image.shape[1],tiff.image.shape[2]), tiff.image.dtype)
images[indx] = tiff.image
if (self.debug_level>2):
print(str(indx)+": "+str(resource.getrusage(resource.RUSAGE_SELF)))
indx += 1
return images
def getGtAuxList(self, top_dir, latest_version_only):
if not top_dir:
return []
tlist = []
for i in range(5):
pp = top_dir#) ,'**', patt) # works
for _ in range (i):
pp = os.path.join(pp,'*')
pp = os.path.join(pp, ExploreData.PATTERN_GTAUX)
tlist += glob.glob(pp)
if (self.debug_level > 0):
print (pp+" "+str(len(tlist)))
if (self.debug_level > 0):
print("Found "+str(len(tlist))+" combo DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
if latest_version_only:
models = {}
for p in tlist:
model = os.path.dirname(os.path.dirname(p))
if (not model in models) or ( models[model]< p):
models[model] = p
tlist = [v for v in models.values()]
if (self.debug_level > 0):
print("After filtering the latest versions only, left "+str(len(tlist))+" GT/AUX DSI files in "+top_dir+" :")
if (self.debug_level > 1):
print("\n".join(tlist))
tlist.sort()
return tlist
def getMLSweepFiles(self,
gtaux_list,
ml_name = "ml32"):
files_list = []
target_disparities = []
for gtaux in gtaux_list:
# files_list.append([])
ml_path = os.path.join(os.path.dirname(gtaux),ml_name)
sweep_list = glob.glob(os.path.join(ml_path, ExploreData.PATTERN_CORRD))
sweep_list.sort()
disparities = np.zeros((len(sweep_list)),dtype=float)
for i,f in enumerate(sweep_list):
disparities[i] = float(re.search(".*-D([0-9.]*)\.tiff",f).groups()[0])
files_list.append(sweep_list)
target_disparities.append(disparities)
return files_list, target_disparities
def loadGtAuxFiles(self, tlist):
    """Load all GT/AUX DSI layers for each file in tlist.

    Layer names come from ijt.IJFGBG.DSI_NAMES
    (disparity, strength, rms, rms-split, fg/bg/aux disparity-strength pairs).
    Returns a numpy array (num_files, height, width, num_layers) allocated
    from the first file's dimensions, or [] when tlist is empty.
    """
    if self.debug_level > 2:
        print(str(resource.getrusage(resource.RUSAGE_SELF)))
    layers = ijt.IJFGBG.DSI_NAMES
    images = []
    for findx, gtaux_file in enumerate(tlist):
        tiff = ijt.imagej_tiff(gtaux_file, layers)
        if findx == 0:
            shape = tiff.image.shape
            images = np.empty((len(tlist), shape[0], shape[1], shape[2]), tiff.image.dtype)
        images[findx] = tiff.image
        if self.debug_level > 2:
            print(str(findx) + ": " + str(resource.getrusage(resource.RUSAGE_SELF)))
    return images
def selectDSPairFromGtaux(
        self,
        gtaux,
        mode): #0 - average, 1 - FG, 2 - BG, 3 - AUX
    """Build (disparity, strength, aux_disparity) triples from GT/AUX layers.

    Channel 0/1 hold the disparity/strength pair selected by mode;
    channel 2 always holds the aux (main camera) disparity. Gaps in the
    selected pair are filled in place with fillGapsByLaplacian().
    """
    ds_pair = np.empty((gtaux.shape[0],gtaux.shape[1],gtaux.shape[2], 3), dtype=gtaux.dtype)
    if mode == 0:
        ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.DISPARITY] # 0
        ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.STRENGTH] # 1
    elif mode == 1:
        ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.FG_DISP] # 4
        ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.FG_STR] # 5
    elif mode == 2:
        ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.BG_DISP] # 6
        ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.BG_STR] # 7
    elif mode == 3:
        ds_pair[:,:,:,0] = gtaux[:,:,:,ijt.IJFGBG.AUX_DISP] # 8
        # was AUX_DISP (disparity copied into the strength channel); the
        # inline "# 9" comment showed AUX_STR was intended
        ds_pair[:,:,:,1] = gtaux[:,:,:,ijt.IJFGBG.AUX_STR] # 9
    ds_pair[:,:,:,2] = gtaux[:,:,:, ijt.IJFGBG.AUX_DISP] # 8
    for nf in range (ds_pair.shape[0]):
        fillGapsByLaplacian(
            ds_pair[nf,:,:,0], # val, # will be modified in place
            ds_pair[nf,:,:,1], # wght, # will be modified in place
            w_diag = 0.7,
            w_reduce = 0.7,
            num_pass = 20,
            eps = 1E-6)
    return ds_pair
def getHistogramDSI(
self,
list_rds,
disparity_bins = 1000,
strength_bins = 100,
disparity_min_drop = -0.1,
disparity_min_clip = -0.1,
disparity_max_drop = 100.0,
disparity_max_clip = 100.0,
strength_min_drop = 0.1,
strength_min_clip = 0.1,
strength_max_drop = 1.0,
strength_max_clip = 0.9,
max_main_offset = 0.0,
normalize = True,
# no_histogram = False
):
good_tiles_list=[]
for combo_rds in list_rds:
good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
ds = combo_rds[ids]
disparity = ds[...,0]
strength = ds[...,1]
good_tiles[ids] = disparity >= disparity_min_drop
good_tiles[ids] &= disparity <= disparity_max_drop
good_tiles[ids] &= strength >= strength_min_drop
good_tiles[ids] &= strength <= strength_max_drop
if max_main_offset > 0.0: #2.0
disparity_main = ds[...,2] #measured disparity (here aux_disp)?
good_tiles[ids] &= disparity_main <= (disparity + max_main_offset)
good_tiles[ids] &= disparity_main >= (disparity - max_main_offset)
disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
strength = np.nan_to_num(strength, copy = False) # likely should never happen
np.clip(disparity, disparity_min_clip, disparity_max_clip, out = disparity)
np.clip(strength, strength_min_clip, strength_max_clip, out = strength)
good_tiles_list.append(good_tiles)
combo_rds = np.concatenate(list_rds)
hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
x = combo_rds[...,1].flatten(), # average disparity from main
y = combo_rds[...,0].flatten(), # average strength from main
bins= (strength_bins, disparity_bins),
range= ((strength_min_clip,strength_max_clip),(disparity_min_clip,disparity_max_clip)),
normed= normalize,
weights= np.concatenate(good_tiles_list).flatten())
for i, combo_rds in enumerate(list_rds):
for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
combo_rds[ids][...,1]*= good_tiles_list[i][ids]
return hist, xedges, yedges
def __init__(self,
             topdir_train,          # top directory of the training data tree
             topdir_test,           # top directory of the test data tree
             ml_subdir,             #'ml32' - subdirectory holding the ML sweep files
             ml_pattern,            # glob pattern of the ML files
             latest_version_only,   # keep only the latest version per model
             max_main_offset = 2.0, # > 0.0 - do not use main camera tiles with offset more than this
             debug_level = 0,
             disparity_bins = 1000,
             strength_bins = 100,
             disparity_min_drop = -0.1,
             disparity_min_clip = -0.1,
             disparity_max_drop = 100.0,
             disparity_max_clip = 100.0,
             strength_min_drop = 0.1,
             strength_min_clip = 0.1,
             strength_max_drop = 1.0,
             strength_max_clip = 0.9,
             hist_sigma = 2.0,      # Blur log histogram
             hist_cutoff= 0.001,    # of maximal
             #new in LWIR mode
             fgbg_mode = 0,         # average, 1 - FG, 2 - BG (3 - AUX - not used here)
             rnd_tile = 0.5,        # use corr2d rendered with target disparity this far shuffled from the GT - individual tile
             rnd_plate = 0.5,       # use corr2d rendered with target disparity this far shuffled from the GT common for (5x5) plate
             radius = 2):
    """Scan the train/test trees for GT/AUX files, load their layers,
    select the D/S pair per fgbg_mode (filling gaps), locate the ML sweep
    files, and precompute the blurred disparity/strength histogram and the
    good-tiles mask used later for batch binning.
    """
    self.debug_level = debug_level
    self.ml_pattern = ml_pattern
    self.ml_subdir = ml_subdir
    #self.testImageTiles()
    self.max_main_offset = max_main_offset
    self.disparity_bins = disparity_bins
    self.strength_bins = strength_bins
    self.disparity_min_drop = disparity_min_drop
    self.disparity_min_clip = disparity_min_clip
    self.disparity_max_drop = disparity_max_drop
    self.disparity_max_clip = disparity_max_clip
    self.strength_min_drop = strength_min_drop
    self.strength_min_clip = strength_min_clip
    self.strength_max_drop = strength_max_drop
    self.strength_max_clip = strength_max_clip
    self.hist_sigma = hist_sigma # Blur log histogram
    self.hist_cutoff= hist_cutoff # of maximal
    self.fgbg_mode = fgbg_mode #0, # average, 1 - FG, 2 - BG (3 - AUX - not used here)
    self.rnd_tile = rnd_tile # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
    self.rnd_plate = rnd_plate # 1.0 # use corr2d rendered with target disparity this far shuffled from the GT
    self.radius = radius
    self.pre_log_offs = 0.001 # of histogram maximum
    self.good_tiles = None
    ### self.files_train = self.getComboList(topdir_train, latest_version_only)
    ### self.files_test = self.getComboList(topdir_test, latest_version_only)
    self.files_train = self.getGtAuxList(topdir_train, latest_version_only)
    self.files_test = self.getGtAuxList(topdir_test, latest_version_only)
    # self.train_ds = self.loadGtAuxFiles(self.files_train)
    # self.test_ds = self.loadGtAuxFiles(self.files_test)
    # new in LWIR - all layers, including AG, FG, BG and AUX D/S pairs, RMS and RMS_SPLIT
    self.train_gtaux = self.loadGtAuxFiles(self.files_train)
    self.test_gtaux = self.loadGtAuxFiles(self.files_test)
    # reduce the full GT/AUX stacks to (disparity, strength, aux_disp) triples
    self.train_ds = self.selectDSPairFromGtaux(self.train_gtaux, self.fgbg_mode)
    self.test_ds = self.selectDSPairFromGtaux(self.test_gtaux, self.fgbg_mode)
    self.train_sweep_files, self.train_sweep_disparities = self.getMLSweepFiles(self.files_train, self.ml_subdir)
    self.test_sweep_files, self.test_sweep_disparities = self.getMLSweepFiles(self.files_test, self.ml_subdir)
    self.num_tiles = self.train_ds.shape[1]*self.train_ds.shape[2]
    # Histogram over train+test; also clips D/S in place and zeroes bad-tile strength
    self.hist, _, _ = self.getHistogramDSI(
        list_rds = [self.train_ds,self.test_ds], # combo_rds,
        disparity_bins = self.disparity_bins,
        strength_bins = self.strength_bins,
        disparity_min_drop = self.disparity_min_drop,
        disparity_min_clip = self.disparity_min_clip,
        disparity_max_drop = self.disparity_max_drop,
        disparity_max_clip = self.disparity_max_clip,
        strength_min_drop = self.strength_min_drop,
        strength_min_clip = self.strength_min_clip,
        strength_max_drop = self.strength_max_drop,
        strength_max_clip = self.strength_max_clip,
        max_main_offset = self.max_main_offset,
        normalize = True
#       no_histogram = False
        )
    # Blur the histogram in log space to avoid losing low-population bins
    log_offset = self.pre_log_offs * self.hist.max()
    h_cutoff = hist_cutoff * self.hist.max()
    lhist = np.log(self.hist + log_offset)
    blurred_lhist = gaussian_filter(lhist, sigma = self.hist_sigma)
    self.blurred_hist = np.exp(blurred_lhist) - log_offset
    self.good_tiles = self.blurred_hist >= h_cutoff
    self.blurred_hist *= self.good_tiles # set bad ones to zero
def exploreNeibs(self,
data_ds, # disparity/strength data for all files (train or test)
radius, # how far to look from center each side ( 1- 3x3, 2 - 5x5)
disp_thesh = 5.0): # reduce effective variance for higher disparities
"""
For each tile calculate difference between max and min among neighbors and number of qualifying neighbors (bad center is not removed)
data_ds may mismatch with the correlation files - correlation files have data in extrapolated areas and replaced for large difference with GT
"""
disp_min = np.empty_like(data_ds[...,0], dtype = np.float)
disp_max = np.empty_like(disp_min, dtype = np.float)
tile_neibs = np.zeros_like(disp_min, dtype = np.int)
dmin = data_ds[...,0].min()
dmax = data_ds[...,0].max()
good_tiles = self.getBB(data_ds) >= 0 # histogram index or -1 for bad tiles
side = 2 * radius + 1
for nf, ds in enumerate(data_ds):
disp = ds[...,0]
height = disp.shape[0]
width = disp.shape[1]
bad_max = np.ones((height+side, width+side), dtype=float) * dmax
bad_min = np.ones((height+side, width+side), dtype=float) * dmin
good = np.zeros((height+side, width+side), dtype=int)
#Assign centers of the array, replace bad tiles with max/min (so they will not change min/max)
bad_max[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmax)
bad_min[radius:height+radius,radius:width+radius] = np.select([good_tiles[nf]],[disp],default = dmin)
good [radius:height+radius,radius:width+radius] = good_tiles[nf]
disp_min [nf,...] = disp
disp_max [nf,...] = disp
tile_neibs[nf,...] = good_tiles[nf]
for offset_y in range(-radius, radius+1):
oy = offset_y+radius
for offset_x in range(-radius, radius+1):
ox = offset_x+radius
if offset_y or offset_x: # Skip center - already copied
np.minimum(disp_min[nf], bad_max[oy:oy+height, ox:ox+width], out=disp_min[nf])
np.maximum(disp_max[nf], bad_min[oy:oy+height, ox:ox+width], out=disp_max[nf])
tile_neibs[nf] += good[oy:oy+height, ox:ox+width]
pass
pass
pass
pass
#disp_thesh
disp_avar = disp_max - disp_min
disp_rvar = disp_avar * disp_thesh / np.maximum(disp_max, 0.001) # removing division by 0 error - those tiles will be anyway discarded
disp_var = np.select([disp_max >= disp_thesh, disp_max < disp_thesh],[disp_rvar,disp_avar])
return disp_var, tile_neibs # per file/tile: (max - min among 5x5 neibs),(number of "ggod" neib. tiles)
def assignBatchBins(self,
                    disp_bins,
                    str_bins,
                    files_per_scene = 5,   # not used here, will be used when generating batches
                    min_batch_choices=10,  # not used here, will be used when generating batches
                    max_batch_files = 10): # not used here, will be used when generating batches
    """
    for each disparity/strength combination (self.disparity_bins * self.strength_bins = 1000*100) provide number of "large"
    variable-size disparity/strength bin, or -1 if this disparity/strength combination does not seem right

    Scans histogram columns left to right, accumulating normalized counts
    into disp_bins disparity groups, then subdivides each group into
    (str_bins * disp_multi) strength bins of approximately equal
    population. Stores the result in self.hist_to_batch and returns it.
    """
    self.files_per_scene = files_per_scene
    self.min_batch_choices=min_batch_choices
    self.max_batch_files = max_batch_files
    hist_to_batch = np.zeros((self.blurred_hist.shape[0],self.blurred_hist.shape[1]),dtype=int) #zeros_like?
    # scale so the total histogram mass equals the number of batch bins
    scale_hist= (disp_bins * str_bins)/self.blurred_hist.sum()
    norm_b_hist = self.blurred_hist * scale_hist
    disp_run_tot = 0.0   # accumulated mass before the current disparity group
    disp_batch = 0       # current disparity group index
    disp=0               # current histogram disparity column
    num_batch_bins = disp_bins * str_bins
    disp_hist = np.linspace(0, num_batch_bins, disp_bins+1) # target cumulative mass per group
    batch_index = 0
    num_members = np.zeros((num_batch_bins,),int) # population per batch bin
    while disp_batch < disp_bins:
        disp_run_tot_new = disp_run_tot
        disp0 = disp # start disparity column matching disp_run_tot
        # widen the group until its cumulative mass reaches the target
        while (disp_run_tot_new < disp_hist[disp_batch+1]) and (disp < self.disparity_bins):
            disp_run_tot_new += norm_b_hist[:,disp].sum()
            disp+=1;
        disp_multi = 1
        # a very heavy column may span several disparity groups at once
        while (disp_batch < (disp_bins - 1)) and (disp_run_tot_new >= disp_hist[disp_batch+2]):
            disp_batch += 1 # only if large disp_bins and very high hist value
            disp_multi += 1
        # now disp_run_tot - before this batch disparity col
        str_bins_corr = str_bins * disp_multi # if too narrow disparity column - multiply number of strength columns
        str_bins_corr_last = str_bins_corr -1
        str_hist = np.linspace(disp_run_tot, disp_run_tot_new, str_bins_corr + 1)
        str_run_tot_new = disp_run_tot
        str_index=0
        #iterate in linescan along the column
        for si in range(self.strength_bins):
            for di in range(disp0, disp,1):
                if norm_b_hist[si,di] > 0.0 :
                    str_run_tot_new += norm_b_hist[si,di]
                    # do not increment after last to avoid precision issues
                    if (batch_index < num_batch_bins) and (num_members[batch_index] > 0) and (str_index < str_bins_corr_last) and (str_run_tot_new > str_hist[str_index+1]):
                        batch_index += 1
                        str_index += 1
                    if batch_index < num_batch_bins :
                        hist_to_batch[si,di] = batch_index
                        num_members[batch_index] += 1
                    else:
                        pass
                else:
                    hist_to_batch[si,di] = -1 # empty histogram cell - invalid combination
        batch_index += 1 # it was not incremented after the last in the column to avoid rounding error
        disp_batch += 1
        disp_run_tot = disp_run_tot_new
        pass
    self.hist_to_batch = hist_to_batch
    return hist_to_batch
def getBB(self, data_ds):
"""
for each file, each tile get histogram index (or -1 for bad tiles)
"""
## hist_to_batch = self.hist_to_batch
## files_batch_list = []
disp_step = ( self.disparity_max_clip - self.disparity_min_clip )/ self.disparity_bins
str_step = ( self.strength_max_clip - self.strength_min_clip )/ self.strength_bins
bb = np.empty_like(data_ds[...,0],dtype=int)
for findx in range(data_ds.shape[0]):
ds = data_ds[findx]
gt = ds[...,1] > 0.0 # OK
db = (((ds[...,0] - self.disparity_min_clip)/disp_step).astype(int))*gt
sb = (((ds[...,1] - self.strength_min_clip)/ str_step).astype(int))*gt
np.clip(db, 0, self.disparity_bins-1, out = db)
np.clip(sb, 0, self.strength_bins-1, out = sb)
bb[findx] = (self.hist_to_batch[sb.reshape(self.num_tiles),db.reshape(self.num_tiles)]) .reshape(db.shape[0],db.shape[1]) + (gt -1)
return bb
def makeBatchLists(self,
            data_ds      = None, # (disparity,strength) per scene, per tile #(19, 15, 20, 3)
            data_gtaux   = None, # full set of layers from GT_AUX file ("disparity","strength","rms","rms-split",...) (19, 15, 20, 10)
            disp_var     = None, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs   = None, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9
            min_var      = None, # Minimal tile variance to include
            max_var      = None, # Maximal tile variance to include
            min_neibs    = None, # Minimal number of valid tiles to include
            use_split    = False, # Select y single/multi-plane tiles (center only)
            keep_split   = False):# When sel_split, keep only multi-plane tiles (false - only single-plane)
        """
        For each scene build, for every batch bin, the list of global tile indices
        (scene_index * num_tiles + tile_index) that fall into that bin and pass all
        filters: not a border tile, enough valid neighbors, variance within range,
        single/multi-plane selection.

        Returns (list_of_file_lists, num_batch_tiles), also stored on self:
          list_of_file_lists[scene][bin] - list of global tile indices
          num_batch_tiles[scene][bin]    - number of entries in each such list
        Requires self.hist_to_batch (see getBB()).
        """
        if data_ds is None:
            data_ds = self.train_ds
        num_batch_tiles = np.empty((data_ds.shape[0],self.hist_to_batch.max()+1),dtype = int)
        # Mark tiles that are too close to the edge to have a full (2*radius+1)^2 neighborhood.
        # dtype was np.bool - that alias was removed in NumPy 1.24+, so use builtin bool.
        border_tiles = np.ones((data_ds.shape[1],data_ds.shape[2]), dtype=bool)
        if self.radius > 0:
            border_tiles[self.radius:-self.radius,self.radius:-self.radius] = False
        else:
            # BUGFIX: with radius == 0 the slice above is [0:-0] == empty, so every tile
            # stayed marked as border and was dropped. With no neighbors required,
            # no tile is a border tile.
            border_tiles[...] = False
        border_tiles = border_tiles.reshape(self.num_tiles)
        bb = self.getBB(data_ds) # (19, 15, 20) batch bin per scene/tile, -1 for invalid tiles
        use_neibs = not ((disp_var is None) or (disp_neibs is None) or (min_var is None) or (max_var is None) or (min_neibs is None))
        list_of_file_lists=[]
        for findx in range(data_ds.shape[0]):
            foffs = findx * self.num_tiles # offset of this scene's tiles in the global tile index
            lst = []
            for i in range (self.hist_to_batch.max()+1):
                lst.append([])
            if use_neibs:
                disp_var_tiles =   disp_var[findx].reshape(self.num_tiles)   # was [y,x]
                disp_neibs_tiles = disp_neibs[findx].reshape(self.num_tiles) # was [y,x]
            if use_split:
                if keep_split:
                    drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] <= data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
                else:
                    drop_tiles = (data_gtaux[findx,:,:,ijt.IJFGBG.RMS] > data_gtaux[findx][...,ijt.IJFGBG.RMS_SPLIT]).reshape(self.num_tiles)
            for n, indx in enumerate(bb[findx].reshape(self.num_tiles)): # was [y,x]
                if indx >= 0:
                    if border_tiles[n]:
                        continue # do not use border tiles
                    if use_neibs:
                        if disp_neibs_tiles[n] < min_neibs:
                            continue # too few neighbors
                        if not disp_var_tiles[n] >= min_var:
                            continue #too small variance
                        if not disp_var_tiles[n] < max_var:
                            continue #too large variance
                    if use_split:
                        if drop_tiles[n]:
                            continue #failed multi/single plane for DSI
                    lst[indx].append(foffs + n)
            lst_arr=[]
            for i,l in enumerate(lst):
                lst_arr.append(l)
                num_batch_tiles[findx,i] = len(l)
            list_of_file_lists.append(lst_arr)
        self.list_of_file_lists= list_of_file_lists
        self.num_batch_tiles = num_batch_tiles
        return list_of_file_lists, num_batch_tiles
#todo: only use other files if there are not enough choices in the main file!
'''
Add random files to the list until each (now 40) of the full_num_choices entries
offers more than the minimal number (now 10) of variants to choose from
'''
def augmentBatchFileIndices(self,
                            seed_index,
                            min_choices=None,
                            max_files = None,
                            set_ds = None
                            ):
        """
        Select a set of scene (file) indices, starting from seed_index and adding
        random extra scenes until every batch bin offers at least min_choices
        candidate tiles, then randomly pick one tile per batch bin.

        seed_index  -- index of the scene that seeds the batch (always included)
        min_choices -- minimal number of candidate tiles per batch bin
                       (None - use self.min_batch_choices)
        max_files   -- maximal number of scenes to combine (None - use self.max_batch_files)
        set_ds      -- dataset, used only for its number of scenes (None - use self.train_ds)
        Returns (flist, file_tiles): selected scene indices and, for each of them,
        a sorted int array of tile indices to use from that scene.
        Requires self.num_batch_tiles and self.list_of_file_lists (see makeBatchLists()).
        """
        if min_choices is None:
            min_choices = self.min_batch_choices
        if max_files is None:
            max_files = self.max_batch_files
        if set_ds is None:
            set_ds = self.train_ds
        # candidate counts per batch bin, accumulated over the selected scenes
        full_num_choices = self.num_batch_tiles[seed_index].copy()
        flist = [seed_index]
        all_choices = list(range(self.num_batch_tiles.shape[0]))
        all_choices.remove(seed_index)
        # add random scenes until every bin has enough candidates (or max_files reached)
        for _ in range (max_files-1):
            if full_num_choices.min() >= min_choices:
                break
            findx = np.random.choice(all_choices)
            flist.append(findx)
            all_choices.remove(findx)
            full_num_choices += self.num_batch_tiles[findx]
        file_tiles_sparse = [[] for _ in set_ds] #list of empty lists for each train scene (will be sparse)
        for nt in range(self.num_batch_tiles.shape[1]): #number of tiles per batch (not counting ml file variant) // radius2 - 40
            tl = []
            nchoices = 0
            # gather candidate tile lists for this bin from the selected scenes
            for findx in flist:
                if (len(self.list_of_file_lists[findx][nt])):
                    tl.append(self.list_of_file_lists[findx][nt])
                nchoices+= self.num_batch_tiles[findx][nt]
                if nchoices >= min_choices: # use minimum of extra files
                    break;
            # fallback: this bin has no candidates at all - borrow from a random other bin
            while len(tl)==0:
##                print("** BUG! could not find a single candidate from files ",flist," for cell ",nt)
##                print("trying to use some other cell")
                nt1 = np.random.randint(0,self.num_batch_tiles.shape[1])
                for findx in flist:
                    if (len(self.list_of_file_lists[findx][nt1])):
                        tl.append(self.list_of_file_lists[findx][nt1])
                    nchoices+= self.num_batch_tiles[findx][nt1]
                    if nchoices >= min_choices: # use minimum of extra files
                        break;
            tile = np.random.choice(np.concatenate(tl))
            """
            Traceback (most recent call last):
            File "explore_data2.py", line 1041, in
            ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds, radius = RADIUS)
            File "explore_data2.py", line 761, in writeTFRewcordsEpoch
            corr2d_batch, target_disparity_batch, gt_ds_batch = ex_data.prepareBatchData(ml_list, seed_index, min_choices=None, max_files = None, ml_num = None, set_ds = set_ds, radius = radius)
            File "explore_data2.py", line 556, in prepareBatchData
            flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
            File "explore_data2.py", line 494, in augmentBatchFileIndices
            tile = np.random.choice(np.concatenate(tl))
            ValueError: need at least one array to concatenate
            """
#            print (nt, tile, tile//self.num_tiles, tile % self.num_tiles)
            if not type (tile) is np.int64:
                print("tile=",tile)
            '''
            List
            '''
            # global tile index -> (scene index, tile-in-scene index)
            file_tiles_sparse[tile//self.num_tiles].append(tile % self.num_tiles)
        file_tiles = []
        for findx in flist:
            file_tiles.append(np.sort(np.array(file_tiles_sparse[findx],dtype=int)))
        return flist, file_tiles # file indices, list if tile indices for each file
def getMLList(self, ml_subdir, flist):
        """
        Glob the ML tiff files matching self.ml_pattern for each scene file.

        ml_subdir -- subdirectory (glob pattern allowed) that holds the ML files
        flist     -- list of scene file paths; only their directories are used
        Returns a list (one entry per scene) of lists of matching file paths.
        """
        return [glob.glob(os.path.join(os.path.dirname(scene_path), ml_subdir, self.ml_pattern))
                for scene_path in flist]
def getBatchData(
            self,
            flist,
            ml_list,
            ml_num = None ): # 0 - use all ml files for the scene, >0 select random number
        """
        Pick ML file paths for each selected scene.

        flist   -- list of scene indices
        ml_list -- per-scene lists of ML file paths (as from getMLList())
        ml_num  -- 0 (or >= number of available files): use all files of the scene;
                   >0: randomly pick that many distinct files;
                   None: use self.files_per_scene
        Returns a list of per-scene lists of chosen file paths.
        """
        if ml_num is None:
            ml_num = self.files_per_scene
        ml_all_files = []
        for scene_index in flist:
            available = list(range(len(ml_list[scene_index])))
            if 0 < ml_num < len(available):
                # draw ml_num distinct indices one at a time, without replacement
                chosen = []
                while len(chosen) < ml_num:
                    pick = np.random.choice(available)
                    chosen.append(pick)
                    available.remove(pick)
            else:
                chosen = available
            ml_all_files.append([ml_list[scene_index][k] for k in chosen])
        return ml_all_files
def prepareBatchData(self,
                     ml_list,
                     seed_index,
                     min_choices=None,
                     max_files = None,
                     ml_num = None,
                     set_ds = None,
                     radius = 0):
        """
        Assemble one batch of 2D correlation tiles, target disparities and ground
        truth disparity/strength pairs read from ML correlation tiff files.

        set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
        target_disparity for correlation files may be different than data_ds - replaced during ImageJ plugin
        export if main camera and the rig (GT) converged on different objects for the same tile.

        ml_list     -- per-scene lists of ML correlation file paths
        seed_index  -- scene index used to seed tile selection
        min_choices -- minimal candidates per batch bin (None - use self.min_batch_choices)
        max_files   -- maximal number of scenes to combine (None - use self.max_batch_files)
        ml_num      -- number of ML files per scene (None - use self.files_per_scene)
        set_ds      -- disparity/strength dataset (None - use self.train_ds)
        radius      -- neighborhood radius; each selected tile expands to a
                       (2*radius+1)^2 cluster of tiles
        Returns (corr2d_batch, target_disparity_batch, gt_ds_batch), also stored
        on self.
        """
        if min_choices is None:
            min_choices = self.min_batch_choices #10
        if max_files is None:
            max_files = self.max_batch_files #10
        if ml_num is None:
            ml_num = self.files_per_scene #5
        if set_ds is None:
            set_ds = self.train_ds
        tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
        height = set_ds.shape[1]
        width = set_ds.shape[2]
        width_m1 = width-1
        height_m1 = height-1
        corr_layers = ['hor-pairs', 'vert-pairs','diagm-pair', 'diago-pair']
        flist,tiles = self.augmentBatchFileIndices(seed_index, min_choices, max_files, set_ds)
        ml_all_files = self.getBatchData(
            flist,
            ml_list,
            0) # ml_num) # 0 - use all ml files for the scene, >0 select random number
        if self.debug_level > 1:
            print ("==============",seed_index, flist)
            for i, _ in enumerate(flist):
                print(i,"\n".join(ml_all_files[i]))
                print(tiles[i])
        total_tiles = 0
        for i, t in enumerate(tiles):
            total_tiles += len(t) # tiles per scene * offset files per scene
        if self.debug_level > 1:
            print("Tiles in the batch=",total_tiles)
        # corr2d_batch is allocated lazily below, once the correlation length is known
        corr2d_batch = None # np.empty((total_tiles, len(corr_layers),81))
        gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
        target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
        start_tile = 0
        for nscene, scene_files in enumerate(ml_all_files):
            '''
            Create tiles list including neighbors
            '''
            full_tiles = np.empty([len(tiles[nscene]) * tiles_in_sample], dtype = int)
            indx = 0;
            for i, nt in enumerate(tiles[nscene]):
                ty = nt // width
                tx = nt % width
                for dy in range (-radius, radius+1):
                    y = np.clip(ty+dy,0,height_m1) # clamp neighbor row to the image
                    for dx in range (-radius, radius+1):
                        x = np.clip(tx+dx,0,width_m1) # clamp neighbor column to the image
                        full_tiles[indx] = y * width + x
                        indx += 1
            """
            Assign tiles to several correlation files
            """
            file_tiles = []
            file_indices = []
            for _ in scene_files:
                file_tiles.append([])
            num_scene_files = len(scene_files)
            # randomly distribute the cluster tiles among this scene's ML file variants
            for t in full_tiles:
                fi = np.random.randint(0, num_scene_files) #error here - probably wrong ml file pattern (no files matched)
                file_tiles[fi].append(t)
                file_indices.append(fi)
            corr2d_list = []
            target_disparity_list = []
            gt_ds_list = []
            for fi, path in enumerate (scene_files):
                img = ijt.imagej_tiff(path, corr_layers, tile_list=file_tiles[fi]) #'hor-pairs' is not in list
                corr2d_list.append (img.corr2d)
                target_disparity_list.append(img.target_disparity)
                gt_ds_list.append (img.gt_ds)
            # merge per-file results back into batch order (img_indices tracks the
            # read position within each file's tile list)
            img_indices = [0] * len(scene_files)
            for i, fi in enumerate(file_indices):
                ti = img_indices[fi]
                img_indices[fi] += 1
                if corr2d_batch is None:
                    corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers), corr2d_list[fi].shape[-1]))
                gt_ds_batch [start_tile] = gt_ds_list[fi][ti]
                target_disparity_batch [start_tile] = target_disparity_list[fi][ti]
                corr2d_batch [start_tile] = corr2d_list[fi][ti]
                start_tile += 1
        """
        Sometimes get bad tile in ML file that was not bad in COMBO-DSI
        Need to recover
        np.argwhere(np.isnan(target_disparity_batch))
        """
        bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
        if (len(bad_tiles)>0):
            print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
        self.corr2d_batch = corr2d_batch
        self.target_disparity_batch = target_disparity_batch
        self.gt_ds_batch = gt_ds_batch
        return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpoch(self, tfr_filename, ml_list, files_list = None, set_ds= None, radius = 0, num_scenes = None): # test_set=False):
        """
        Generate one TFRecords file from an epoch of batches.

        tfr_filename -- output path ('.tfrecords' is appended if missing, spaces
                        replaced by '_'); if the file already exists, generation
                        is skipped
        ml_list      -- per-scene lists of ML correlation file paths
        files_list   -- list of scene files (None - use self.files_train)
        set_ds       -- disparity/strength data (None - use self.train_ds)
        radius       -- tile cluster radius, cluster is (2*radius+1)^2 tiles
        num_scenes   -- number of scene seeds to generate (None - len(files_list))
        Writes one tf.train.Example per tile with 'corr2d', 'target_disparity'
        and 'gt_ds' float features.
        """
        # open the TFRecords file
        if not '.tfrecords' in tfr_filename:
            tfr_filename += '.tfrecords'
        tfr_filename=tfr_filename.replace(' ','_')
        if files_list is None:
            files_list = self.files_train
        if set_ds is None: # (19, 15, 20, 3)
            set_ds = self.train_ds
        try:
            os.makedirs(os.path.dirname(tfr_filename))
            print("Created directory "+os.path.dirname(tfr_filename))
        except OSError: # was a bare 'except:' - narrowed to directory-creation errors
            print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
            pass
        #skip writing if file exists - it will be possible to continue or run several instances
        if os.path.exists(tfr_filename):
            print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
            return
        writer = tf.io.TFRecordWriter(tfr_filename)
        if num_scenes is None:
            num_scenes = len(files_list)
        if len(files_list) <= num_scenes:
            #create and shuffle repetitive list of files of num_scenes.length
            seed_list = np.arange(num_scenes) % len(files_list)
            np.random.shuffle(seed_list)
        else:
            #shuffle all files and use first num_scenes of them
            seed_list = np.arange(len(files_list))
            np.random.shuffle(seed_list)
            seed_list = seed_list[:num_scenes]
        np.random.shuffle(seed_list)
        cluster_size = (2 * radius + 1) * (2 * radius + 1)
        for nscene, seed_index in enumerate(seed_list):
            # BUGFIX: was ex_data.prepareBatchData - 'ex_data' is a global that only
            # exists when this module runs as a script; use self so the method works
            # on any instance
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchData( #'hor-pairs' is not in list
                ml_list,
                seed_index,
                min_choices=None,
                max_files = None,
                ml_num = None,
                set_ds = set_ds, #DS data from all GT_AX files scanned
                radius = radius)
            #shuffles tile clusters in a batch (tiles stay together within a cluster)
            tiles_in_batch = corr2d_batch.shape[0]
            clusters_in_batch = tiles_in_batch // cluster_size
            permut = np.random.permutation(clusters_in_batch)
            corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
            target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
            gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
            corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
            target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
            gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
            if nscene == 0:
                # feature builders are created once, from the first batch's dtypes
                dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
                dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
                dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
            for i in range(tiles_in_batch):
                x = corr2d_batch_shuffled[i].astype(np.float32)
                y = target_disparity_batch_shuffled[i].astype(np.float32)
                z = gt_ds_batch_shuffled[i].astype(np.float32)
                d_feature = {'corr2d': dtype_feature_corr2d(x),
                             'target_disparity':dtype_target_disparity(y),
                             'gt_ds': dtype_feature_gt_ds(z)}
                example = tf.train.Example(features=tf.train.Features(feature=d_feature))
                writer.write(example.SerializeToString())
            if (self.debug_level > 0):
                print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
        writer.close()
        sys.stdout.flush()
def prepareBatchDataLwir(self,
                         ds_gt,             # ground truth disparity/strength
                         sweep_files,
                         sweep_disparities,
                         seed_index,
                         min_choices=None,
                         max_files = None,
                         set_ds = None,
                         radius = 0,
                         rnd_tile = 0.0, ## disparity random for each tile
                         rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
        """
        Assemble one batch of LWIR 2D correlation tiles, target disparities and
        ground truth disparity/strength pairs from disparity sweep files.

        set_ds (from COMBO_DSI) is used to select tile clusters, exported values come from correlation files.
        target_disparity for correlation files may be different than data_ds - replaced during ImageJ plugin
        export if main camera and the rig (GT) converged on different objects for the same tile.

        ds_gt             -- ground truth disparity/strength per scene/tile
        sweep_files       -- per-scene lists of disparity sweep tiff paths
        sweep_disparities -- per-scene arrays of sweep disparities (one per file)
        seed_index        -- scene index used to seed tile selection
        min_choices       -- minimal candidates per batch bin (None - self.min_batch_choices)
        max_files         -- maximal number of scenes to combine (None - self.max_batch_files)
        set_ds            -- dataset for tile selection (None - self.train_ds)
        radius            -- tile cluster radius, cluster is (2*radius+1)^2 tiles
        rnd_tile          -- +/- random disparity offset, independent per tile
        rnd_plate         -- +/- random disparity offset, common per tile cluster
        Returns (corr2d_batch, target_disparity_batch, gt_ds_batch), also stored
        on self.
        """
        if min_choices is None:
            min_choices = self.min_batch_choices #10
        if max_files is None:
            max_files = self.max_batch_files #10
###        if ml_num is None:
###            ml_num = self.files_per_scene #5 ????
        if set_ds is None:
            set_ds = self.train_ds
        tiles_in_sample = (2 * radius + 1) * (2 * radius + 1)
        height = set_ds.shape[1]
        width = set_ds.shape[2]
        width_m1 = width-1
        height_m1 = height-1
        corr_layers = ['hor-aux', 'vert-aux','diagm-aux', 'diago-aux']
        flist0, tiles0 = self.augmentBatchFileIndices(
            seed_index,
            min_choices,
            max_files,
            set_ds)
        # drop scenes that got no tiles assigned
        flist = []
        tiles = []
        for f,t in zip (flist0,tiles0):
            if len(t):
                flist.append(f)
                tiles.append(t)
        total_tiles = 0
        for i, t in enumerate(tiles):
            total_tiles += len(t) # tiles per scene * offset files per scene
        if self.debug_level > 1:
            print("Tiles in the batch=",total_tiles)
        corr2d_batch = np.empty((total_tiles * tiles_in_sample, len(corr_layers),81)) # fix 81 t0 correct
        gt_ds_batch = np.empty((total_tiles * tiles_in_sample, 2), dtype=float)
        target_disparity_batch = np.empty((total_tiles * tiles_in_sample, ), dtype=float)
        start_tile = 0
        for scene, scene_tiles in zip(flist, tiles):
            '''
            Create tiles list including neighbors
            '''
            full_tiles = np.empty([len(scene_tiles) * tiles_in_sample], dtype = int)
            indx = 0;
            for i, nt in enumerate(scene_tiles):
                ty = nt // width
                tx = nt % width
                for dy in range (-radius, radius+1):
                    y = np.clip(ty+dy,0,height_m1) # clamp neighbor row to the image
                    for dx in range (-radius, radius+1):
                        x = np.clip(tx+dx,0,width_m1) # clamp neighbor column to the image
                        full_tiles[indx] = y * width + x
                        indx += 1
            scene_ds = ds_gt[scene,:,:,0:2].reshape(height * width,-1)
            disparity_tiles = scene_ds[full_tiles,0] # GT DSI for each of the scene tiles
            gtds_tiles = scene_ds[full_tiles] # DS pairs for each tile
            gt_ds_batch[start_tile:start_tile+gtds_tiles.shape[0]] = gtds_tiles
            if rnd_plate > 0.0:
                for i in range(len(scene_tiles)):
                    disparity_tiles[i*tiles_in_sample : (i+1)*tiles_in_sample] += np.random.random() * 2 * rnd_plate - rnd_plate
            if rnd_tile > 0.0:
                disparity_tiles += np.random.random(disparity_tiles.shape[0]) * 2 * rnd_tile - rnd_tile
            # find target disparity approximations from the available sweep files
            sweep_indices = np.abs(np.add.outer(sweep_disparities[scene], -disparity_tiles)).argmin(0)
            sfs = list(set(sweep_indices))
            # BUGFIX: was 'sfs.sort' (a bare attribute access - a no-op); actually
            # sort the unique sweep indices (files)
            sfs.sort()
            #read required tiles from required files, place results where they belong
            for sf in sfs:
                #find which of the full_tiles belong to this file
                this_file_indices = np.nonzero(sweep_indices == sf)[0] #Returns a tuple of arrays, one for each dimension of a, containing the indices of the non-zero elements in that dimension.
                tiles_to_read = full_tiles[this_file_indices]
                where_to_put = this_file_indices + start_tile # index in the batch array (1000 tiles)
                path = sweep_files[scene][sf]
                img = ijt.imagej_tiff(path, corr_layers, tile_list=tiles_to_read)
                corr2d_batch[where_to_put] = img.corr2d
                target_disparity_batch[where_to_put] = img.target_disparity
                pass
            start_tile += full_tiles.shape[0]
            pass
        bad_tiles = np.argwhere(np.isnan(target_disparity_batch))
        if (len(bad_tiles)>0):
            print ("*** Got %d bad tiles in a batch, no code to replace :-("%(len(bad_tiles)))
        self.corr2d_batch = corr2d_batch
        self.target_disparity_batch = target_disparity_batch
        self.gt_ds_batch = gt_ds_batch
        return corr2d_batch, target_disparity_batch, gt_ds_batch
def writeTFRewcordsEpochLwir(self,
                             tfr_filename,
                             sweep_files,
                             sweep_disparities,
                             files_list = None,
                             set_ds= None,
                             radius = 0,
                             num_scenes = None,
                             rnd_tile = 0.0, ## disparity random for each tile
                             rnd_plate = 0.0):## disparity random for each plate (now 25 tiles)
        """
        Generate one TFRecords file from an epoch of LWIR batches.

        tfr_filename      -- output path ('.tfrecords' appended if missing, spaces
                             replaced by '_'). NOTE: the "skip if file exists" early
                             return is intentionally disabled below - an existing
                             file is overwritten after the warning.
        sweep_files       -- per-scene lists of disparity sweep tiff paths
        sweep_disparities -- per-scene arrays of sweep disparities (one per file)
        files_list        -- list of scene files (None - use self.files_train)
        set_ds            -- disparity/strength data (None - use self.train_ds)
        radius            -- tile cluster radius, cluster is (2*radius+1)^2 tiles
        num_scenes        -- number of scene seeds to generate (None - len(files_list))
        rnd_tile          -- +/- random disparity offset per tile
        rnd_plate         -- +/- random disparity offset per tile cluster
        Writes one tf.train.Example per tile with 'corr2d', 'target_disparity'
        and 'gt_ds' float features.
        """
        # open the TFRecords file
        if not '.tfrecords' in tfr_filename:
            tfr_filename += '.tfrecords'
        tfr_filename=tfr_filename.replace(' ','_')
        if files_list is None:
            files_list = self.files_train
        if set_ds is None: # (19, 15, 20, 3)
            set_ds = self.train_ds
        try:
            os.makedirs(os.path.dirname(tfr_filename))
            print("Created directory "+os.path.dirname(tfr_filename))
        except OSError: # was a bare 'except:' - narrowed to directory-creation errors
            print("Directory "+os.path.dirname(tfr_filename)+" already exists, using it")
            pass
        #skip writing if file exists - it will be possible to continue or run several instances
        if os.path.exists(tfr_filename):
            print(tfr_filename+" already exists, skipping generation. Please remove and re-run this program if you want to regenerate the file")
#            return # Temporary disable
        writer = tf.io.TFRecordWriter(tfr_filename)
        if num_scenes is None:
            num_scenes = len(files_list)
        if len(files_list) <= num_scenes:
            #create and shuffle repetitive list of files of num_scenes.length
            seed_list = np.arange(num_scenes) % len(files_list)
            np.random.shuffle(seed_list)
        else:
            #shuffle all files and use first num_scenes of them
            seed_list = np.arange(len(files_list))
            np.random.shuffle(seed_list)
            seed_list = seed_list[:num_scenes]
        np.random.shuffle(seed_list)
        cluster_size = (2 * radius + 1) * (2 * radius + 1)
        for nscene, seed_index in enumerate(seed_list):
            # BUGFIX: was ex_data.prepareBatchDataLwir - 'ex_data' is a global that
            # only exists when this module runs as a script; use self so the method
            # works on any instance
            corr2d_batch, target_disparity_batch, gt_ds_batch = self.prepareBatchDataLwir( #'hor-pairs' is not in list
                ds_gt = set_ds,
                sweep_files = sweep_files,
                sweep_disparities = sweep_disparities,
                seed_index = seed_index,
                min_choices = None,
                max_files = None,
###                ml_num = None,
                set_ds = set_ds, #DS data from all GT_AX files scanned
                radius = radius,
                rnd_tile = rnd_tile, ## disparity random for each tile
                rnd_plate = rnd_plate)## disparity random for each plate (now 25 tiles)
            #shuffles tile clusters in a batch (tiles stay together within a cluster)
            tiles_in_batch = corr2d_batch.shape[0]
            clusters_in_batch = tiles_in_batch // cluster_size
            permut = np.random.permutation(clusters_in_batch)
            corr2d_clusters = corr2d_batch. reshape((clusters_in_batch,-1))
            target_disparity_clusters = target_disparity_batch.reshape((clusters_in_batch,-1))
            gt_ds_clusters = gt_ds_batch. reshape((clusters_in_batch,-1))
            corr2d_batch_shuffled = corr2d_clusters[permut]. reshape((tiles_in_batch, -1))
            target_disparity_batch_shuffled = target_disparity_clusters[permut].reshape((tiles_in_batch, -1))
            gt_ds_batch_shuffled = gt_ds_clusters[permut]. reshape((tiles_in_batch, -1))
            if nscene == 0:
                # feature builders are created once, from the first batch's dtypes
                dtype_feature_corr2d = _dtype_feature(corr2d_batch_shuffled)
                dtype_target_disparity = _dtype_feature(target_disparity_batch_shuffled)
                dtype_feature_gt_ds = _dtype_feature(gt_ds_batch_shuffled)
            for i in range(tiles_in_batch):
                x = corr2d_batch_shuffled[i].astype(np.float32)
                y = target_disparity_batch_shuffled[i].astype(np.float32)
                z = gt_ds_batch_shuffled[i].astype(np.float32)
                d_feature = {'corr2d': dtype_feature_corr2d(x),
                             'target_disparity':dtype_target_disparity(y),
                             'gt_ds': dtype_feature_gt_ds(z)}
                example = tf.train.Example(features=tf.train.Features(feature=d_feature))
                writer.write(example.SerializeToString())
            if (self.debug_level > 0):
                print_time("Scene %d (%d) of %d -> %s"%(nscene, seed_index, len(seed_list), tfr_filename))
        writer.close()
        sys.stdout.flush()
def showVariance(self,
            rds_list,           # list of disparity/strength files, such as training, testing
            disp_var_list,      # list of disparity variance files. Same shape(but last dim) as rds_list
            num_neibs_list,     # list of number of tile neibs files. Same shape(but last dim) as rds_list
            variance_min = 0.0,
            variance_max = 1.5,
            neibs_min = 9,
            #Same parameters as for the histogram
#            disparity_bins = 1000,
#            strength_bins = 100,
#            disparity_min_drop = -0.1,
#            disparity_min_clip = -0.1,
#            disparity_max_drop = 100.0,
#            disparity_max_clip = 100.0,
#            strength_min_drop = 0.1,
#            strength_min_clip = 0.1,
#            strength_max_drop = 1.0,
#            strength_max_clip = 0.9,
            normalize = False): # True):
        """
        Display a 2D histogram (strength vs disparity) of tiles that pass the
        disparity variance and neighbor-count filters.

        rds_list       -- list of per-scene disparity/strength arrays (e.g. train, test)
        disp_var_list  -- matching list of per-tile disparity variance arrays
        num_neibs_list -- matching list of per-tile valid-neighbor counts
        variance_min   -- include tiles with variance >= this value
        variance_max   -- include tiles with variance < this value
        neibs_min      -- include tiles with at least this many valid neighbors
        normalize      -- passed to np.histogram2d as 'normed'
        Shows a matplotlib figure; no return value.
        """
        good_tiles_list=[]
        for nf, combo_rds in enumerate(rds_list):
            disp_var = disp_var_list[nf]
            num_neibs = num_neibs_list[nf]
            good_tiles = np.empty((combo_rds.shape[0], combo_rds.shape[1],combo_rds.shape[2]), dtype=bool)
            for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
                ds = combo_rds[ids]
                disparity = ds[...,0]
                strength = ds[...,1]
                variance = disp_var[ids]
                neibs = num_neibs[ids]
                # combine all drop/keep conditions into a single boolean mask
                good_tiles[ids] = disparity >= self.disparity_min_drop
                good_tiles[ids] &= disparity <= self.disparity_max_drop
                good_tiles[ids] &= strength >= self.strength_min_drop
                good_tiles[ids] &= strength <= self.strength_max_drop
                good_tiles[ids] &= neibs >= neibs_min
                good_tiles[ids] &= variance >= variance_min
                good_tiles[ids] &= variance < variance_max
                disparity = np.nan_to_num(disparity, copy = False) # to be able to multiply by 0.0 in mask | copy=False, then out=disparity all done in-place
                strength = np.nan_to_num(strength, copy = False) # likely should never happen
#                np.clip(disparity, self.disparity_min_clip, self.disparity_max_clip, out = disparity)
#                np.clip(strength, self.strength_min_clip, self.strength_max_clip, out = strength)
            good_tiles_list.append(good_tiles)
        combo_rds = np.concatenate(rds_list)
#        hist, xedges, yedges = np.histogram2d( # xedges, yedges - just for debugging
        hist, _, _ = np.histogram2d( # xedges, yedges - just for debugging
            x = combo_rds[...,1].flatten(),
            y = combo_rds[...,0].flatten(),
            bins= (self.strength_bins, self.disparity_bins),
            range= ((self.strength_min_clip,self.strength_max_clip),(self.disparity_min_clip,self.disparity_max_clip)),
            normed= normalize, # NOTE(review): 'normed' was removed in NumPy 1.24 - switch to 'density' on modern NumPy
            weights= np.concatenate(good_tiles_list).flatten())
        mytitle = "Disparity_Strength variance histogram"
        fig = plt.figure()
        fig.canvas.set_window_title(mytitle) # NOTE(review): deprecated since matplotlib 3.4 - fig.canvas.manager.set_window_title on newer versions
        fig.suptitle("Min variance = %f, max variance = %f, min neibs = %d"%(variance_min, variance_max, neibs_min))
#        plt.imshow(hist, vmin=0, vmax=.1 * hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
        plt.imshow(hist, vmin=0.0, vmax=300.0)#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
        plt.colorbar(orientation='horizontal') # location='bottom')
#        for i, combo_rds in enumerate(rds_list):
#            for ids in range (combo_rds.shape[0]): #iterate over all scenes ds[2][rows][cols]
#                combo_rds[ids][...,1]*= good_tiles_list[i][ids]
#        return hist, xedges, yedges
#MAIN
if __name__ == "__main__":
LATEST_VERSION_ONLY = True
try:
topdir_train = sys.argv[1]
except IndexError:
# topdir_train = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train"#test" #all/"
## topdir_train = "/data_ssd/data_sets/train_mlr32_18d"
## topdir_train = '/data_ssd/data_sets/test_only'# ''
### topdir_train = '/data_ssd/data_sets/train_set2'# ''
topdir_train = '/data_ssd/lwir_sets/lwir_train1'# ''
# tf_data_5x5_main_10_heur
try:
topdir_test = sys.argv[2]
except IndexError:
# topdir_test = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/test"#test" #all/"
# topdir_test = "/data_ssd/data_sets/test_mlr32_18d"
## topdir_test = '/data_ssd/data_sets/test_only'
### topdir_test = '/data_ssd/data_sets/test_set2'
topdir_test = '/data_ssd/lwir_sets/lwir_test1'
try:
pathTFR = sys.argv[3]
except IndexError:
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# pathTFR = "/home/eyesis/x3d_data/data_sets/tf_data_5x5" #no trailing "/"
### pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_13_heur"
pathTFR = '/data_ssd/lwir_sets/tf_data_5x5_01'
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
try:
ml_subdir = sys.argv[4]
except IndexError:
# ml_subdir = "ml"
# ml_subdir = "mlr32_18a"
# ml_subdir = "mlr32_18d"
# ml_subdir = "{ml32,mlr32_18d}"
ml_subdir = "ml*"
try:
ml_pattern = sys.argv[5]
except IndexError:
### ml_pattern = "*-ML_DATA*MAIN.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
ml_pattern = "*-ML_DATA*-D*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_10_heur"
#1562390086_121105-ML_DATA-32B-AOT-FZ0.03-D00.00000.tiff
## ml_pattern = "*-ML_DATA*MAIN_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_11_rnd"
## ml_pattern = "*-ML_DATA*RIG_RND*.tiff" ## pathTFR = "/data_ssd/data_sets/tf_data_5x5_main_12_rigrnd"
## ML_PATTERN = "*-ML_DATA*RIG_RND*.tiff"
#1527182801_296892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
# pathTFR = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/tf_data_3x3b" #no trailing "/"
# test_corr = '/home/eyesis/x3d_data/models/var_main/www/html/x3domlet/models/all-clean/overlook/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # overlook
# test_corr = '/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
# test_corr = '/home/eyesis/x3d_data/models/dsi_combo_and_ml_all/state_street/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff' # State Street
"""
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff
/data_ssd/models/plane_1527182801/1527182805_696892/v02/mlr32_18d/1527182805_696892-ML_DATA-32B-O-FZ0.05-MAIN.tiff
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18a/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18a/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18a/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18a/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18a/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18a/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256816_150165/v02/mlr32_18c/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527256858_150165/v01/mlr32_18c/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182802_096892/v02/mlr32_18c/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182805_096892/v02/mlr32_18c/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527182810_096892/v02/mlr32_18c/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-MAIN_RND2.00000.tiff', # far plane
]
test_corrs = [
'/data_ssd/data_sets/test_mlr32_18d/1527257933_150165/v04/mlr32_18d/1527257933_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # overlook
'/data_ssd/data_sets/test_mlr32_18d/1527256816_150165/v02/mlr32_18d/1527256816_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527256858_150165/v01/mlr32_18d/1527256858_150165-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # State Street
'/data_ssd/data_sets/test_mlr32_18d/1527182802_096892/v02/mlr32_18d/1527182802_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # near plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182805_096892/v02/mlr32_18d/1527182805_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # medium plane"
'/data_ssd/data_sets/test_mlr32_18d/1527182810_096892/v02/mlr32_18d/1527182810_096892-ML_DATA-32B-O-FZ0.05-RIG_RND2.00000.tiff', # far plane
]
"""
# These images are made with large random offset
# NOTE(review): older hard-coded list of EO test scene files, kept (as an
# unused string literal) for reference only - test_corrs is reset to [] below.
'''
test_corrs = [
'/data_ssd/data_sets/test_only/1527258897_071435/v02/ml32/1527258897_071435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257894_750165/v02/ml32/1527257894_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257406_950165/v02/ml32/1527257406_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257757_950165/v02/ml32/1527257757_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257370_950165/v02/ml32/1527257370_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_950165/v02/ml32/1527257235_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_350165/v02/ml32/1527257235_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527259003_271435/v02/ml32/1527259003_271435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257787_950165/v02/ml32/1527257787_950165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_150165/v02/ml32/1527257235_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_750165/v02/ml32/1527257235_750165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527258936_671435/v02/ml32/1527258936_671435-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257244_350165/v02/ml32/1527257244_350165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
'/data_ssd/data_sets/test_only/1527257235_550165/v02/ml32/1527257235_550165-ML_DATA-32B-O-FZ0.05-MAIN.tiff',
]
'''
test_corrs = []
#1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN-RND2.00000.tiff
#/home/eyesis/x3d_data/data_sets/test_mlr32_18a/1527257933_150165/v04/mlr32_18c/1527257933_150165-ML_DATA-32B-O-FZ0.05-MAIN.tiff
# LWIR image set directories (model/version/ml_dir) used for full-image test export
test_sets = ["/data_ssd/lwir_sets/lwir_test1/1562390086_121105/v01/ml32"]
#Parameters to generate neighbors data. Set radius to 0 to generate single-tile
TEST_SAME_LENGTH_AS_TRAIN = False # True # make test to have same number of entries as train ones
FIXED_TEST_LENGTH = None # number of test scenes to output (used when making test only from few or a single test file)
RADIUS = 2 # tile neighborhood radius: (2*RADIUS+1)x(2*RADIUS+1) plate, 2 -> 5x5
FRAC_NEIBS_VALID = 0.55 # minimal fraction of valid neighbor tiles #LWIR new
MIN_NEIBS = (2 * RADIUS + 1) * (2 * RADIUS + 1) # all tiles in the window valid (25 for RADIUS == 2)
MIN_NEIBS = round (MIN_NEIBS * FRAC_NEIBS_VALID)
VARIANCE_THRESHOLD = 0.4 # 1.5 # disparity variance threshold splitting "flat" / "edgy" tiles
VARIANCE_SCALE_DISPARITY = 5.0 #Scale variance if average disparity is above this
NUM_TRAIN_SETS = 32 # 8 # number of train TFRecord epochs to generate
FGBGMODE_TEST = 1 # 0 - average, 1 - FG, 2 - BG, 3 - AUX
FGBGMODE_TRAIN = 1 # 0 - average, 1 - FG, 2 - BG
RND_AMPLIUDE_TEST = 0.5 # present corr2d rendered +/- this far from the GT
RND_AMPLIUDE_TRAIN_TILE = 0.5 # train with corr2d rendered +/- this far from the GT - independent for each tile component
RND_AMPLIUDE_TRAIN_PLATE = 0.5 # train with corr2d rendered +/- this far from the GT - common for each (5x5) plate component
MAX_MAIN_OFFSET = 2.5 # do not use tile for training if MAIN camera (AUX for LWIR) differs more from GT
MODEL_ML_DIR = "ml32" # subdirectory with the ML disparity sweep files
# No training tree given on the command line -> generate test data only
if not topdir_train:
    NUM_TRAIN_SETS = 0
# Histogram batch binning (disparity x strength cells) depends on plate radius
if RADIUS == 0:
    BATCH_DISP_BINS = 50 # 1000 * 1
    BATCH_STR_BINS = 20 # 10
elif RADIUS == 1:
    BATCH_DISP_BINS = 15 # 120 * 9
    BATCH_STR_BINS = 8
else: # RADIUS = 2
    BATCH_DISP_BINS = 10 # 40 * 25
    BATCH_STR_BINS = 4
# Output TFRecord file name prefixes (pathTFR is defined earlier in this script)
train_filenameTFR = pathTFR+"/train"
test_filenameTFR = pathTFR+"/test"
''' Prepare full image for testing '''
# NOTE(review): disabled code below would export whole-image TFRecords for
# every entry of test_sets; kept as a string literal for reference.
"""
for model_ml_path in test_sets:
writeTFRecordsFromImageSet(
model_ml_path, # model/version/ml_dir
FGBGMODE_TEST, # 0, # expot_mode, # 0 - GT average, 1 - GT FG, 2 - GT BG, 3 - AUX disparity
RND_AMPLIUDE_TEST, # random_offset, # for modes 0..2 - add random offset of -random_offset to +random_offset, in mode 3 add random to GT average if no AUX data
pathTFR) # TFR directory
"""
# disp_bins = 20,
# str_bins=10)
# corr2d, target_disparity, gt_ds = readTFRewcordsEpoch(train_filenameTFR)
# print_time("Read %d tiles"%(corr2d.shape[0]))
# exit (0)
# Scan the train/test directory trees and build the disparity/strength
# histograms and ground-truth data (ExploreData is defined earlier in this file)
ex_data = ExploreData(
    topdir_train = topdir_train,
    topdir_test = topdir_test,
    ml_subdir = MODEL_ML_DIR,
    ml_pattern = ml_pattern,
    max_main_offset = MAX_MAIN_OFFSET,
    latest_version_only = LATEST_VERSION_ONLY,
    debug_level = 3, #1, #3, ##0, #3,
    disparity_bins = 50, #100 #200, #1000,
    strength_bins = 50, #100
    disparity_min_drop = -0.1,
    disparity_min_clip = -0.1,
    disparity_max_drop = 5.0, #100.0,
    disparity_max_clip = 5.0, #100.0,
    strength_min_drop = 0.02, # 0.1,
    strength_min_clip = 0.02, # 0.1,
    strength_max_drop = 0.3, # 1.0,
    strength_max_clip = 0.27, # 0.9,
    hist_sigma = 2.0, # Blur log histogram
    hist_cutoff= 0.001, # of maximal
    fgbg_mode = FGBGMODE_TRAIN, # 0 - average, 1 - FG, 2 - BG (3 - AUX - not used here)
    rnd_tile = RND_AMPLIUDE_TRAIN_TILE, # use corr2d rendered with target disparity this far shuffled from the GT
    rnd_plate = RND_AMPLIUDE_TRAIN_PLATE, # use corr2d rendered with target disparity this far shuffled from the GT
    radius = RADIUS)
# Display the blurred 2D disparity/strength histogram
mytitle = "Disparity_Strength histogram"
fig = plt.figure()
fig.canvas.set_window_title(mytitle)
fig.suptitle(mytitle)
# plt.imshow(lhist,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.imshow(ex_data.blurred_hist, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
plt.colorbar(orientation='horizontal') # location='bottom')
# Assign each histogram cell to a batch bin (equalizes sampling over disparity/strength)
hist_to_batch = ex_data.assignBatchBins(
    disp_bins = BATCH_DISP_BINS,
    str_bins = BATCH_STR_BINS)
# Visualize the batch index assignment as alternating shades
bb_display = hist_to_batch.copy()
bb_display = ( 1+ (bb_display % 2) + 2 * ((bb_display % 20)//10)) * (hist_to_batch > 0) #).astype(float)
fig2 = plt.figure()
fig2.canvas.set_window_title("Batch indices")
fig2.suptitle("Batch index for each disparity/strength cell")
plt.imshow(bb_display) #, vmin=0, vmax=.1 * ex_data.blurred_hist.max())#,vmin=-6,vmax=-2) # , vmin=0, vmax=.01)
""" prepare test dataset """
'''
for test_corr in test_corrs:
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
'''
if (RADIUS > 0):
disp_var_test, num_neibs_test = ex_data.exploreNeibs(ex_data.test_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
disp_var_train, num_neibs_train = ex_data.exploreNeibs(ex_data.train_ds, RADIUS, VARIANCE_SCALE_DISPARITY)
# show varinace histogram
# for var_thresh in [0.1, 1.0, 1.5, 2.0, 5.0]:
for var_thresh in [VARIANCE_THRESHOLD]:
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, suchas training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape(but last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neibs files. Same shape(but last dim) as rds_list
variance_min = 0.0,
variance_max = var_thresh,
neibs_min = MIN_NEIBS)
ex_data.showVariance(
rds_list = [ex_data.train_ds, ex_data.test_ds], # list of disparity/strength files, suchas training, testing
disp_var_list = [disp_var_train, disp_var_test], # list of disparity variance files. Same shape(but last dim) as rds_list
num_neibs_list = [num_neibs_train, num_neibs_test], # list of number of tile neibs files. Same shape(but last dim) as rds_list
variance_min = var_thresh,
variance_max = 1000.0,
neibs_min = MIN_NEIBS)
pass
pass
else:
disp_var_test, num_neibs_test = None, None
disp_var_train, num_neibs_train = None, None
#Wrong way to get ML lists for LWIR mode - make it an error!
### ml_list_train=ex_data.getMLList(ml_subdir, ex_data.files_train)
### ml_list_test= ex_data.getMLList(ml_subdir, ex_data.files_test)
ml_list_train= []
ml_list_test= []
if FIXED_TEST_LENGTH is None:
num_test_scenes = len([ex_data.files_test, ex_data.files_train][TEST_SAME_LENGTH_AS_TRAIN])
else:
num_test_scenes = FIXED_TEST_LENGTH
if RADIUS == 0 :
    # Single-tile mode: low-variance batch lists, then plain epoch writers
    list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
        data_ds = ex_data.train_ds,
        data_gtaux = ex_data.train_gtaux,
        disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
        disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
        min_var = 0.0, # Minimal tile variance to include
        max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
        scale_disp = VARIANCE_SCALE_DISPARITY,
        min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
        use_split = True, # Select by single/multi-plane tiles (center only)
        keep_split = False) # When sel_split, keep only multi-plane tiles (false - only single-plane)
    pass
    for train_var in range (NUM_TRAIN_SETS):
        fpath = train_filenameTFR+("%03d"%(train_var,))
        ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_train, files_list = ex_data.files_train, set_ds= ex_data.train_ds)
    list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
        data_ds = ex_data.test_ds,
        data_gtaux = ex_data.test_gtaux,
        disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
        disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
        min_var = 0.0, # Minimal tile variance to include
        max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
        min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
        use_split = True, # Select by single/multi-plane tiles (center only)
        keep_split = False) # When sel_split, keep only multi-plane tiles (false - only single-plane)
    fpath = test_filenameTFR # +("-%03d"%(train_var,))
    ex_data.writeTFRewcordsEpoch(fpath, ml_list = ml_list_test, files_list = ex_data.files_test, set_ds= ex_data.test_ds, num_scenes = num_test_scenes)
    pass
else: # RADIUS > 0
    # test: one low-variance (LE) and one high-variance (GT) TFRecord file
    list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
        data_ds = ex_data.test_ds,
        data_gtaux = ex_data.test_gtaux,
        disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
        disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
        min_var = 0.0, # Minimal tile variance to include
        max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
        min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
        use_split = True, # Select by single/multi-plane tiles (center only)
        keep_split = False) # When sel_split, keep only multi-plane tiles (false - only single-plane)
    num_le_test = num_batch_tiles_test.sum()
    print("Number of <= %f disparity variance tiles: %d (est)"%(VARIANCE_THRESHOLD, num_le_test))
    fpath = test_filenameTFR +("TEST_R%d_LE%4.1f"%(RADIUS,VARIANCE_THRESHOLD))
    # next line:
    ex_data.writeTFRewcordsEpochLwir(
        fpath,
        sweep_files = ex_data.test_sweep_files,
        sweep_disparities = ex_data.test_sweep_disparities,
        files_list = ex_data.files_test,
        set_ds = ex_data.test_ds,
        radius = ex_data.radius,
        num_scenes = num_test_scenes,
        rnd_tile = ex_data.rnd_tile,
        rnd_plate = ex_data.rnd_plate)
    list_of_file_lists_test, num_batch_tiles_test = ex_data.makeBatchLists( # results are also saved to self.*
        data_ds = ex_data.test_ds,
        data_gtaux = ex_data.test_gtaux,
        disp_var = disp_var_test, # difference between maximal and minimal disparity for each scene, each tile
        disp_neibs = num_neibs_test, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
        min_var = VARIANCE_THRESHOLD, # Minimal tile variance to include
        max_var = 1000.0, # Maximal tile variance to include
        min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
        use_split = True, # Select by single/multi-plane tiles (center only)
        keep_split = False) # When sel_split, keep only multi-plane tiles (false - only single-plane)
    num_gt_test = num_batch_tiles_test.sum()
    high_fract_test = 1.0 * num_gt_test / (num_le_test + num_gt_test)
    print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_test, high_fract_test))
    fpath = test_filenameTFR +("TEST_R%d_GT%4.1f"%(RADIUS,VARIANCE_THRESHOLD))
    ex_data.writeTFRewcordsEpochLwir(
        fpath,
        sweep_files = ex_data.test_sweep_files,
        sweep_disparities = ex_data.test_sweep_disparities,
        files_list = ex_data.files_test,
        set_ds = ex_data.test_ds,
        radius = ex_data.radius,
        num_scenes = num_test_scenes,
        rnd_tile = ex_data.rnd_tile,
        rnd_plate = ex_data.rnd_plate)
    #fake: test-style (LE/GT) files rendered from the TRAIN data
    if NUM_TRAIN_SETS > 0:
        list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
            data_ds = ex_data.train_ds,
            data_gtaux = ex_data.train_gtaux,
            disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = True, # Select by single/multi-plane tiles (center only)
            keep_split = False) # When sel_split, keep only multi-plane tiles (false - only single-plane)
        num_le_fake = num_batch_tiles_fake.sum()
        print("Number of <= %f disparity variance tiles: %d (test)"%(VARIANCE_THRESHOLD, num_le_fake))
        fpath = test_filenameTFR +("FAKE_R%d_LE%4.1f"%(RADIUS,VARIANCE_THRESHOLD))
        ex_data.writeTFRewcordsEpochLwir(
            fpath,
            sweep_files = ex_data.train_sweep_files,
            sweep_disparities = ex_data.train_sweep_disparities,
            files_list = ex_data.files_train,
            set_ds = ex_data.train_ds,
            radius = ex_data.radius,
            num_scenes = num_test_scenes,
            rnd_tile = ex_data.rnd_tile,
            rnd_plate = ex_data.rnd_plate)
        list_of_file_lists_fake, num_batch_tiles_fake = ex_data.makeBatchLists( # results are also saved to self.*
            data_ds = ex_data.train_ds,
            data_gtaux = ex_data.train_gtaux,
            disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = VARIANCE_THRESHOLD, # Minimal tile variance to include
            max_var = 1000.0, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = True, # Select by single/multi-plane tiles (center only)
            keep_split = False) # When sel_split, keep only multi-plane tiles (false - only single-plane)
        num_gt_fake = num_batch_tiles_fake.sum()
        high_fract_fake = 1.0 * num_gt_fake / (num_le_fake + num_gt_fake)
        print("Number of > %f disparity variance tiles: %d, fraction = %f (test)"%(VARIANCE_THRESHOLD, num_gt_fake, high_fract_fake))
        fpath = test_filenameTFR +("FAKE_R%d_GT%4.1f"%(RADIUS,VARIANCE_THRESHOLD))
        ex_data.writeTFRewcordsEpochLwir(
            fpath,
            sweep_files = ex_data.train_sweep_files,
            sweep_disparities = ex_data.train_sweep_disparities,
            files_list = ex_data.files_train,
            set_ds = ex_data.train_ds,
            radius = ex_data.radius,
            num_scenes = num_test_scenes,
            rnd_tile = ex_data.rnd_tile,
            rnd_plate = ex_data.rnd_plate)
    # train 32 sets - one LE and one GT file per epoch
    for train_var in range (NUM_TRAIN_SETS): # Recalculate list for each file - slower, but will alternate lvar/hvar
        list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
            data_ds = ex_data.train_ds,
            data_gtaux = ex_data.train_gtaux,
            disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = 0.0, # Minimal tile variance to include
            max_var = VARIANCE_THRESHOLD, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = True, # Select by single/multi-plane tiles (center only)
            keep_split = False) # When sel_split, keep only multi-plane tiles (false - only single-plane)
        num_le_train = num_batch_tiles_train.sum()
        print("Number of <= %f disparity variance tiles: %d (train)"%(VARIANCE_THRESHOLD, num_le_train))
        fpath = train_filenameTFR+("%03d_R%d_LE%4.1f"%(train_var,RADIUS,VARIANCE_THRESHOLD))
        ex_data.writeTFRewcordsEpochLwir(
            fpath,
            sweep_files = ex_data.train_sweep_files,
            sweep_disparities = ex_data.train_sweep_disparities,
            files_list = ex_data.files_train,
            set_ds = ex_data.train_ds,
            radius = ex_data.radius,
            num_scenes = len(ex_data.files_train),
            rnd_tile = ex_data.rnd_tile,
            rnd_plate = ex_data.rnd_plate)
        list_of_file_lists_train, num_batch_tiles_train = ex_data.makeBatchLists( # results are also saved to self.*
            data_ds = ex_data.train_ds,
            data_gtaux = ex_data.train_gtaux,
            disp_var = disp_var_train, # difference between maximal and minimal disparity for each scene, each tile
            disp_neibs = num_neibs_train, # number of valid tiles around each center tile (for 3x3 (radius = 1) - maximal is 9)
            min_var = VARIANCE_THRESHOLD, # Minimal tile variance to include
            max_var = 1000.0, # Maximal tile variance to include
            min_neibs = MIN_NEIBS, # Minimal number of valid tiles to include
            use_split = True, # Select by single/multi-plane tiles (center only)
            keep_split = False) # When sel_split, keep only multi-plane tiles (false - only single-plane)
        num_gt_train = num_batch_tiles_train.sum()
        high_fract_train = 1.0 * num_gt_train / (num_le_train + num_gt_train)
        print("Number of > %f disparity variance tiles: %d, fraction = %f (train)"%(VARIANCE_THRESHOLD, num_gt_train, high_fract_train))
        fpath = (train_filenameTFR+("%03d_R%d_GT%4.1f"%(train_var,RADIUS,VARIANCE_THRESHOLD)))
        ex_data.writeTFRewcordsEpochLwir(
            fpath,
            sweep_files = ex_data.train_sweep_files,
            sweep_disparities = ex_data.train_sweep_disparities,
            files_list = ex_data.files_train,
            set_ds = ex_data.train_ds,
            radius = ex_data.radius,
            num_scenes = len(ex_data.files_train),
            rnd_tile = ex_data.rnd_tile,
            rnd_plate = ex_data.rnd_plate)
plt.show()
# NOTE(review): disabled older single-image export code, kept for reference
"""
scene = os.path.basename(test_corr)[:17]
scene_version= os.path.basename(os.path.dirname(os.path.dirname(test_corr)))
fname =scene+'-'+scene_version
img_filenameTFR = os.path.join(pathTFR,'img',fname)
print_time("Saving test image %s as tiles..."%(img_filenameTFR),end = " ")
writeTFRewcordsImageTiles(test_corr, img_filenameTFR)
print_time("Done")
pass
"""
pass
lwir-nn-2c44046de3b521c386ae33bec9c0eb5075f4477f/imagej_tiff.py 0000775 0000000 0000000 00000050470 13514453450 0022731 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
'''
/**
* @file imagej_tiff.py
* @brief open multi layer tiff files, display layers and parse meta data
* @par License:
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see .
*/
'''
__copyright__ = "Copyright 2018, Elphel, Inc."
__license__ = "GPL-3.0+"
__email__ = "oleg@elphel.com"
'''
Notes:
- Pillow 5.1.0. Version 4.1.1 throws error (ValueError):
~$ (sudo) pip3 install Pillow --upgrade
~$ python3
>>> import PIL
>>> PIL.PILLOW_VERSION
'5.1.0'
'''
from PIL import Image
import xml.etree.ElementTree as ET
import numpy as np
import matplotlib.pyplot as plt
import sys
import xml.dom.minidom as minidom
import time
#http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
    """ANSI escape sequences for colored / styled terminal output."""
    HEADER    = '\033[95m'
    OKBLUE    = '\033[94m'
    OKGREEN   = '\033[92m'
    WARNING   = '\033[38;5;214m'
    FAIL      = '\033[91m'
    ENDC      = '\033[0m'        # reset all attributes
    BOLD      = '\033[1m'
    BOLDWHITE = '\033[1;37m'
    UNDERLINE = '\033[4m'
class IJML:
    """Layout of the per-tile values in the ML 'other' layer.

    The ML_OTHER_* constants are byte-pair offsets inside each tile of the
    ML_OTHER_INDEX layer (as defined in ImageDtt.java); the short names are
    the corresponding value indices (offset // 2) into the unpacked vector.
    """
    # Byte-pair offsets within the ML_OTHER_INDEX layer tile
    ML_OTHER_TARGET            =  0  # target disparity
    ML_OTHER_GTRUTH            =  2  # ground truth disparity
    ML_OTHER_GTRUTH_STRENGTH   =  4  # ground truth confidence
    ML_OTHER_GTRUTH_RMS        =  6  # ground truth RMS
    ML_OTHER_GTRUTH_RMS_SPLIT  =  8  # ground truth combined FG/BG RMS
    ML_OTHER_GTRUTH_FG_DISP    = 10  # ground truth FG disparity
    ML_OTHER_GTRUTH_FG_STR     = 12  # ground truth FG strength
    ML_OTHER_GTRUTH_BG_DISP    = 14  # ground truth BG disparity
    ML_OTHER_GTRUTH_BG_STR     = 16  # ground truth BG strength
    ML_OTHER_AUX_DISP          = 18  # AUX heuristic disparity
    ML_OTHER_AUX_STR           = 20  # AUX heuristic strength
    # Value indices - one per byte pair, in layout order
    (TARGET, GTRUTH, STRENGTH, RMS, RMS_SPLIT,
     FG_DISP, FG_STR, BG_DISP, BG_STR,
     AUX_DISP, AUX_STR) = range(11)
    # Indices holding signed (possibly negative) disparity values
    SIGNED = (TARGET, GTRUTH, FG_DISP, BG_DISP)
    # Unsigned values using the RMS fixed-point scaling
    UNSIGNED_RMS = (RMS, RMS_SPLIT)
    # Total number of values packed into each tile
    NUM_VALUES = 11
class IJFGBG:
    """Column indices (and names) of the per-tile DSI vector."""
    DSI_NAMES = ["disparity", "strength", "rms", "rms-split", "fg-disp",
                 "fg-str", "bg-disp", "bg-str", "aux-disp", "aux-str"]
    # Index constants, in the same order as DSI_NAMES
    (DISPARITY, STRENGTH, RMS, RMS_SPLIT, FG_DISP,
     FG_STR, BG_DISP, BG_STR, AUX_DISP, AUX_STR) = range(10)
# reshape to tiles
def get_tile_images(image, width=8, height=8):
    """Return a zero-copy, read-only view of `image` split into tiles.

    Parameters:
        image:  np.ndarray of shape (rows, cols, depth); rows/cols must be
                multiples of the tile size (assumes a contiguous layout so
                np.ravel() returns a view, not a copy - TODO confirm for
                non-contiguous inputs).
        width:  tile width in pixels.
        height: tile height in pixels.

    Returns:
        View of shape (nrows, ncols, height, width, depth), or None when the
        image is not evenly divisible into tiles (legacy contract - callers
        must check for None).
    """
    nrows_px, ncols_px, depth = image.shape
    nrows, row_rem = divmod(nrows_px, height)
    ncols, col_rem = divmod(ncols_px, width)
    if row_rem != 0 or col_rem != 0:
        return None  # not tileable
    row_stride, col_stride = image.strides[:2]
    return np.lib.stride_tricks.as_strided(
        np.ravel(image),
        shape=(nrows, ncols, height, width, depth),
        strides=(height * row_stride, width * col_stride, *image.strides),
        writeable=False
    )
# TiffFile has no len exception
#import imageio
#from libtiff import TIFF
'''
Description:
Reads a tiff files with multiple layers that were saved by imagej
Methods:
.getstack(items=[])
returns np.array, layers are stacked along depth - think of RGB channels
@items - if empty = all, if not - items[i] - can be layer index or layer's label name
.channel(index)
returns np.array of a single layer
.show_images(items=[])
@items - if empty = all, if not - items[i] - can be layer index or layer's label name
.show_image(index)
Examples:
#1
'''
class imagej_tiff:
    """Reader for multi-layer ImageJ TIFF files with Elphel XML metadata.

    Decodes layer labels and the embedded XML properties, then loads either
    all layers, selected layers, or per-tile data for a given tile list.
    8-bit files are rescaled back to float using data_min/data_max from the
    metadata; 0 (or 1, for the VERSION 1.0 "nan bug" files) marks NaN.
    """
    # imagej stores labels lengths in this tag
    __TIFF_TAG_LABELS_LENGTHS = 50838
    # imagej stores labels contents in this tag
    __TIFF_TAG_LABELS_STRINGS = 50839
    # init
    def __init__(self,filename, layers = None, tile_list = None):
        """Open `filename`; optionally restrict to `layers` and/or `tile_list`.

        layers == None          -> load every layer into self.image
        layers, tile_list==None -> load only the named layers into self.image
        layers and tile_list    -> extract per-tile corr2d / target / GT data
        """
        # file name
        self.fname = filename
        tif = Image.open(filename)
        # total number of layers in tiff
        self.nimages = tif.n_frames
        # labels array
        self.labels = []
        # infos will contain xml data Elphel stores in some of tiff files
        self.infos = []
        # dictionary from decoded infos[0] xml data
        self.props = {}
        # bits per sample, type int (TIFF tag 258)
        self.bpp = tif.tag[258][0]
        self.__split_labels(tif.n_frames,tif.tag)
        self.__parse_info()
        try:
            self.nan_bug = self.props['VERSION']== '1.0' # data between min and max is mapped to 0..254 instead of 1..255
        except:
            # NOTE(review): bare except - 'VERSION' may be absent (KeyError) for
            # other files, not ML ones
            self.nan_bug = False
        # image layers stacked along depth - (think RGB)
        self.image = []
        if layers is None:
            # fill self.image with every layer
            for i in range(self.nimages):
                tif.seek(i)
                a = np.array(tif)
                a = np.reshape(a,(a.shape[0],a.shape[1],1))
                #a = a[:,:,np.newaxis]
                # scale for 8-bits
                # exclude layer named 'other'
                if self.bpp==8:
                    _min = self.data_min
                    _max = self.data_max
                    _MIN = 1
                    _MAX = 255
                    if (self.nan_bug):
                        _MIN = 0
                        _MAX = 254
                    else:
                        if self.labels[i]!='other':
                            a[a==0]=np.nan
                    a = a.astype(float)
                    if self.labels[i]!='other':
                        # a[a==0]=np.nan
                        a = (_max-_min)*(a-_MIN)/(_MAX-_MIN)+_min
                # init
                if i==0:
                    self.image = a
                # stack along depth (think of RGB channels)
                else:
                    self.image = np.append(self.image,a,axis=2)
        else:
            if tile_list is None:
                # only the named layers, raw (no 8-bit rescaling here)
                indx = 0
                for layer in layers:
                    tif.seek(self.labels.index(layer))
                    a = np.array(tif)
                    if not indx:
                        self.image = np.empty((a.shape[0],a.shape[1],len(layers)),a.dtype)
                    self.image[...,indx] = a
                    indx += 1
            else:
                # per-tile extraction: 2D correlation layers + 'other' payload
                other_label = "other"
                # print(tile_list)
                num_tiles = len(tile_list)
                num_layers = len(layers)
                tiles_corr = np.empty((num_tiles,num_layers,self.tileH*self.tileW),dtype=float)
                # tiles_other=np.empty((num_tiles,3),dtype=float)
                tiles_other=self.gettilesvalues( # returns nparray of 11 floats (was 3)
                    tif = tif,
                    tile_list=tile_list,
                    label=other_label)
                for nl,label in enumerate(layers):
                    tif.seek(self.labels.index(label)) #'hor-pairs' is not in list
                    layer = np.array(tif) # 8 or 32 bits
                    tilesX = layer.shape[1]//self.tileW
                    for nt,tl in enumerate(tile_list):
                        # linear tile index -> (row, column)
                        ty = tl // tilesX
                        tx = tl % tilesX
                        a = np.ravel(layer[self.tileH * ty : self.tileH * (ty+1),
                                           self.tileW * tx : self.tileW * (tx+1)])
                        #convert from int8
                        if self.bpp==8:
                            a = a.astype(float)
                            if np.isnan(tiles_other[nt][0]):
                                # print("Skipping NaN tile ",tl)
                                a[...] = np.nan
                            else:
                                _min = self.data_min
                                _max = self.data_max
                                _MIN = 1
                                _MAX = 255
                                if (self.nan_bug):
                                    _MIN = 0
                                    _MAX = 254
                                else:
                                    a[a==0] = np.nan
                                a = (_max-_min)*(a-_MIN)/(_MAX-_MIN)+_min
                        tiles_corr[nt,nl] = a
                        pass
                    pass
                self.corr2d = tiles_corr
                self.target_disparity = tiles_other[...,0]
                self.gt_ds = tiles_other[...,1:3]
                self.payload = tiles_other#[...,0:12]
                pass
        # init done, close the image
        tif.close()
    # label == tiff layer name
    def getvalues(self,label=""):
        """Decode the named ('other') layer into per-tile value vectors.

        Returns np.array of shape (tilesY, tilesX, IJML.NUM_VALUES); 8-bit
        data is unpacked from byte pairs using the IJML fixed-point scaling.
        """
        l = self.getstack([label],shape_as_tiles=True)
        res = np.empty((l.shape[0],l.shape[1], IJML.NUM_VALUES)) # was just 3
        for i in range(res.shape[0]):
            for j in range(res.shape[1]):
                # 9x9 -> 81x1
                m = np.ravel(l[i,j])
                if self.bpp==32:
                    # 32-bit: values stored directly at even offsets
                    for k in range(res.shape[2]):
                        res[i,j,k] = m[k * 2]
                elif self.bpp==8:
                    # 8-bit: each value packed into a (high, low) byte pair
                    for k in range(res.shape[2]):
                        if k in IJML.SIGNED:
                            res[i,j,k] = ((m[2 * k] - 128) * 256 + m[2 * k + 1]) / 128
                        elif k in IJML.UNSIGNED_RMS:
                            res[i,j,k] = (m[2 * k]*256+m[2 * k + 1])/4096.0
                        else:
                            res[i,j,k] = (m[2 * k]*256+m[2 * k + 1])/65536.0
                else:
                    for k in range(res.shape[2]):
                        res[i,j,k] = np.nan
        # NaNize - TODO: update! (only the first 3 values are NaNized here)
        if self.bpp==8:
            a = res[:,:,0]
            a[a==-256] = np.nan
            b = res[:,:,1]
            b[b==-256] = np.nan
            c = res[:,:,2]
            c[c==0] = np.nan
        return res
    # 3 values per tile: target disparity, GT disparity, GT confidence
    # With LWIR/aux there are more!
    def gettilesvalues(self,
                       tif,
                       tile_list,
                       label=""):
        """Decode the named layer for the listed tiles only.

        Returns np.array of shape (len(tile_list), IJML.NUM_VALUES), using
        the same byte-pair unpacking as getvalues().
        """
        res = np.empty((len(tile_list), IJML.NUM_VALUES),dtype=float) # was only 3
        tif.seek(self.labels.index(label))
        layer = np.array(tif) # 8 or 32 bits
        tilesX = layer.shape[1]//self.tileW
        for i,tl in enumerate(tile_list):
            ty = tl // tilesX
            tx = tl % tilesX
            m = np.ravel(layer[self.tileH*ty:self.tileH*(ty+1),self.tileW*tx:self.tileW*(tx+1)])
            if self.bpp==32:
                for k in range(res.shape[1]):
                    res[i,k] = m[k * 2]
            elif self.bpp==8:
                for k in range(res.shape[1]):
                    if k in IJML.SIGNED:
                        res[i,k] = ((m[2 * k] - 128) * 256 + m[2 * k + 1]) / 128
                    elif k in IJML.UNSIGNED_RMS:
                        res[i,k] = (m[2 * k]*256+m[2 * k + 1])/4096.0
                    else:
                        res[i,k] = (m[2 * k]*256+m[2 * k + 1])/65536.0
            else:
                for k in range(res.shape[1]):
                    res[i,k] = np.nan
        # NaNize update! (only the first 3 values are NaNized here)
        if self.bpp==8:
            a = res[...,0]
            a[a==-256] = np.nan
            b = res[...,1]
            b[b==-256] = np.nan
            c = res[...,2]
            c[c==0] = np.nan
        return res
    # get ordered stack of images by provided items
    # by index or label name
    def getstack(self,items=[],shape_as_tiles=False):
        """Stack layers selected by index or label along depth.

        items == [] returns all loaded layers; shape_as_tiles reshapes the
        result with get_tile_images().
        NOTE(review): mutable default argument `items=[]` - safe here because
        it is never mutated, but a None default would be cleaner.
        """
        a = ()
        if len(items)==0:
            b = self.image
        else:
            for i in items:
                if type(i)==int:
                    a += (self.image[:,:,i],)
                elif type(i)==str:
                    j = self.labels.index(i)
                    a += (self.image[:,:,j],)
            # stack along depth
            b = np.stack(a,axis=2)
        if shape_as_tiles:
            b = get_tile_images(b,self.tileW,self.tileH)
        return b
    # get np.array of a channel
    # * does not handle out of bounds
    def channel(self,index):
        """Return a single layer as a 2D np.array (no bounds checking)."""
        return self.image[:,:,index]
    # display images by index or label
    def show_images(self,items=[]):
        """Display the listed layers (by index or label), or all when empty."""
        # show listed only
        if len(items)>0:
            for i in items:
                if type(i)==int:
                    self.show_image(i)
                elif type(i)==str:
                    j = self.labels.index(i)
                    self.show_image(j)
        # show all
        else:
            for i in range(self.nimages):
                self.show_image(i)
    # display single image
    def show_image(self,index):
        """Display one layer with matplotlib (window titled with its label)."""
        # display using matplotlib
        t = self.image[:,:,index]
        mytitle = "("+str(index+1)+" of "+str(self.nimages)+") "+self.labels[index]
        fig = plt.figure()
        fig.canvas.set_window_title(self.fname+": "+mytitle)
        fig.suptitle(mytitle)
        #plt.imshow(t,cmap=plt.get_cmap('gray'))
        plt.imshow(t)
        plt.colorbar()
        # display using Pillow - need to scale
        # remove NaNs - no need
        #t[np.isnan(t)]=np.nanmin(t)
        # scale to [min/max*255:255] range
        #t = (1-(t-np.nanmax(t))/(t-np.nanmin(t)))*255
        #tmp_im = Image.fromarray(t)
        #tmp_im.show()
    # puts etrees in infos
    def __parse_info(self):
        """Parse raw XML info strings into etrees and extract key properties."""
        infos = []
        for info in self.infos:
            infos.append(ET.fromstring(info))
        self.infos = infos
        # specifics
        # properties dictionary
        pd = {}
        if infos:
            for child in infos[0]:
                #print(child.tag+"::::::"+child.text)
                pd[child.tag] = child.text
            self.props = pd
            # tiles are squares - height deliberately copied from 'tileWidth'
            self.tileW = int(self.props['tileWidth'])
            self.tileH = int(self.props['tileWidth'])
            if self.bpp==8:
                # scaling range needed to restore float values from 8 bits
                self.data_min = float(self.props['data_min'])
                self.data_max = float(self.props['data_max'])
    # makes arrays of labels (strings) and unparsed xml infos
    def __split_labels(self,n,tag):
        """Split ImageJ label tags into per-layer labels and info XML strings.

        `n` is the number of image frames; any leading extra strings (count =
        len(tag_lens) - n, normally 0 or 1) are treated as info blocks.
        """
        # list of string lengths
        tag_lens = tag[self.__TIFF_TAG_LABELS_LENGTHS]
        # concatenated label string
        tag_labels = tag[self.__TIFF_TAG_LABELS_STRINGS].decode()
        # remove 1st element: it's something like IJIJlabl..
        tag_labels = tag_labels[tag_lens[0]:]
        tag_lens = tag_lens[1:]
        # the last ones are images labels
        # normally the difference is expected to be 0 or 1
        skip = len(tag_lens) - n
        self.labels = []
        self.infos = []
        for l in tag_lens:
            string = tag_labels[0:l].replace('\x00','')
            if skip==0:
                self.labels.append(string)
            else:
                self.infos.append(string)
                skip -= 1
            tag_labels = tag_labels[l:]
#MAIN
# Demo / self-test: open an ML TIFF (path from argv[1] or a default), dump its
# labels and XML metadata, then display value maps and all layers.
if __name__ == "__main__":
    try:
        fname = sys.argv[1]
    except IndexError:
        fname = "/data_ssd/lwir3d/models/002/1562390096_605721/v01/ml32/1562390096_605721-ML_DATA-32B-AOT-FZ0.03-AG.tiff"
    # fname = "/mnt/dde6f983-d149-435e-b4a2-88749245cc6c/home/eyesis/x3d_data/data_sets/train/1527182807_896892/v02/ml/1527182807_896892-ML_DATA-08B-O-FZ0.05-OFFS0.40000.tiff"
    # fname = "1521849031_093189-ML_DATA-32B-O-OFFS1.0.tiff"
    # fname = "1521849031_093189-ML_DATA-08B-O-OFFS1.0.tiff"
    #fname = "1521849031_093189-DISP_MAP-D0.0-46.tif"
    #fname = "1526905735_662795-ML_DATA-08B-AIOTD-OFFS2.0.tiff"
    #fname = "test.tiff"
    print(bcolors.BOLDWHITE+"time: "+str(time.time())+bcolors.ENDC)
    ijt = imagej_tiff(fname)
    print(bcolors.BOLDWHITE+"time: "+str(time.time())+bcolors.ENDC)
    print("TIFF stack labels: "+str(ijt.labels))
    #print(ijt.infos)
    # pretty-print the embedded XML properties block
    rough_string = ET.tostring(ijt.infos[0], "utf-8")
    reparsed = minidom.parseString(rough_string)
    print(reparsed.toprettyxml(indent="\t"))
    #print(ijt.props)
    # needed properties:
    print("Tiles shape: "+str(ijt.tileW)+"x"+str(ijt.tileH))
    try:
        print("Data min: "+str(ijt.data_min))
        print("Data max: "+str(ijt.data_max))
    except: # NOTE(review): bare except - data_min/max only exist for 8-bit files
        print (" No min/max are provided in 32-bit mode)")
    print(ijt.image.shape)
    # layer order: ['diagm-pair', 'diago-pair', 'hor-pairs', 'vert-pairs', 'other']
    # now split this into tiles:
    #tiles = get_tile_images(ijt.image,ijt.tileW,ijt.tileH)
    #print(tiles.shape)
    # tiles = ijt.getstack(['diagm-pair','diago-pair','hor-pairs','vert-pairs'],shape_as_tiles=True)
    tiles = ijt.getstack(['diagm-aux','diago-aux','hor-aux','vert-aux'],shape_as_tiles=True)
    print("Stack of images shape: "+str(tiles.shape))
    print(bcolors.BOLDWHITE+"time: "+str(time.time())+bcolors.ENDC)
    # provide layer name
    values = ijt.getvalues(label='other')
    print("Stack of values shape: "+str(values.shape))
    # each tile's disparity:
    fig = plt.figure()
    fig.suptitle("Estimated Disparity")
    plt.imshow(values[:,:,0])
    plt.colorbar()
    fig = plt.figure()
    fig.suptitle("Esitmated+Residual disparity")
    plt.imshow(values[:,:,1])
    plt.colorbar()
    fig = plt.figure()
    fig.suptitle("Residual disparity confidence")
    plt.imshow(values[:,:,2])
    plt.colorbar()
    print(bcolors.BOLDWHITE+"time: "+str(time.time())+bcolors.ENDC)
    #print(values)
    #print(value_tiles[131,162].flatten())
    #print(np.ravel(value_tiles[131,162]))
    #values = np.empty((vt.shape[0],vt.shape[1],3))
    #for i in range(values.shape[0]):
    # for j in range(values.shape[1]):
    # values[i,j,0] = get_v1()
    #print(tiles[121,160,:,:,0].shape)
    #_nrows = int(ijt.image.shape[0] / ijt.tileH)
    #_ncols = int(ijt.image.shape[1] / ijt.tileW)
    #_nrows = 32
    #_ncols = 32
    #print(str(_nrows)+" "+str(_ncols))
    #fig, ax = plt.subplots(nrows=_nrows, ncols=_ncols)
    #for i in range(_nrows):
    # for j in range(_ncols):
    # ax[i,j].imshow(tiles[i+100,j,:,:,0])
    # ax[i,j].set_axis_off()
    #for i in range(5):
    # fig = plt.figure()
    # plt.imshow(tiles[121,160,:,:,i])
    # plt.colorbar()
    #ijt.show_images(['other'])
    #ijt.show_images([0,3])
    #ijt.show_images(['X-corr','Y-corr'])
    #ijt.show_images(['R-vign',3])
    ijt.show_images()
    plt.show()
    input("All done. Press ENTER to close images and exit...")
# Examples
# 1: get default stack of images
#a = ijt.getstack()
#print(a.shape)
# 2: get defined ordered stack of images by tiff image index or by label name
#a = ijt.getstack([1,2,'X-corr'])
#print(a.shape)
# 3: will throw an error if there's no such label
#a = ijt.getstack([1,2,'Unknown'])
#print(a.shape)
# 4: will throw an error if index is out of bounds
#a = ijt.getstack([1,2,'X-corr'])
#print(a.shape)
# 5: dev excercise
#a = np.array([[1,2],[3,4]])
#b = np.array([[5,6],[7,8]])
#c = np.array([[10,11],[12,13]])
#print("test1:")
#ka = (a,b,c)
#d = np.stack(ka,axis=2)
#print(d)
#print("test2:")
#e = np.stack((d[:,:,1],d[:,:,0]),axis=2)
#print(e)