Commit fd67c744 authored by Andrey Filippov

updating before branching

parent a817f0c0
......@@ -3335,6 +3335,7 @@ public class GpuQuad{ // quad camera description
public TpTask[] setInterTasks(
final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
final boolean [] selection, // may be null, if not null do not process unselected tiles
final GeometryCorrection geometryCorrection,
final double disparity_corr,
final int margin, // do not use tiles if their centers are closer to the edges
......@@ -3346,6 +3347,7 @@ public class GpuQuad{ // quad camera description
img_width, // final int img_width,
calcPortsCoordinatesAndDerivatives, // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
selection, // final boolean [] selection, // may be null, if not null do not process unselected tiles
geometryCorrection, // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
......@@ -3358,6 +3360,7 @@ public class GpuQuad{ // quad camera description
final int img_width,
final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
final boolean [] selection, // may be null, if not null do not process unselected tiles
final GeometryCorrection geometryCorrection,
final double disparity_corr,
final int margin, // do not use tiles if their centers are closer to the edges
......@@ -3395,9 +3398,7 @@ public class GpuQuad{ // quad camera description
threads[ithread] = new Thread() {
@Override
public void run() {
// for (int indx = ai.getAndIncrement(); indx < tp_tasks.length; indx = ai.getAndIncrement()) {
// int nTile = tile_indices[indx];
for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) if (pXpYD[nTile] != null) {
for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) if ((pXpYD[nTile] != null) && ((selection == null) || selection[nTile])) {
int tileY = nTile / tilesX;
int tileX = nTile % tilesX;
TpTask tp_task = new TpTask(num_cams, tileX, tileY);
......
......@@ -107,6 +107,8 @@ public class Corr2dLMA {
private double [] values;
// next values are only updated after success
private int iter = 0; // to be able to read number of used iterations
private double [] last_rms = null; // {rms, rms_pure}, matching this.vector
private double [] good_or_bad_rms = null; // just for diagnostics, to read last (failed) rms
private double [] initial_rms = null; // {rms, rms_pure}, first-calculated rms
......@@ -153,6 +155,11 @@ public class Corr2dLMA {
public int bad_tile=-1; // bad tile - exponent got infinite, remove the tile and start over again
// happens in gaussian mode with the convex area around wrong maximum
public int getNumIter() {
return iter;
}
// private int iter = 0; // to be able to read number of used iterations
public class Sample{ // USED in lwir
int tile; // tile in a cluster
......@@ -207,33 +214,13 @@ public class Corr2dLMA {
this.gaussian_mode = gaussian_mode;
this.used_cams_map = new int[num_cams];
/*
this.pindx = new int [num_cams][num_cams];
for (int f = 0; f < num_cams; f++) {
pindx[f][f]=-1;
for (int s = f+1; s < num_cams; s++) {
pindx[f][s] = getPairIndex(f,s);
pindx[s][f] = pindx[f][s];
}
}
*/
this.numTiles = numTiles;
ddisp_index = this.numTiles * this.tile_params; // ;
ndisp_index = ddisp_index + num_cams; // disparity offset per camera - none should be disabled
num_all_pars = ndisp_index+ num_cams; // maximal number of parameters
// boolean sq = true; // false;
this.transform_size = ts;
this.corr_wnd = corr_wnd;
}
/*
public static double [][] getCorrWnd(int transform_size){
return getCorrWnd(transform_size, true);// false);
}
*/
public static double [][] getCorrWnd(int transform_size, double pwr){ // sq = false
// double pwr = 1.3;
double [][] corr_wnd = new double[2 * transform_size - 1][2 * transform_size - 1];
......@@ -264,32 +251,13 @@ public class Corr2dLMA {
}
return corr_wnd;
}
// public double[][] getCorrWnd() {
// return this.corr_wnd;
// }
/*
public void addSample( // x = 0, y=0 - center
int tile,
int fcam, // first camera index
int scam, // second camera index
int x, // x coordinate on the common scale (corresponding to the largest baseline), along the disparity axis
int y, // y coordinate (0 - disparity axis)
double v, // correlation value at that point
double w) { // sample weight
if ((w > 0) && !Double.isNaN(v)) samples.add(new Sample(tile,fcam,scam,x,y,v,w));
}
*/
public void addSample( // x = 0, y=0 - center
int tile,
int pair,
// int fcam, // first camera index
// int scam, // second camera index
int x, // x coordinate on the common scale (corresponding to the largest baseline), along the disparity axis
int y, // y coordinate (0 - disparity axis)
double v, // correlation value at that point
double w) { // sample weight
// if ((w > 0) && !Double.isNaN(v)) samples.add(new Sample(tile,fcam,scam,x,y,v,w));
if ((w > 0) && !Double.isNaN(v)) samples.add(new Sample(tile,pair,x,y,v,w));
}
......@@ -307,7 +275,7 @@ public class Corr2dLMA {
return samples;
}
// maybe in the future - just remove a pad pair?
// maybe in the future - just remove a bad pair?
private void removeBadTile(
int ntile,
int fcam, // not yet used, maybe try removing just a pair, not all tile?
......@@ -338,9 +306,6 @@ public class Corr2dLMA {
}
public double [][][] dbgGetSamples(double [][] ds, int mode){
// int [][] comb_map = getCombMap();
// int numPairs = comb_map[0][0];
// comb_map[0][0] = -1;
int numPairs = getNumAllTilesUsedPairs();
int size = 2* transform_size -1;
int size2 = size*size;
......@@ -366,8 +331,6 @@ public class Corr2dLMA {
if (mode == 0) d = s.v;
else if (mode == 1) d = s.w;
else if (mode == 2) d = fx[ns];
// int np = comb_map[s.fcam][s.scam]; ////////////////////
// int np = s.pair; ////////////////////
int inp = used_pairs_map[s.tile][s.pair];
rslt[s.tile][inp][s.iy*size + s.ix] = d;
}
......@@ -378,7 +341,6 @@ public class Corr2dLMA {
public String [] dbgGetSliceTitles() {
int [] comb_map = getCombMap();
int np = getNumAllTilesUsedPairs(); // comb_map[0][0];
// comb_map[0][0] = -1;
String [] srslt = new String [np];
for (int npair = 0; npair < comb_map.length; npair++) {
if (comb_map[npair] >= 0) {
......@@ -389,27 +351,6 @@ public class Corr2dLMA {
return srslt;
}
/*
@Deprecated
public int [][] getCombMap(){
boolean [][] comb_pairs = new boolean[num_cams][num_cams];
for (int t = 0; t < numTiles; t++) {
for (int f = 0; f < num_cams; f++) for (int s = 0; s < num_cams; s++) {
comb_pairs[f][s] |= used_pairs_map[t][f][s] >= 0;
}
}
int np = 0;
int [][] comb_map = new int [num_cams][num_cams];
for (int f = 0; f < num_cams; f++) for (int s = 0; s < num_cams; s++) {
if (comb_pairs[f][s]) comb_map[f][s] = np++;
else comb_map[f][s] = -1;
}
comb_map[0][0] = np;
return comb_map;
}
*/
public int [] getCombMap(){
int []comb_map = new int [num_pairs];
Arrays.fill(comb_map, -1);
......@@ -625,8 +566,6 @@ public class Corr2dLMA {
boolean adjust_lazyeye_par, // adjust disparity corrections parallel to disparities lma_adjust_wxy
boolean adjust_lazyeye_ortho, // obsolete - make == adjust_lazyeye_par adjust disparity corrections orthogonal to disparities lma_adjust_ly1
double [][] disp_str, // initial value of disparity
// double [][] ly_offsets_pairs, // common for all tiles: initial per sensor x,y LY offsets (or null)
// double [][] ly_offsets_pairs, // common for all tiles: initial per pair x,y LY offsets (or null)
double half_width, // A=1/(half_widh)^2 lma_half_width
double cost_lazyeye_par, // cost for each of the non-zero disparity corrections lma_cost_wy
double cost_lazyeye_odtho // cost for each of the non-zero ortho disparity corrections lma_cost_wxy
......@@ -634,18 +573,13 @@ public class Corr2dLMA {
adjust_lazyeye_ortho = adjust_lazyeye_par; // simplify relations for the calculated/dependent parameters
lazy_eye = adjust_lazyeye_par | adjust_lazyeye_ortho;
bad_tile = -1;
// used_pairs_map = new int [numTiles][num_cams][num_cams];
used_cameras = new boolean[num_cams];
boolean [][] used_pairs = new boolean[numTiles][num_pairs];
// 0-weight values and NaN-s should be filtered on input!
// for (int t = 0; t < numTiles; t++) for (int f = 0; f < num_cams; f++) for (int s = 0; s < num_cams; s++) {
// used_pairs_map[t][f][s] = -1;
// }
used_pairs_map = new int [numTiles][num_pairs];
for (int t = 0; t < numTiles; t++) {
Arrays.fill(used_pairs_map[t], -1);
}
// boolean [][][] used_pairs_dir = new boolean [numTiles][num_cams][num_cams];
used_tiles = new boolean[numTiles];
for (Sample s:samples) { // ignore zero-weight samples
int pair = s.pair;
......@@ -654,8 +588,6 @@ public class Corr2dLMA {
used_cameras[fscam[1]]=true;
used_tiles[s.tile] = true;
used_pairs[s.tile][pair]=true; // throws < 0 - wrong pair, f==s
// used_pairs_dir[s.tile][s.fcam][s.scam] = true;
// used_pairs_dir[s.tile][fscam[0]][fscam[1]] = true;
}
ncam_used = 0;
npairs =new int [numTiles]; // pairs in each tile
......@@ -686,22 +618,8 @@ public class Corr2dLMA {
upmam[i] = npairs[nTile];
if (used_pairs[nTile][i]) npairs[nTile]++;
}
/*
for (int f = 0; f < num_cams; f++) {
for (int s = f+1; s < num_cams; s++) {
int npair = upmam[pindx[f][s]];
if (used_pairs_dir[nTile][f][s]) used_pairs_map[nTile][f][s] = npair; // either or, can not be f,s and s,f pairs
else if (used_pairs_dir[nTile][s][f]) used_pairs_map[nTile][s][f] = npair;
}
}
*/
for (int pair = 0; pair < num_pairs; pair++) {
int npair = upmam[pair];
// int [] fs = correlation2d.getPair(pair);
// if (used_pairs_dir[nTile][fs[0]][fs[1]]) used_pairs_map[nTile][fs[0]][fs[1]] = npair; // either or, can not be f,s and s,f pairs
// else if (used_pairs_dir[nTile][fs[1]][fs[0]]) used_pairs_map[nTile][fs[1]][fs[0]] = npair;
// if (used_pairs_dir[nTile][fs[0]][fs[1]]) used_pairs_map[nTile][pair] = npair; // either or, can not be f,s and s,f pairs
// else if (used_pairs_dir[nTile][fs[1]][fs[0]]) used_pairs_map[nTile][pair] = npair;
if (used_pairs[nTile][pair]) used_pairs_map[nTile][pair] = npair;
}
}
......@@ -752,7 +670,6 @@ public class Corr2dLMA {
total_weight += s.w;
values[i] = s.v;
sw += weights[i];
// int indx = G0_INDEX + pindx[s.fcam][s.scam] + s.tile * tile_params;
int indx = G0_INDEX + s.pair + s.tile * tile_params;
double d = s.v;
if (this.corr_wnd !=null) {
......@@ -794,14 +711,11 @@ public class Corr2dLMA {
public void initMatrices() { // should be called after initVector and after setMatrices
// m_pairs = new Matrix[used_pairs_map.length][num_cams][num_cams];
// m_pairs_inv = new Matrix[used_pairs_map.length][num_cams][num_cams];
m_pairs = new Matrix[used_pairs_map.length][num_pairs];
m_pairs_inv = new Matrix[used_pairs_map.length][num_pairs];
for (int nTile = 0; nTile < used_pairs_map.length; nTile++) if (used_tiles[nTile]){
for (int npair = 0; npair < num_pairs; npair++) {
int [] fs = correlation2d.getPair(npair); // TODO: change used_pairs_map?
// if (used_pairs_map[nTile][fs[0]][fs[1]] >= 0) {
if (used_pairs_map[nTile][npair] >= 0) {
m_pairs[nTile][npair] = m_disp[nTile][fs[0]].minus(m_disp[nTile][fs[1]]);
m_pairs_inv[nTile][npair] = m_pairs[nTile][npair].inverse();
......@@ -812,13 +726,11 @@ public class Corr2dLMA {
public void initInvertMatrices() { // should be called after initMatrices only if m_pairs_inv are needed
// m_pairs_inv = new Matrix[used_pairs_map.length][num_cams][num_cams];
m_pairs_inv = new Matrix[used_pairs_map.length][num_pairs];
for (int nTile = 0; nTile < used_pairs_map.length; nTile++) if (used_tiles[nTile]){
for (int npair = 0; npair < num_pairs; npair++) {
int [] fs = correlation2d.getPair(npair); // TODO: change used_pairs_map?
// if (used_pairs_map[nTile][fs[0]][fs[1]] >= 0) {
if (used_pairs_map[nTile][npair] >= 0) {
m_pairs_inv[nTile][npair] = m_pairs[nTile][npair].inverse();
}
......@@ -850,7 +762,6 @@ public class Corr2dLMA {
} else {
bv = s.v / corr_wnd[s.iy][s.ix];
}
// bv /=this.all_pars[G0_INDEX + pindx[s.fcam][s.scam] + s.tile * tile_params];
bv /=this.all_pars[G0_INDEX + s.pair + s.tile * tile_params];
//corr_wnd
int indx = 2 * ns;
......@@ -863,7 +774,6 @@ public class Corr2dLMA {
double [] aXY = {s.ix - center, s.iy - center};
Matrix mXY = new Matrix(aXY,2);
// Matrix mDDND = m_pairs_inv[s.tile][s.fcam][s.scam].times(mXY);
Matrix mDDND = m_pairs_inv[s.tile][s.pair].times(mXY);
mdata[indx ][0][0] = mDDND.get(0, 0); // dd
......@@ -1532,9 +1442,9 @@ public class Corr2dLMA {
double [] BT = new double [numTiles]; // av[B_INDEX];
double [] CT = new double [numTiles]; // A + av[CMA_INDEX];
for (int nTile = 0; nTile < numTiles; nTile++) if (used_tiles[nTile]){
for (int i = 0; i < num_cams; i++) if (used_cameras[i]) {
double [] add_dnd = {av[DISP_INDEX+ nTile * tile_params]+ av[ddisp_index + i], av[ndisp_index + i]};
xcam_ycam[nTile][i] = m_disp[nTile][i].times(new Matrix(add_dnd,2));
for (int ncam = 0; ncam < num_cams; ncam++) if (used_cameras[ncam]) {
double [] add_dnd = {av[DISP_INDEX+ nTile * tile_params]+ av[ddisp_index + ncam], av[ndisp_index + ncam]};
xcam_ycam[nTile][ncam] = m_disp[nTile][ncam].times(new Matrix(add_dnd,2));
}
for (int f = 0; f < num_cams; f++) if (used_cameras[f]) {
for (int s = 0; s < num_cams; s++) if (used_cameras[s]) {
......@@ -1590,7 +1500,7 @@ public class Corr2dLMA {
if (s.tile > 0) {
System.out.print("");
}
if (jt != null) {
if (jt != null) { // Need derivatives too, not just Fx
if (par_map[DISP_INDEX + s.tile*tile_params] >= 0) jt[par_map[DISP_INDEX + s.tile*tile_params]][ns] = 2 * WGpexp *
((A * xmxp + B * ymyp) * m_pairs[s.tile][s.pair].get(0, 0)+
(B * xmxp + C * ymyp) * m_pairs[s.tile][s.pair].get(1, 0));
......@@ -1698,6 +1608,33 @@ public class Corr2dLMA {
return fx;
}
/**
* Calculate each defined pair x,y expected offset, assuming only disparity, not lazy eye
* @param corrs - pairs 2D correlations (each in scanline order) - just to determine null/non-null
* @param disparity - expected disparity (e.g. from CM)
* @param disp_dist - per camera disparity matrix as a 1d array (linescan order)
* @return per pair x,y expected center offset in 2D correlations or nulls for undefined pairs (or null if already set)
*/
public double [][] getPairsOffsets(
double [][] corrs,
boolean [] pair_mask,
double disparity,
double [][] disp_dist){ //
double [][] xy_offsets = new double [corrs.length][];
if (disp_dist != null) {
setMatrices(disp_dist);
}
for (int pair = 0; pair < xy_offsets.length; pair++) if ((pair < correlation2d.getNumPairs()) && (corrs[pair] != null) && ((pair_mask == null) || pair_mask[pair])){ // OK to calculate for each
int [] fscam = correlation2d.getPair(pair); // returns [first_cam, second_cam]
Matrix mdd_dnd = new Matrix(new double[] {-disparity, 0.0},2);
Matrix xcam_ycam_f = m_disp[0][fscam[0]].times(mdd_dnd);
Matrix xcam_ycam_s = m_disp[0][fscam[1]].times(mdd_dnd);
xy_offsets[pair] = xcam_ycam_f.minus(xcam_ycam_s).getColumnPackedCopy();
}
return xy_offsets;
}
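// Editor's note: an illustrative, self-contained sketch (not part of this commit) of the arithmetic
// used in getPairsOffsets() above. The expected correlation-center offset of a pair is the difference
// of the two cameras' 2x2 disparity matrices applied to {-disparity, 0}. Plain 2x2 arrays stand in
// for the Jama Matrix class; the method name is hypothetical.
public static double [] pairOffsetSketch(
		double [][] m_disp_first,  // 2x2 disparity matrix of the first camera of the pair
		double [][] m_disp_second, // 2x2 disparity matrix of the second camera of the pair
		double      disparity) {   // expected disparity (e.g. from CM)
	double [] dnd = {-disparity, 0.0}; // disparity / non-disparity vector
	double [] xy = new double [2];
	for (int i = 0; i < 2; i++) {      // xy = (M_first - M_second) * dnd
		xy[i] = (m_disp_first[i][0] - m_disp_second[i][0]) * dnd[0] +
				(m_disp_first[i][1] - m_disp_second[i][1]) * dnd[1];
	}
	return xy; // expected x,y center offset of this pair's 2D correlation
}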
public void printParams() { // not used in lwir
// to make sure it is updated
......@@ -1872,7 +1809,6 @@ public class Corr2dLMA {
public void updateFromVector() { // USED in lwir
int np = 0;
// all_pars = fromVector(vector);//
for (int i = 0; i < par_mask.length; i++) if (par_mask[i]) all_pars[i] = vector[np++];
// just for reporting
......@@ -1885,9 +1821,6 @@ public class Corr2dLMA {
}
Matrix m5 = new Matrix(a5,a5.length); // single column, normally 5 rows
Matrix m3 = mddnd.times(m5);
// all_pars[ddisp_index + used_cams_rmap[pre_last_cam]] = m3.get(0, 0);
// all_pars[ddisp_index + used_cams_rmap[last_cam]] = m3.get(1, 0);
// all_pars[ndisp_index + used_cams_rmap[last_cam]] = m3.get(2, 0);
all_pars[ddisp_index + pre_last_cam] = m3.get(0, 0);
all_pars[ddisp_index + last_cam] = m3.get(1, 0);
all_pars[ndisp_index + last_cam] = m3.get(2, 0);
......@@ -1910,9 +1843,6 @@ public class Corr2dLMA {
}
Matrix m5 = new Matrix(a5,a5.length); // single column, normally 5 rows
Matrix m3 = mddnd.times(m5);
// ap[ddisp_index + used_cams_rmap[pre_last_cam]] = m3.get(0, 0);
// ap[ddisp_index + used_cams_rmap[last_cam]] = m3.get(1, 0);
// ap[ndisp_index + used_cams_rmap[last_cam]] = m3.get(2, 0);
ap[ddisp_index + pre_last_cam] = m3.get(0, 0);
ap[ddisp_index + last_cam] = m3.get(1, 0);
ap[ndisp_index + last_cam] = m3.get(2, 0);
......@@ -2426,9 +2356,6 @@ public class Corr2dLMA {
public double [][] getABCTile(){
double [][] abc = new double[numTiles][3];
for (int tile = 0; tile < numTiles; tile++) {
// abc[tile][0] = (par_mask[A_INDEX+ tile * tile_params])? all_pars[A_INDEX+ tile * tile_params] :Double.NaN;
// abc[tile][1] = (par_mask[B_INDEX+ tile * tile_params])? all_pars[B_INDEX+ tile * tile_params] :Double.NaN;
// abc[tile][2] = abc[tile][0] + ( (par_mask[CMA_INDEX+ tile * tile_params])? all_pars[CMA_INDEX+ tile * tile_params] :Double.NaN);
abc[tile][0] = all_pars[A_INDEX+ tile * tile_params];
abc[tile][1] = all_pars[B_INDEX+ tile * tile_params];
abc[tile][2] = abc[tile][0] + all_pars[CMA_INDEX+ tile * tile_params];
......@@ -2461,7 +2388,6 @@ public class Corr2dLMA {
{
boolean [] rslt = {false,false};
this.last_rms = null;
int iter = 0;
for (iter = 0; iter < num_iter; iter++) {
rslt = lmaStep(
lambda,
......
......@@ -3860,14 +3860,50 @@ public class Correlation2d {
int tileX, // just for debug output
int tileY
)
{
return corrLMA2Single( // single tile
imgdtt_params,
adjust_ly, // adjust Lazy Eye
corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs, // may have more elements than pair_mask (corrs may have combo as last elements)
disp_dist, // per camera disparity matrix as a 1d (linescan order)
rXY, // non-distorted X,Y offset per nominal pixel of disparity
pair_mask, // which pairs to process
disp_str, // -preliminary center x in pixels for largest baseline
poly_ds, // null or pair of disparity/strength
vasw_pwr, // value as weight to this power,
null, //double [] debug_lma_tile,
debug_level,
tileX, // just for debug output
tileY);
}
public Corr2dLMA corrLMA2Single( // single tile
ImageDttParameters imgdtt_params,
boolean adjust_ly, // adjust Lazy Eye
double [][] corr_wnd, // correlation window to save on re-calculation of the window
double [] corr_wnd_inv_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
double [][] corrs, // may have more elements than pair_mask (corrs may have combo as last elements)
double [][] disp_dist, // per camera disparity matrix as a 1d (linescan order)
double [][] rXY, // non-distorted X,Y offset per nominal pixel of disparity
boolean [] pair_mask, // which pairs to process
double[] disp_str, // -preliminary center x in pixels for largest baseline
double[] poly_ds, // null or pair of disparity/strength
double vasw_pwr, // value as weight to this power,
double [] debug_lma_tile,
int debug_level,
int tileX, // just for debug output
int tileY
)
{
// corrs are organized as PAIRS, some are null if not used
// for each enabled and available pair find a maximum, filter convex and create sample list
boolean need_poly = (disp_str == null); // true; // find initial disparity by polynomial approximation
boolean debug_graphic = imgdtt_params.lma_debug_graphic && (imgdtt_params.lma_debug_level1 > 3) && (debug_level > 0) ;
debug_graphic |= imgdtt_params.lmamask_dbg && (debug_level > 0);
String dbg_title = null;
if (debug_graphic) {
// if (imgdtt_params.lma_debug_graphic) {
dbg_title = String.format("tX%d_tY%d",tileX,tileY);
}
DoubleGaussianBlur gb = null;
......@@ -3882,10 +3918,43 @@ public class Correlation2d {
rXY, //double [][] rXY, // non-distorted X,Y offset per nominal pixel of disparity
imgdtt_params.lmas_gaussian //boolean gaussian_mode
);
double [] corr_shape = null;
double [] corr_shape_dia = null;
double [] norm_shape = null;
double [][] pair_shape_masks = null;
double [][] pair_offsets = null;
/// if (imgdtt_params.lmamask_en && (disp_str != null)) {
if (disp_str != null) {
pair_offsets = lma.getPairsOffsets(
corrs, // double [][] corrs,
pair_mask, // boolean [] pair_mask,
disp_str[0]/imgdtt_params.lmamask_magic, // double disparity,
disp_dist); // double [][] disp_dist);
corr_shape = getCorrShape(
corrs, // double [][] corrs,
pair_offsets); // double [][] xy_offsets)
if (debug_graphic) {
int min_dia = 96;
double [][] corrs_dia = new double[corrs.length][];
for (int i = min_dia; i < corrs.length; i++) {
corrs_dia[i] = corrs[i];
}
corr_shape_dia = getCorrShape(
corrs_dia, // double [][] corrs,
pair_offsets); // double [][] xy_offsets)
}
norm_shape = conditionCorrShape(
corr_shape, // double [] corrs_shape,
imgdtt_params.lmamask_min_main, // double min_main,
imgdtt_params.lmamask_min_neib, // double min_neib,
imgdtt_params.lmamask_weight_neib, // double weight_neib);
imgdtt_params.lmamask_weight_neib_neib); // double weight_neib_neib
pair_shape_masks = applyCorrShape(
norm_shape, // double [] corrs_shape,
pair_offsets); // double [][] xy_offsets)
}
double [][] dbg_corr = debug_graphic ? new double [corrs.length][] : null;
// double [][] dbg_weights = debug_graphic ? new double [corrs.length][] : null;
if (debug_graphic) {
(new ShowDoubleFloatArrays()).showArrays(
corrs,
......@@ -3894,65 +3963,111 @@ public class Correlation2d {
true,
"corr_pairs"+"_x"+tileX+"_y"+tileY,
getCorrTitles());
}
// for (int npair = 0; npair < corrs.length; npair++) if ((corrs[npair] != null) && (((pair_mask >> npair) & 1) !=0)){
double [][] filtWeight = new double [corrs.length][];
for (int npair = 0; npair < pair_mask.length; npair++) if ((corrs[npair] != null) && (pair_mask[npair])){
// double[] corr = corrs[npair].clone();
double [] corr_blur = corrs[npair].clone();
if (corr_wnd_inv_limited != null) {
for (int i = 0; i < corr_blur.length; i++) {
corr_blur[i] *= corr_wnd_inv_limited[i];
}
if (corr_shape != null) {
(new ShowDoubleFloatArrays()).showArrays(
new double [][] {corr_shape,norm_shape, corr_shape_dia},
corr_size,
corr_size,
true,
"corr_shape"+"_x"+tileX+"_y"+tileY,
new String [] {"corr_shape","norm_shape","corr_shape_dia"});
}
if (imgdtt_params.lma_sigma > 0) {
gb.blurDouble(corr_blur, corr_size, corr_size, imgdtt_params.lma_sigma, imgdtt_params.lma_sigma, 0.01);
if (pair_shape_masks != null) {
(new ShowDoubleFloatArrays()).showArrays(
pair_shape_masks,
corr_size,
corr_size,
true,
"corr_shape_masks"+"_x"+tileX+"_y"+tileY,
getCorrTitles());
}
int imx = imgdtt_params.lma_soft_marg * (corr_size + 1);
for (int iy = imgdtt_params.lma_soft_marg; iy < (corr_size - imgdtt_params.lma_soft_marg); iy++) {
for (int ix = imgdtt_params.lma_soft_marg; ix < (corr_size - imgdtt_params.lma_soft_marg); ix++) {
int indx = iy * corr_size + ix;
if (corr_blur[indx] > corr_blur[imx]) imx = indx;
}
}
}
// try alternative mask generation by accumulation of the pre-shifted (from CM estimation with magic 0.85) correlations
// filter convex
int ix0 = (imx % corr_size) - center; // signed, around center to match filterConvex
int iy0 = (imx / corr_size) - center; // signed, around center to match filterConvex
filtWeight[npair] = filterConvex(
corr_blur, // double [] corr_data,
imgdtt_params.cnvx_hwnd_size, // int hwin,
ix0, // int x0,
iy0, // int y0,
imgdtt_params.cnvx_add3x3, // boolean add3x3,
imgdtt_params.cnvx_weight, // double nc_cost,
(debug_level > 2)); // boolean debug);
if (dbg_corr != null) dbg_corr [npair] = corr_blur;
// if (dbg_weights != null) dbg_weights[npair] = filtWeight[npair];
double [][] filtWeight = new double [corrs.length][];
double [][] samplesWeight = new double [corrs.length][];
int num_disp_samples = 0;
int num_cnvx_samples = 0;
int num_comb_samples = 0;
for (int npair = 0; npair < pair_mask.length; npair++) if ((corrs[npair] != null) && (pair_mask[npair])){
double [] corr_blur = null;
if (imgdtt_params.cnvx_en || (pair_shape_masks == null)) {
corr_blur = corrs[npair].clone();
if (corr_wnd_inv_limited != null) {
for (int i = 0; i < corr_blur.length; i++) {
corr_blur[i] *= corr_wnd_inv_limited[i];
}
}
if (imgdtt_params.lma_sigma > 0) {
gb.blurDouble(corr_blur, corr_size, corr_size, imgdtt_params.lma_sigma, imgdtt_params.lma_sigma, 0.01);
}
int imx = imgdtt_params.lma_soft_marg * (corr_size + 1);
for (int iy = imgdtt_params.lma_soft_marg; iy < (corr_size - imgdtt_params.lma_soft_marg); iy++) {
for (int ix = imgdtt_params.lma_soft_marg; ix < (corr_size - imgdtt_params.lma_soft_marg); ix++) {
int indx = iy * corr_size + ix;
if (corr_blur[indx] > corr_blur[imx]) imx = indx;
}
}
// filter convex
int ix0 = (imx % corr_size) - center; // signed, around center to match filterConvex
int iy0 = (imx / corr_size) - center; // signed, around center to match filterConvex
filtWeight[npair] = filterConvex(
corr_blur, // double [] corr_data,
imgdtt_params.cnvx_hwnd_size, // int hwin,
ix0, // int x0,
iy0, // int y0,
imgdtt_params.cnvx_add3x3, // boolean add3x3,
imgdtt_params.cnvx_weight, // double nc_cost,
(debug_level > 2)); // boolean debug);
}
if (dbg_corr != null) dbg_corr [npair] = corr_blur;
// Normalize weight for each pair to compensate for different number of convex samples?
// int fcam = PAIRS[npair][0];
// int scam = PAIRS[npair][1];
for (int i = 1; i < filtWeight[npair].length; i++) if (filtWeight[npair][i] > 0.0) {
// Combine/use window masks
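// Three cases (editor's comments, not part of this commit): if the convex filter produced nothing,
// fall back to the disparity-based shape mask (if any); if there is no shape mask (or lmamask_en is
// off), use the convex filter result alone; if both exist, combine them per cnvx_or - max() of the
// two masks when true, their product when false.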
if (filtWeight[npair] == null) {
samplesWeight[npair] = (pair_shape_masks != null)? pair_shape_masks[npair] : null;
} else if ((pair_shape_masks == null) || (pair_shape_masks[npair] == null) || !imgdtt_params.lmamask_en) {
samplesWeight[npair] = filtWeight[npair];
} else {
samplesWeight[npair] = filtWeight[npair].clone();
if (imgdtt_params.cnvx_or) {
for (int i = 0; i < samplesWeight[npair].length; i++) {
samplesWeight[npair][i] = Math.max(samplesWeight[npair][i], pair_shape_masks[npair][i]);
}
} else {
for (int i = 0; i < samplesWeight[npair].length; i++) {
samplesWeight[npair][i] *= pair_shape_masks[npair][i];
}
}
}
if (debug_lma_tile != null) { // calculate and return number of non-zero tiles
if (pair_shape_masks[npair] != null) {
for (int i = 0; i < samplesWeight[npair].length; i++) if (samplesWeight[npair][i] > 0.0) num_disp_samples++;
}
if (filtWeight[npair] != null) {
for (int i = 0; i < filtWeight[npair].length; i++) if (filtWeight[npair][i] > 0.0) num_cnvx_samples++;
}
if (samplesWeight[npair] != null) {
for (int i = 0; i < samplesWeight[npair].length; i++) if (samplesWeight[npair][i] > 0.0) num_comb_samples++;
}
}
for (int i = 1; i < samplesWeight[npair].length; i++) if (samplesWeight[npair][i] > 0.0) {
int ix = i % corr_size; // >=0
int iy = i / corr_size; // >=0
double v = corrs[npair][i]; // not blurred
double w = filtWeight[npair][i];
double w = samplesWeight[npair][i];
if (vasw_pwr != 0) {
w *= Math.pow(Math.abs(v), vasw_pwr);
}
// if (v > blur_max[npair]) blur_max[npair] = v;
lma.addSample( // x = 0, y=0 - center
0, // tile
npair,
// fcam, // int fcam, // first camera index
// scam, // int scam, // second camera index
ix, // int x, // x coordinate on the common scale (corresponding to the largest baseline), along the disparity axis
iy, // int y, // y coordinate (0 - disparity axis)
v, // double v, // correlation value at that point
......@@ -3960,47 +4075,64 @@ public class Correlation2d {
}
}
if (debug_graphic) {
(new ShowDoubleFloatArrays()).showArrays(
dbg_corr,
corr_size,
corr_size,
true,
"corr_blurred"+"_x"+tileX+"_y"+tileY,
getCorrTitles());
(new ShowDoubleFloatArrays()).showArrays(
filtWeight,
corr_size,
corr_size,
true,
"corr_weights"+"_x"+tileX+"_y"+tileY,
getCorrTitles());
}
if (debug_lma_tile != null) { // calculate and return number of non-zero tiles
debug_lma_tile[0] = num_disp_samples;
debug_lma_tile[1] = num_cnvx_samples;
debug_lma_tile[2] = num_comb_samples;
debug_lma_tile[3] = -1; // number of LMA re-tries (wasted attempts)
debug_lma_tile[4] = -1; // number of LMA iterations (of the last attempt)
debug_lma_tile[5] = -1; // LMA RMS (pure)
}
if (debug_graphic) {
if (dbg_corr != null) {
(new ShowDoubleFloatArrays()).showArrays(
dbg_corr,
corr_size,
corr_size,
true,
"corr_blurred"+"_x"+tileX+"_y"+tileY,
getCorrTitles());
}
if (filtWeight != null) {
(new ShowDoubleFloatArrays()).showArrays(
filtWeight,
corr_size,
corr_size,
true,
"filt_weight"+"_x"+tileX+"_y"+tileY,
getCorrTitles());
}
if (samplesWeight != null) {
(new ShowDoubleFloatArrays()).showArrays(
samplesWeight,
corr_size,
corr_size,
true,
"samples_weights"+"_x"+tileX+"_y"+tileY,
getCorrTitles());
}
}
// double [][] disp_str = {{xcenter, 1.0}}; // temporary
double [][] disp_str2 = {{0.0, 1.0}}; // temporary // will be calculated/set later
if (disp_str != null) {
disp_str2[0] = disp_str;
}
// double [][] disp_str2 = {{0.0, 1.0}}; // temporary // will be calculated/set later
boolean lmaSuccess = false;
int num_lma_retries = 0;
double [] disp = null;
// adjust_ly
double [][] ly_offsets_pairs = null;
if (adjust_ly) {
ly_offsets_pairs = getPairsCenters(
corrs, // double [][] corrs,
filtWeight); // double [][] weights)
samplesWeight); // double [][] weights)
}
double step_weight = 0.5; // scale corrections
double min_correction = 0.1; // exit when maximal XY correction is below
while (!lmaSuccess) {
num_lma_retries ++; // debug
lma.initVector(
imgdtt_params.lmas_adjust_wm, // boolean adjust_width, // adjust width of the maximum - lma_adjust_wm
imgdtt_params.lmas_adjust_ag, // boolean adjust_scales, // adjust 2D correlation scales - lma_adjust_ag
......@@ -4080,9 +4212,9 @@ public class Correlation2d {
}
lmaSuccess = lma.runLma(
imgdtt_params.lmas_lambda_initial, // double lambda, // 0.1
imgdtt_params.lma_lambda_scale_good, // double lambda_scale_good,// 0.5
imgdtt_params.lma_lambda_scale_bad, // double lambda_scale_bad, // 8.0
imgdtt_params.lma_lambda_max, // double lambda_max, // 100
imgdtt_params.lma_lambda_scale_good, // double lambda_scale_good,// 0.5
imgdtt_params.lma_lambda_scale_bad, // double lambda_scale_bad, // 8.0
imgdtt_params.lma_lambda_max, // double lambda_max, // 100
imgdtt_params.lmas_rms_diff, // double rms_diff, // 0.001
imgdtt_params.lmas_num_iter, // int num_iter, // 20
debug_level); // imgdtt_params.lma_debug_level1); // 4); // int debug_level) // > 3
......@@ -4119,6 +4251,10 @@ public class Correlation2d {
if (debug_level > -2) { // 0
System.out.println(String.format("Poly disparity=%8.5f , str=%8.5f", disp[0],disp[1]));
}
if (debug_lma_tile != null) {
debug_lma_tile[3] = num_lma_retries; // number of wasted attempts
debug_lma_tile[4] = lma.getNumIter();
}
} else {
if (debug_level > -2) {
System.out.println(String.format("Poly disparity=%8.5f , str=%8.5f, LMA disparity=%8.5f, str=%8.5f",
......@@ -4127,6 +4263,12 @@ public class Correlation2d {
// System.out.println("dispStr[0][0]="+dispStr[0][0]+" dispStr[0][1]="+dispStr[0][1]);
double [] rms = lma.getRMS();
if (debug_lma_tile != null) {
debug_lma_tile[3] = num_lma_retries; // number of wasted attempts
debug_lma_tile[4] = lma.getNumIter();
debug_lma_tile[5] = rms[1]; // pure rms
}
if (debug_level > 0) {
System.out.println("LMA -> "+lmaSuccess+" RMS="+rms[0]+", pure RMS="+rms[1]);
lma.printParams();
......@@ -4162,10 +4304,187 @@ public class Correlation2d {
}
}
} else if (debug_lma_tile != null) {
debug_lma_tile[3] = num_lma_retries; // number of wasted attempts
}
return lmaSuccess? lma: null;
}
/**
* Condition correlation shape to use as a mask
* @param corrs_shape raw centered correlation shape calculated with getCorrShape()
* @param min_main threshold to keep points regardless of neighbors (fraction of maximum)
* @param min_neib threshold to keep points if they have neighbors above min_main (fraction of maximum)
* @param weight_neib weight of the points that satisfy min_neib, but not min_main condition, < 1.0
* @param weight_neib_neib weight of neighbors of neighbors, regardless of value, < weight_neib
* @return weights array with 1.0 for points above min_main, weight_neib for satisfying min_neib and 0 - for others
*/
double [] conditionCorrShape(
double [] corrs_shape,
double min_main,
double min_neib,
double weight_neib,
double weight_neib_neib) {
double [] cond_shape = new double [corrs_shape.length];
double weight_main = 1.0;
int corr_size = 2 * transform_size - 1;
// TileNeibs tn = new TileNeibs(corr_size,corr_size);
int [][] offs_xy= {{0,-1},{1,-1},{1,0},{1,1},{0,1},{-1,1},{-1,0},{-1,-1}};
double max = corrs_shape[0];
for (int i = 1; i < cond_shape.length; i++) {
if (corrs_shape[i] > max) max = corrs_shape[i];
}
min_main *= max;
min_neib *= max;
for (int i = 0; i < cond_shape.length; i++) if (corrs_shape[i] >= min_main) {
cond_shape[i] = weight_main;
}
if (weight_neib > 0.0) {
for (int y = 0; y < corr_size; y++) {
for (int x = 0; x < corr_size; x++) {
int indx = y * corr_size + x;
if ((cond_shape[indx] == 0.0) &&(corrs_shape[indx] >= min_neib)) {
for (int [] dxy : offs_xy) {
int y1 = y + dxy[1];
if ((y1 >= 0) && (y1 < corr_size)) {
int x1 = x + dxy[0];
if ((x1 >= 0) && (x1 < corr_size)) {
if (cond_shape[y1 * corr_size + x1] == weight_main) {
cond_shape[y * corr_size + x] = weight_neib;
break;
}
}
}
}
}
}
}
}
if (weight_neib_neib > 0) {
for (int y = 0; y < corr_size; y++) {
for (int x = 0; x < corr_size; x++) {
int indx = y * corr_size + x;
if (cond_shape[indx] == 0.0) {
for (int [] dxy : offs_xy) {
int y1 = y + dxy[1];
if ((y1 >= 0) && (y1 < corr_size)) {
int x1 = x + dxy[0];
if ((x1 >= 0) && (x1 < corr_size)) {
if (cond_shape[y1 * corr_size + x1] >= weight_neib) {
cond_shape[y * corr_size + x] = weight_neib_neib;
break;
}
}
}
}
}
}
}
}
return cond_shape;
}
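// Editor's worked example (not part of this commit), illustrating the three-tier weighting above on
// a 1-D profile with max = 1.0, min_main = 0.4, min_neib = 0.1, weight_neib = 0.75, weight_neib_neib = 0.5:
//   values  : 0.05  0.15  0.50  1.00  0.30  0.08  0.02
//   weights : 0.50  0.75  1.00  1.00  0.75  0.50  0.00
// 0.50 and 1.00 pass min_main; 0.15 and 0.30 pass min_neib and touch a main point; 0.05 and 0.08 are
// neighbors of those regardless of value; 0.02 has no neighbor at or above weight_neib, so it stays 0.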
/**
* Calculate averaged (for defined pairs) 2D correlation shape assuming only
* disparity, no lazy eye. Disparity may be estimated with averaging + CM
* @param corrs pairs 2D correlations (each in scanline order)
* @param xy_offsets per pair x,y offsets calculated with Corr2dLMA.getPairsOffsets()
* @return averaged and centered (by applying disparity-defined shift) 2D correlations
* that can be used as mask for LMA input samples
*/
double [] getCorrShape(
double [][] corrs,
double [][] xy_offsets){ //
int corr_size = 2 * transform_size - 1;
double [] corr_shape = new double [corr_size*corr_size];
double [] corr_weights = new double [corr_size*corr_size];
for (int np = 0; np < corrs.length; np++) if ((xy_offsets[np] != null) && (corrs[np] != null)) {
int ix0 = (int) Math.floor(xy_offsets[np][0]);
int iy0 = (int) Math.floor(xy_offsets[np][1]);
for (int dy = 0; dy < 2; dy++) {
int iy = iy0 + dy;
int y0 = (iy > 0) ? 0 : -iy;
int y1 = (iy > 0) ? (corr_size - iy) : corr_size;
double ky = (dy > 0)? (xy_offsets[np][1] - iy0) : (iy0 + 1 - xy_offsets[np][1]);
for (int dx = 0; dx < 2; dx++) {
int ix = ix0 + dx;
int x0 = (ix > 0) ? 0 : -ix;
int x1 = (ix > 0) ? (corr_size - ix) : corr_size;
double kx = (dx > 0)? (xy_offsets[np][0] - ix0) : (ix0 + 1 - xy_offsets[np][0]);
double k = ky*kx;
int dsrc = iy * corr_size + ix;
for (int y = y0; y < y1; y++) {
for (int x = x0; x < x1; x++) {
int idst = y * corr_size + x;
int isrc = idst + dsrc;
corr_weights[idst] += k;
corr_shape[idst] += corrs[np][isrc] * k;
}
}
}
}
}
for (int i = 0; i < corr_shape.length; i++) if (corr_weights[i] > 0.0) {
corr_shape[i] /= corr_weights[i];
}
return corr_shape;
}
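// Editor's note: a self-contained sketch (not part of this commit) of the bilinear split used in
// getCorrShape() above. A fractional shift (dx, dy) is distributed over the four surrounding integer
// shifts; the four weights always sum to 1.0. The method name is hypothetical.
static double [][] bilinearShiftWeights(double dx, double dy) {
	int ix0 = (int) Math.floor(dx);
	int iy0 = (int) Math.floor(dy);
	double fx = dx - ix0; // fractional parts, in [0, 1)
	double fy = dy - iy0;
	// each row is {ix, iy, weight} for one of the four integer shifts
	return new double [][] {
		{ix0,     iy0,     (1.0 - fx) * (1.0 - fy)},
		{ix0 + 1, iy0,     fx         * (1.0 - fy)},
		{ix0,     iy0 + 1, (1.0 - fx) * fy},
		{ix0 + 1, iy0 + 1, fx         * fy}};
}
// Example: dx = 1.3, dy = -0.6 -> integer shifts (1,-1), (2,-1), (1,0), (2,0) get weights
// 0.42, 0.18, 0.28 and 0.12, matching the ky*kx products accumulated in getCorrShape().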
/**
* Shift the common corrs_shape in the direction opposite to xy_offsets to create per-pair selection windows for LMA fitting
* @param corrs_shape centered correlation shape calculated by averaging shifted (according to disparity) 2D correlations
* @param xy_offsets per-pair x,y shifts of the 2D correlations according to the common disparity
* @return per-pair selection windows for LMA
*/
double [][] applyCorrShape(
double [] corrs_shape,
double [][] xy_offsets){ //
double [][] shifted_shapes = new double [xy_offsets.length][];
int corr_size = 2 * transform_size - 1;
int corr_len = corr_size*corr_size;
for (int np = 0; np < xy_offsets.length; np++) if (xy_offsets[np]!= null){
double [] corr_weights = new double [corr_len];
shifted_shapes[np] = new double [corr_len];
int ix0 = (int) Math.floor(-xy_offsets[np][0]);
int iy0 = (int) Math.floor(-xy_offsets[np][1]);
for (int dy = 0; dy < 2; dy++) {
int iy = iy0 + dy;
int y0 = (iy > 0) ? 0 : -iy;
int y1 = (iy > 0) ? (corr_size - iy) : corr_size;
double ky = (dy > 0)? (-xy_offsets[np][1] - iy0) : (iy0 + 1 + xy_offsets[np][1]);
for (int dx = 0; dx < 2; dx++) {
int ix = ix0 + dx;
int x0 = (ix > 0) ? 0 : -ix;
int x1 = (ix > 0) ? (corr_size - ix) : corr_size;
double kx = (dx > 0)? (-xy_offsets[np][0] - ix0) : (ix0 + 1 + xy_offsets[np][0]);
double k = ky*kx;
int dsrc = iy * corr_size + ix;
for (int y = y0; y < y1; y++) {
for (int x = x0; x < x1; x++) {
int idst = y * corr_size + x;
int isrc = idst + dsrc;
corr_weights[idst] += k;
shifted_shapes[np][idst] += corrs_shape[isrc] *k;
}
}
}
}
for (int i = 0; i < corr_len; i++) if (corr_weights[i] > 0) {
shifted_shapes[np][i] /= corr_weights[i];
}
}
return shifted_shapes;
}
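// Editor's summary (not part of this commit) of how the three helpers above are chained in
// corrLMA2Single() when an initial disparity estimate (disp_str) is available:
//   pair_offsets     = lma.getPairsOffsets(corrs, pair_mask, disp_str[0]/lmamask_magic, disp_dist);
//   corr_shape       = getCorrShape(corrs, pair_offsets);        // average the shifted pair correlations
//   norm_shape       = conditionCorrShape(corr_shape, ...);      // threshold into a weight mask
//   pair_shape_masks = applyCorrShape(norm_shape, pair_offsets); // shift the mask back to each pair
// applyCorrShape() negates the offsets, so each pair's selection window lands back over that pair's
// un-shifted 2D correlation.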
/**
* Find individual pair centers for Lazy Eye initialization
* @param corrs per-pair 2d correlations
* @param weights per-pair, per-point sample weights
* @return per-pair x,y offsets
*/
public double [][] getPairsCenters(
double [][] corrs,
double [][] weights){
......@@ -4189,7 +4508,7 @@ public class Correlation2d {
}
return xy_offsets_pairs;
}
public Correlations2dLMA corrLMA( // USED in lwir
......
......@@ -955,28 +955,27 @@ public class ErsCorrection extends GeometryCorrection {
distortedView, // correct distortion (will need corrected background too !)
reference_xyz, // camera center in world coordinates
reference_atr); // camera orientation relative to world frame
if (xyzw == null) {
return null;
}
if (xyzw[2] > 0) {
xyzw[2] = xyzw[2];
/// return null; // can not match object behind the camera
}
ErsCorrection ers_camera = this;
if (cameraQuadCLT != null) {
ers_camera = cameraQuadCLT.getErsCorrection();
}
if (camera_xyz == null) camera_xyz = ers_camera.camera_xyz;
if (camera_atr == null) camera_atr = ers_camera.camera_atr;
double [] pXpYD = ers_camera.getImageCoordinatesERS( // USED in lwir
xyzw,
distortedCamera,
camera_xyz, // camera center in world coordinates
camera_atr, // camera orientation relative to world frame
line_err); // threshold error in scan lines (1.0)
return pXpYD;
if (xyzw == null) {
return null;
}
if (xyzw[2] > 0) {
xyzw[2] = xyzw[2];
}
ErsCorrection ers_camera = this;
if (cameraQuadCLT != null) {
ers_camera = cameraQuadCLT.getErsCorrection();
}
if (camera_xyz == null) camera_xyz = ers_camera.camera_xyz;
if (camera_atr == null) camera_atr = ers_camera.camera_atr;
double [] pXpYD = ers_camera.getImageCoordinatesERS( // USED in lwir
xyzw,
distortedCamera,
camera_xyz, // camera center in world coordinates
camera_atr, // camera orientation relative to world frame
line_err); // threshold error in scan lines (1.0)
return pXpYD;
}
/**
......
......@@ -697,6 +697,7 @@ public class ImageDtt extends ImageDttCPU {
TpTask[] tp_tasks = gpuQuad.setInterTasks(
false, // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
geometryCorrection, // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
......@@ -2307,6 +2308,7 @@ public class ImageDtt extends ImageDttCPU {
System.out.println("clt_aberrations_quad_corr_GPU(): this.gpuQuad is null, bailing out");
return;
}
final double [][] debug_offsets = new double[getNumSensors()][2];
for (int i = 0; i < imgdtt_params.lma_dbg_offset.length; i++) for (int j = 0; j < debug_offsets[i].length; j++) {
......@@ -2321,6 +2323,13 @@ public class ImageDtt extends ImageDttCPU {
// keep for now for mono, find out what do they mean for macro mode
final int corr_size = transform_size * 2 - 1;
final double [][] debug_lma = imgdtt_params.lmamask_dbg? (new double [6][tilesX*tilesY]):null;
if (debug_lma != null) {
for (int i = 0; i < debug_lma.length; i++) {
Arrays.fill(debug_lma[i], Double.NaN);
}
}
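// (Editor's comment) the six debug_lma planes are filled per tile from debug_lma_tile further down
// and correspond to the titles used when they are displayed: disp_samples, num_cnvx_samples,
// num_comb_samples, num_lmas, num_iters, rms.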
// reducing weight of on-axis correlation values to enhance detection of vertical/horizontal lines
......@@ -2422,7 +2431,7 @@ public class ImageDtt extends ImageDttCPU {
}
if (combine_corrs) {
correlation2d.generateResample( // should be called before
correlation2d.generateResample( // should be called before *** This can be done in CPU, table(s) copied to GPU
mcorr_comb_width, // combined correlation tile width
mcorr_comb_height, // combined correlation tile full height
mcorr_comb_offset, // combined correlation tile height offset: 0 - centered (-height/2 to height/2), height/2 - only positive (0 to height)
......@@ -2499,7 +2508,7 @@ public class ImageDtt extends ImageDttCPU {
}
}
// get CM disparity/strength
double [] disp_str = {0.0, 0.0}; // dispaprity = 0 will be initial approximation for LMA if no averaging
double [] disp_str = {0.0, 0.0}; // disparity = 0 will be initial approximation for LMA if no averaging
if (combine_corrs) {
double [] corr_combo_tile = correlation2d.accumulateInit(); // combine all available pairs
double sumw = 0.0;
......@@ -2592,9 +2601,16 @@ public class ImageDtt extends ImageDttCPU {
System.out.println("Will run new LMA for tileX="+tileX+", tileY="+tileY);
}
double [] poly_disp = {Double.NaN, 0.0};
double [] debug_lma_tile = (debug_lma != null) ? (new double [debug_lma.length]):null;
if (debug_lma_tile != null) {
for (int i = 0; i < debug_lma.length; i++) {
debug_lma_tile[i] = debug_lma[i][nTile];
}
}
Corr2dLMA lma2 = correlation2d.corrLMA2Single( // null pointer
imgdtt_params, // ImageDttParameters imgdtt_params,
imgdtt_params.lmas_LY_single, // false, // boolean adjust_ly, // adjust Lazy Eye
imgdtt_params.lmas_LY_single, // false, // boolean adjust_ly, // adjust Lazy Eye
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs, // corrs, // double [][] corrs,
......@@ -2605,9 +2621,12 @@ public class ImageDtt extends ImageDttCPU {
disp_str, //corr_stat[0], // double xcenter, // preliminary center x in pixels for largest baseline
poly_disp, // double[] poly_ds, // null or pair of disparity/strength
imgdtt_params.ortho_vasw_pwr, // double vasw_pwr, // value as weight to this power,
-2, //0, // tile_lma_debug_level, // +2, // int debug_level,
debug_lma_tile, // double [] debug_lma_tile,
(debugTile0 ? 1: -2), // int debug_level,
// -2, //0, // tile_lma_debug_level, // +2, // int debug_level,
tileX, // int tileX, // just for debug output
tileY ); // int tileY
tileY );
// int tileY
if (debugTile0) { // should be debugTile
System.out.println("Ran LMA for tileX="+tileX+", tileY="+tileY);
}
......@@ -2659,14 +2678,31 @@ public class ImageDtt extends ImageDttCPU {
}
}
}
}
if (debug_lma_tile != null) {
for (int i = 0; i < debug_lma.length; i++) {
debug_lma[i][nTile] = debug_lma_tile[i];
}
}
}
}
}
};
}
startAndJoin(threads);
if (debug_lma != null) {
(new ShowDoubleFloatArrays()).showArrays(
debug_lma,
tilesX,
tilesY,
true,
"lma_debug",
new String[] {"disp_samples","num_cnvx_samples","num_comb_samples", "num_lmas","num_iters","rms"}
);
}
}
return;
}
......
......@@ -77,7 +77,17 @@ public class ImageDttParameters {
public int dbg_pair_mask = 0x3f; // which pairs to combine
public int corr_strip_hight = 9; // number of rows to calculate
//lmamask_
public boolean lmamask_dbg = false; // show LMA images, exit after single BG
public boolean lmamask_en = false; // Use disparity-based LMA samples filter
public double lmamask_magic = 0.85;
public double lmamask_min_main = 0.4;
public double lmamask_min_neib = 0.10;
public double lmamask_weight_neib = 0.75;
public double lmamask_weight_neib_neib = 0.5;
// Extracting bi-convex (convex in both orthogonal directions) cells and allowing non-convex on the selection border only
public boolean cnvx_en = true; // Use convex-based LMA samples filter
public boolean cnvx_or = false; // If both lmamask_en and cnvx_en are available, use max; if false - multiply masks
public int cnvx_hwnd_size = 4; // half window size (both horizontal and vertical) to extract bi-convex cells
public double cnvx_weight = 0.5; // relative weight of non-convex (border) cell
public boolean cnvx_add3x3 = true; // always select 3x3 cells around integer maximum
......@@ -439,7 +449,27 @@ public class ImageDttParameters {
gd.addNumericField("Number of correlation rows to combine (strip height)", this.corr_strip_hight, 0, 3, "",
"Number of rows to combine/interpolate correlation results. Rows are twice denser than pixels correponding to largest baseline disparity");
gd.addMessage("LMA samples filter based on estimated disparity");
gd.addCheckbox ("Debug LMA", this.lmamask_dbg,
"Generate debug images and exit after first clt_process_tl_correlations() while generating background image");
gd.addCheckbox ("Use disparity-based LMA samples filtering", this.lmamask_en,
"Generate weighs by averaging 2D correlation shape and per-pair shifting for estimated from CM disparity");
gd.addNumericField("Divide estimated disparity by magic 0.85", this.lmamask_magic, 6,8,"",
"Increase estimated disparity before averaging correlation shape and per-pair shifting the result");
gd.addNumericField("Minimal relative sample value for unconditional inclusion", this.lmamask_min_main, 6,8,"",
"Relatrive (to maximal) value in averaged correlation to be assigned window vlaue of 1.0 regardless of neighbors");
gd.addNumericField("Minimal relative sample value for neighbor inclusion", this.lmamask_min_neib, 6,8,"",
"Minimal relative sample value for conditional inclusion (if it has unconditional neighbor");
gd.addNumericField("Neighbor weight", this.lmamask_weight_neib, 6,8,"",
"Assign window value for strong enough values of neighbors of unconditionally included");
gd.addNumericField("Neighbor of neighbor weights", this.lmamask_weight_neib_neib, 6,8,"",
"Weight of neighbors of conditionally or anconditionally included poins regardless of their values");
gd.addMessage("LMA samples filter based on convex sample values");
gd.addCheckbox ("Use convex-based LMA samples filtering", this.cnvx_en,
"Select LMA samples based on convex correlation samples");
gd.addCheckbox ("'OR' window functions", this.cnvx_or,
"If both lmamask_en and cnvx_en are available, use max(); if false - multiply masks");
gd.addNumericField("Half window size to extract bi-convex cells", this.cnvx_hwnd_size, 0, 3, "pix",
"Create selection mask for quadratic approximation inside square around initial maximum position, specify distance from the center");
gd.addNumericField("Relative weight of non-convex (border) cell", this.cnvx_weight, 6,8,"",
......@@ -826,7 +856,16 @@ public class ImageDttParameters {
this.dbg_pair_mask= (int) gd.getNextNumber();
this.corr_strip_hight= (int) gd.getNextNumber();
this.lmamask_dbg = gd.getNextBoolean();
this.lmamask_en = gd.getNextBoolean();
this.lmamask_magic = gd.getNextNumber();
this.lmamask_min_main = gd.getNextNumber();
this.lmamask_min_neib = gd.getNextNumber();
this.lmamask_weight_neib = gd.getNextNumber();
this.lmamask_weight_neib_neib = gd.getNextNumber();
this.cnvx_en = gd.getNextBoolean();
this.cnvx_or = gd.getNextBoolean();
this.cnvx_hwnd_size= (int) gd.getNextNumber();
this.cnvx_weight = gd.getNextNumber();
this.cnvx_add3x3 = gd.getNextBoolean();
......@@ -1035,6 +1074,15 @@ public class ImageDttParameters {
properties.setProperty(prefix+"dbg_pair_mask", this.dbg_pair_mask +"");
properties.setProperty(prefix+"corr_strip_hight", this.corr_strip_hight +"");
properties.setProperty(prefix+"lmamask_dbg", this.lmamask_dbg +"");
properties.setProperty(prefix+"lmamask_en", this.lmamask_en +"");
properties.setProperty(prefix+"lmamask_magic", this.lmamask_magic +"");
properties.setProperty(prefix+"lmamask_min_main", this.lmamask_min_main +"");
properties.setProperty(prefix+"lmamask_min_neib", this.lmamask_min_neib +"");
properties.setProperty(prefix+"lmamask_weight_neib", this.lmamask_weight_neib +"");
properties.setProperty(prefix+"lmamask_weight_neib_neib", this.lmamask_weight_neib_neib +"");
properties.setProperty(prefix+"cnvx_en", this.cnvx_en +"");
properties.setProperty(prefix+"cnvx_or", this.cnvx_or +"");
properties.setProperty(prefix+"cnvx_hwnd_size", this.cnvx_hwnd_size +"");
properties.setProperty(prefix+"cnvx_weight", this.cnvx_weight +"");
properties.setProperty(prefix+"cnvx_add3x3", this.cnvx_add3x3 +"");
......@@ -1248,6 +1296,17 @@ public class ImageDttParameters {
if (properties.getProperty(prefix+"dbg_pair_mask")!=null) this.dbg_pair_mask=Integer.parseInt(properties.getProperty(prefix+"dbg_pair_mask"));
if (properties.getProperty(prefix+"corr_strip_hight")!=null) this.corr_strip_hight=Integer.parseInt(properties.getProperty(prefix+"corr_strip_hight"));
if (properties.getProperty(prefix+"lmamask_dbg")!=null) this.lmamask_dbg=Boolean.parseBoolean(properties.getProperty(prefix+"lmamask_dbg"));
if (properties.getProperty(prefix+"lmamask_en")!=null) this.lmamask_en=Boolean.parseBoolean(properties.getProperty(prefix+"lmamask_en"));
if (properties.getProperty(prefix+"lmamask_magic")!=null) this.lmamask_magic=Double.parseDouble(properties.getProperty(prefix+"lmamask_magic"));
if (properties.getProperty(prefix+"lmamask_min_main")!=null) this.lmamask_min_main=Double.parseDouble(properties.getProperty(prefix+"lmamask_min_main"));
if (properties.getProperty(prefix+"lmamask_min_neib")!=null) this.lmamask_min_neib=Double.parseDouble(properties.getProperty(prefix+"lmamask_min_neib"));
if (properties.getProperty(prefix+"lmamask_weight_neib")!=null) this.lmamask_weight_neib=Double.parseDouble(properties.getProperty(prefix+"lmamask_weight_neib"));
if (properties.getProperty(prefix+"lmamask_weight_neib_neib")!=null) this.lmamask_weight_neib_neib=Double.parseDouble(properties.getProperty(prefix+"lmamask_weight_neib_neib"));
if (properties.getProperty(prefix+"cnvx_en")!=null) this.cnvx_en=Boolean.parseBoolean(properties.getProperty(prefix+"cnvx_en"));
if (properties.getProperty(prefix+"cnvx_or")!=null) this.cnvx_or=Boolean.parseBoolean(properties.getProperty(prefix+"cnvx_or"));
if (properties.getProperty(prefix+"cnvx_hwnd_size")!=null) this.cnvx_hwnd_size=Integer.parseInt(properties.getProperty(prefix+"cnvx_hwnd_size"));
if (properties.getProperty(prefix+"cnvx_weight")!=null) this.cnvx_weight=Double.parseDouble(properties.getProperty(prefix+"cnvx_weight"));
if (properties.getProperty(prefix+"cnvx_add3x3")!=null) this.cnvx_add3x3=Boolean.parseBoolean(properties.getProperty(prefix+"cnvx_add3x3"));
......@@ -1477,6 +1536,15 @@ public class ImageDttParameters {
idp.dbg_pair_mask= this.dbg_pair_mask;
idp.corr_strip_hight= this.corr_strip_hight;
idp.lmamask_dbg= this.lmamask_dbg;
idp.lmamask_en= this.lmamask_en;
idp.lmamask_magic= this.lmamask_magic;
idp.lmamask_min_main= this.lmamask_min_main;
idp.lmamask_min_neib= this.lmamask_min_neib;
idp.lmamask_weight_neib= this.lmamask_weight_neib;
idp.lmamask_weight_neib_neib= this.lmamask_weight_neib_neib;
idp.cnvx_en= this.cnvx_en;
idp.cnvx_or= this.cnvx_or;
idp.cnvx_hwnd_size= this.cnvx_hwnd_size;
idp.cnvx_weight= this.cnvx_weight;
idp.cnvx_add3x3= this.cnvx_add3x3;
......
......@@ -28,7 +28,9 @@ import java.awt.Rectangle;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.DoubleAccumulator;
......@@ -2306,6 +2308,7 @@ public class OpticalFlow {
scene_QuadClt.getGeometryCorrection().getSensorWH()[0],
!scene_QuadClt.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
scene_pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
scene_QuadClt.getGeometryCorrection(), // final GeometryCorrection geometryCorrection,
scene_disparity_cor, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
......@@ -2531,6 +2534,272 @@ public class OpticalFlow {
return pXpYD;
}
//TODO: refine inter-scene pose to accommodate refined disparity map
/**
* Removing BG tiles that are not visible because of the FG ones
* @param tp TileProcessor instance to get image dimensions
* @param pXpYD Array of pX, pY, Disparity triplets for the current camera calculated from the reference 3D model
* @param max_overlap maximal area overlap (for the full 16x16 image tiles) that allows the BG tile to be kept
* @param disparity_cam optional per-tile disparity map of this camera used to "cast shadows" from objects that are not visible
* in the reference (accurate) 3D model. TODO: pre-filter to remove those that should be visible in pXpYD? At least remove
* low-confidence tiles.
* @param min_adisp_cam minimal absolute disparity difference for disparity_cam to consider
* @param min_rdisp_cam minimal relative disparity difference for disparity_cam to consider
* @param dbg_tileX debug tile X coordinate (in tiles)
* @param dbg_tileY debug tile Y coordinate (in tiles)
* @param debug_level debug level
* @return copy of pXpYD with occluded elements nulled
*/
public double [][] filterBG (
final TileProcessor tp,
final double [][] pXpYD,
final double max_overlap,
// final double [][] pXpYD_cam,
final double [] disparity_cam,
// final double min_str_cam,
final double min_adisp_cam,
final double min_rdisp_cam,
final int dbg_tileX,
final int dbg_tileY,
final int debug_level
){
final int tilesX = tp.getTilesX();
final int tilesY = tp.getTilesY();
final int dbg_nTile = dbg_tileY * tilesX + dbg_tileX;
final int tileSize = tp.getTileSize();
// final double tileSize2 = tileSize * 2;
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final int tiles = tilesX*tilesY;
final TileNeibs tn = new TileNeibs(tilesX, tilesY);
ArrayList<List<Integer>> fg_bg_list = new ArrayList<List<Integer>> (tiles);
for (int i = 0; i < tiles; i++) {
fg_bg_list.add(Collections.synchronizedList(new ArrayList<Integer>()));
}
final int offs_range = 1; // (max_overlap < 0.5) ? 2 : 1;
final double[][] pXpYD_filtered = pXpYD.clone();
final AtomicInteger ai_num_tiles = new AtomicInteger(0);
final AtomicInteger ai_num_removed = new AtomicInteger(0);
final int overlap_radius = 4; // 9x9
final int overlap_diameter = 2 * overlap_radius + 1; // 9
final int overlap_size = overlap_diameter * overlap_diameter; // 81
final double scale_dist = 2.00 * overlap_radius / tileSize; // 1.0 for overlap_radius==4
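// Editor's note (not part of this commit): each candidate BG tile gets an overlap_diameter x
// overlap_diameter (9x9 = 81 cell) boolean staging grid. Nearby FG candidates from the reference
// model and, optionally, from disparity_cam mark the cells they cover via applyOverlap(); the BG
// tile is nulled in pXpYD_filtered when the marked fraction (marked cells / 81) exceeds max_overlap.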
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int indx = ai.getAndIncrement(); indx < pXpYD.length; indx = ai.getAndIncrement()) if (pXpYD[indx] != null) {
int tx = (int)Math.round(pXpYD[indx][0]/tileSize);
int ty = (int)Math.round(pXpYD[indx][1]/tileSize);
if ((debug_level > 0) && (tx == dbg_tileX) && (ty == dbg_tileY)) {
System.out.println("filterBG(): tx = "+tx+", ty="+ty+", indx="+indx);
System.out.print("");
}
if ((tx >=0) && (ty >=0) && (tx < tilesX) && (ty < tilesY)) {
int nTile = ty * tilesX + tx;
synchronized(fg_bg_list.get(nTile)) {
fg_bg_list.get(nTile).add(indx);
}
ai_num_tiles.getAndIncrement();
} else {
pXpYD_filtered[indx] = null;
}
}
}
};
}
ImageDtt.startAndJoin(threads);
ai.set(0);
// filter by the reference model
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
boolean [] overlap_staging = new boolean [overlap_size];
for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) if (fg_bg_list.get(nTile).size() > 0) {
for (int tindx_bg: fg_bg_list.get(nTile)) {
double [] txyd_bg = pXpYD[tindx_bg];
// final int offs_range = (max_overlap < 0.5) ? 2 : 1;
if ((debug_level > -1) && (tindx_bg == dbg_nTile)) {
System.out.println("filterBG(): tindx_bg="+tindx_bg+", nTile = "+nTile+", txyd_bg[0]="+txyd_bg[0]+", txyd_bg[1]="+txyd_bg[1]+", txyd_bg[2]="+txyd_bg[2]);
System.out.print("");
}
Arrays.fill(overlap_staging, false);
boolean some_overlap = false;
for (int dty = -offs_range; dty <= offs_range; dty++) {
for (int dtx = -offs_range; dtx <= offs_range; dtx++) {
int nTile_fg = tn.getNeibIndex(nTile, dtx, dty);
if (nTile_fg >= 0) for (int tindx_fg: fg_bg_list.get(nTile_fg)) if (tindx_fg != tindx_bg){
double [] txyd_fg = pXpYD[tindx_fg];
// check if FG is closer than BG (here does not have to be significantly closer
if (txyd_fg[2] > txyd_bg[2]) {
// see if there is any overlap
double x_fg_bg = txyd_fg[0] - txyd_bg[0];
double y_fg_bg = txyd_fg[1] - txyd_bg[1];
if ((Math.abs(x_fg_bg) < tileSize) && (Math.abs(y_fg_bg) < tileSize)) {
applyOverlap(
overlap_staging, // boolean [] staging,
overlap_diameter, // int overlap_diameter,
scale_dist, // double scale_dist,
x_fg_bg, // double dx,
y_fg_bg); // double dy
some_overlap = true;
}
}
}
}
}
// apply camera disparity map
if (disparity_cam != null) {
double ddisp = min_adisp_cam + txyd_bg[2] * min_rdisp_cam;
int tx = (int) Math.round(txyd_bg[0]/tileSize);
int ty = (int) Math.round(txyd_bg[1]/tileSize);
// Limit to 0.. max?
int nTile_fg_center = ty * tilesX + tx;
for (int dty = -offs_range; dty <= offs_range; dty++) {
for (int dtx = -offs_range; dtx <= offs_range; dtx++) {
int nTile_fg = tn.getNeibIndex(nTile_fg_center, dtx, dty);
if ((nTile_fg >= 0) && (disparity_cam[nTile_fg] - txyd_bg[2] > ddisp)){
double x_fg_bg = (tx + dtx + 0.5) * tileSize - txyd_bg[0];
double y_fg_bg = (ty + dty + 0.5) * tileSize - txyd_bg[1];
if ((Math.abs(x_fg_bg) < tileSize) && (Math.abs(y_fg_bg) < tileSize)) {
applyOverlap(
overlap_staging, // boolean [] staging,
overlap_diameter, // int overlap_diameter,
scale_dist, // double scale_dist,
x_fg_bg, // double dx,
y_fg_bg); // double dy
some_overlap = true;
}
}
}
}
}
if (some_overlap) { // count actual overlap
int num_overlap = 0;
for (boolean staging_point: overlap_staging) {
if (staging_point) {
num_overlap++;
}
}
double frac_overlap = 1.0 * num_overlap / overlap_staging.length;
if (frac_overlap > max_overlap) {
ai_num_removed.getAndIncrement();
pXpYD_filtered[tindx_bg] = null; // OK that it still remains in the lists
}
}
/*
double overlapX = Math.max(tileSize2 - Math.abs(txyd_fg[0] - txyd_bg[0]), 0)/tileSize2;
double overlapY = Math.max(tileSize2 - Math.abs(txyd_fg[1] - txyd_bg[1]), 0)/tileSize2;
if ((overlapX * overlapY) > max_overlap) { // remove BG tile
pXpYD_filtered[tindx_bg] = null; // OK that it still remains in the lists
ai_num_removed_ref.getAndIncrement();
if ((debug_level > -1) && (nTile==dbg_nTile)) {
System.out.println("+++++++++++++++ filterBG(): nTile = "+nTile+
", txyd_bg[0]="+txyd_bg[0]+", txyd_bg[1]="+txyd_bg[1]+", txyd_bg[2]="+txyd_bg[2]+
", txyd_fg[0]="+txyd_fg[0]+", txyd_fg[1]="+txyd_fg[1]+", txyd_fg[2]="+txyd_fg[2]);
System.out.print("");
}
}
*/
}
}
}
};
}
ImageDtt.startAndJoin(threads);
/*
// Maybe remove here from the list tiles that are removed from pXpYD_filtered?
if (pXpYD_cam != null) {
ai.set(0);
// filter by the reference model
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int tindx_fg = ai.getAndIncrement(); tindx_fg < pXpYD_cam.length; tindx_fg = ai.getAndIncrement()) if (pXpYD_cam[tindx_fg] != null) {
double [] txyd_fg = pXpYD[tindx_fg];
double ddisp = min_adisp_cam + txyd_fg[2] * min_rdisp_cam;
int tx = (int)Math.round(txyd_fg[0]/tileSize);
int ty = (int)Math.round(txyd_fg[1]/tileSize);
if ((tx >=0) && (ty >=0) && (tx < tilesX) && (ty < tilesY)) {
int nTile_fg = ty * tilesX + tx;
for (int dy = -offs_range; dy <= offs_range; dy++) {
for (int dx = -offs_range; dx <= offs_range; dx++) {
int nTile_bg = tn.getNeibIndex(nTile_fg, dx, dy);
if (nTile_bg >= 0) for (int tindx_bg: fg_bg_list.get(nTile_bg)){
double [] txyd_bg = pXpYD[tindx_bg];
if ((txyd_fg[2] - txyd_bg[2]) > ddisp) { // FG is significantly closer than BG
double overlapX = Math.max(tileSize2 - Math.abs(txyd_fg[0] - txyd_bg[0]), 0)/tileSize2;
double overlapY = Math.max(tileSize2 - Math.abs(txyd_fg[1] - txyd_bg[1]), 0)/tileSize2;
if ((overlapX * overlapY) > max_overlap) { // remove BG tile
pXpYD_filtered[tindx_bg] = null; // OK that it still remains in the lists
ai_num_removed_cam.getAndIncrement();
}
}
}
}
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
}
*/
if (debug_level > -1){
System.out.println("filterBG(): num_all_tiles = "+ai_num_tiles.get()+
", num_removed="+ ai_num_removed.get()+
", remaining tiles="+(ai_num_tiles.get() - ai_num_removed.get()));
System.out.print("");
}
return pXpYD_filtered;
}
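A worked example of the occlusion threshold used in filterBG() above (illustrative only, not part of the commit): with the values hard-coded at the filterBG() call site further below (min_adisp_cam = 0.2 pix, min_rdisp_cam = 0.03), a BG point projected at disparity txyd_bg[2] = 10 pix is masked by the camera disparity map only where disparity_cam exceeds it by more than ddisp = 0.2 + 10 * 0.03 = 0.5 pix, while a far point at 1 pix needs only 0.2 + 1 * 0.03 = 0.23 pix of excess disparity, so distant background is treated as occluded more readily.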
private void applyOverlap(
boolean [] staging,
int overlap_diameter,
double scale_dist,
double dx,
double dy
) {
int ix0 = (int) Math.round(dx * scale_dist);
int ix1 = ix0 + overlap_diameter;
int iy0 = (int) Math.round(dy * scale_dist);
int iy1 = iy0 + overlap_diameter;
if (ix0 < 0) ix0 = 0;
if (ix1 > overlap_diameter) ix1 = overlap_diameter;
if (iy0 < 0) iy0 = 0;
if (iy1 > overlap_diameter) iy1 = overlap_diameter;
for (int iy = iy0; iy < iy1; iy++) {
int line_start = iy * overlap_diameter;
Arrays.fill(staging, line_start + ix0, line_start + ix1, true);
}
}
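For reference, a minimal standalone sketch (not part of the commit) of how the staging buffer is used: it mirrors applyOverlap() above (with the clamping expressed via Math.min/Math.max), accumulates the footprints of two hypothetical FG tiles over one BG tile, and compares the covered fraction against max_overlap the same way filterBG() does. The constants tileSize = 8 and overlap_radius = 4 are assumptions chosen so that scale_dist = 1.0, matching the comment in filterBG().

import java.util.Arrays;

public class OverlapFractionSketch {
    // Mirror of applyOverlap(): mark the cells of the (2*R+1)x(2*R+1) staging grid
    // covered by a FG tile offset by (dx, dy) pixels from the BG tile.
    static void applyOverlap(boolean [] staging, int overlap_diameter,
                             double scale_dist, double dx, double dy) {
        int ix0 = (int) Math.round(dx * scale_dist);
        int ix1 = Math.min(ix0 + overlap_diameter, overlap_diameter);
        int iy0 = (int) Math.round(dy * scale_dist);
        int iy1 = Math.min(iy0 + overlap_diameter, overlap_diameter);
        ix0 = Math.max(ix0, 0);
        iy0 = Math.max(iy0, 0);
        for (int iy = iy0; iy < iy1; iy++) {
            int line_start = iy * overlap_diameter;
            Arrays.fill(staging, line_start + ix0, line_start + ix1, true);
        }
    }
    public static void main(String[] args) {
        final int tileSize = 8, overlap_radius = 4;                // assumed defaults
        final int overlap_diameter = 2 * overlap_radius + 1;       // 9
        final double scale_dist = 2.0 * overlap_radius / tileSize; // 1.0
        final double max_overlap = 0.6;                            // value used at the call site below
        boolean [] overlap_staging = new boolean [overlap_diameter * overlap_diameter];
        // footprints of two hypothetical FG tiles in front of the same BG tile
        applyOverlap(overlap_staging, overlap_diameter, scale_dist,  3.0, 0.0); // columns 3..8, all rows
        applyOverlap(overlap_staging, overlap_diameter, scale_dist, -2.0, 1.0); // columns 0..6, rows 1..8
        int num_overlap = 0;
        for (boolean staging_point : overlap_staging) if (staging_point) num_overlap++;
        double frac_overlap = 1.0 * num_overlap / overlap_staging.length; // 78/81 ~ 0.96
        System.out.println("frac_overlap = " + frac_overlap +
                ", remove BG tile: " + (frac_overlap > max_overlap)); // true
    }
}

With these two offsets 78 of the 81 staging cells are covered (frac_overlap close to 0.96), so with max_overlap = 0.6 the BG tile would be nulled in pXpYD_filtered.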
/*
ArrayList<Integer> neib_list = new ArrayList<Integer>(20);
// here each tlist is accessed exclusively
Collections.sort(fg_bg_list.get(nTile), new Comparator<Integer>() {
@Override
public int compare(Integer lhs, Integer rhs) { // descending // ascending
return pXpYD[lhs][2] > pXpYD[rhs][2] ? -1 : (pXpYD[lhs][2] < pXpYD[rhs][2] ) ? 1 : 0;
}
});
*
List list = Collections.synchronizedList(new ArrayList());
...
synchronized(list) {
Iterator i = list.iterator(); // Must be in synchronized block
while (i.hasNext())
foo(i.next());
}
*/
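The commented-out snippet above shows the generic Collections.synchronizedList() sort/iteration idiom; filterBG() itself only relies on the simpler accumulate-then-read pattern: synchronized adds while the worker threads run, plain reads after ImageDtt.startAndJoin(), when the lists are no longer modified. A self-contained sketch of that pattern, with placeholder names and sizes (not part of the commit):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class BinningPatternSketch {
    public static void main(String[] args) throws InterruptedException {
        final int tiles = 4;   // placeholder for tilesX * tilesY
        final int items = 100; // placeholder for pXpYD.length
        final ArrayList<List<Integer>> per_tile_list = new ArrayList<List<Integer>>(tiles);
        for (int i = 0; i < tiles; i++) {
            per_tile_list.add(Collections.synchronizedList(new ArrayList<Integer>()));
        }
        final AtomicInteger ai = new AtomicInteger(0);
        final Thread [] threads = new Thread [4];
        for (int ithread = 0; ithread < threads.length; ithread++) {
            threads[ithread] = new Thread() {
                public void run() {
                    // work-stealing loop, as in filterBG()
                    for (int indx = ai.getAndIncrement(); indx < items; indx = ai.getAndIncrement()) {
                        int nTile = indx % tiles; // stand-in for the pX,pY -> tile binning
                        synchronized (per_tile_list.get(nTile)) { // same explicit lock as in filterBG()
                            per_tile_list.get(nTile).add(indx);
                        }
                    }
                }
            };
            threads[ithread].start();
        }
        for (Thread t : threads) t.join(); // stands in for ImageDtt.startAndJoin(threads)
        // after the join the lists are only read, so no synchronization is needed
        for (int nTile = 0; nTile < tiles; nTile++) {
            System.out.println("tile " + nTile + ": " + per_tile_list.get(nTile).size() + " entries");
        }
    }
}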
public double [][] transformFromScenePxPyD(
final double [][] pXpYD_scene, // tiles correspond to reference, pX,pY,D - for scene
......@@ -3470,11 +3739,13 @@ public class OpticalFlow {
/// Runtime.getRuntime().gc();
/// System.out.println("--- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
int mcorr_sel = Correlation2d.corrSelEncode(clt_parameters.img_dtt,scenes[indx_ref].getNumSensors());
// FIXME: null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
disparity_map = correlateInterscene(
clt_parameters, // final CLTParameters clt_parameters,
scenes, // final QuadCLT [] scenes,
indx_ref, // final int indx_ref,
combo_dsn_change[0], // final double [] disparity_ref, // disparity in the reference view tiles (Double.NaN - invalid)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
margin, // final int margin,
nrefine, // final int nrefine, // just for debug title
clt_parameters.inp.show_final_2d, // final boolean show_2d_corr,
......@@ -3975,7 +4246,7 @@ public class OpticalFlow {
// empiric correction for both lma and non-lma step
double corr_nonlma = 1.0; // 1.23;
double corr_lma = 1.0; // 1.23;
// reference scene is always added to tghe end, even is out of timestamp order
// reference scene is always added to the end, even if out of timestamp order
int indx_ref = scenes.length - 1; // Always added to the end even if out-of order
QuadCLT ref_scene = scenes[indx_ref]; // ordered by increasing timestamps
boolean generate_outlines = false; // true; // TODO: move to configs
......@@ -4105,32 +4376,42 @@ public class OpticalFlow {
double [][] dbg_corr_scale = null;
if (debug_level > 0) {
dbg_corr_scale = new double[max_refines][];
}
}
boolean [] selection = new boolean [target_disparity.length];
for (int i = 0; i < target_disparity.length; i++) {
selection[i] = !Double.isNaN(target_disparity[i]);
}
boolean [] selection_orig = selection.clone();
for (int nrefine = 0; nrefine < max_refines; nrefine++) {
if (nrefine == clt_parameters.rig.mll_max_refines_pre) {
min_disp_change = clt_parameters.rig.mll_min_disp_change_lma;
clt_parameters.img_dtt.setMcorr(num_sensors, save_pairs_selection); // restore
clt_parameters.correlate_lma = save_run_lma; // restore
/*
for (int nt = 0; nt < target_disparity.length; nt++) if (Double.isNaN(target_disparity[nt])){
if (!Double.isNaN(target_disparity_orig[nt])) {
target_disparity[nt] = combo_dsn_change[combo_dsn_indx_disp][nt];
}
}
*/
selection = selection_orig.clone();
if (debug_level > -2) {
int num_tomeas = 0;
for (int nt = 0; nt < target_disparity.length; nt++) if (!Double.isNaN(target_disparity[nt])){
for (int nt = 0; nt < target_disparity.length; nt++) if (selection[nt]) { // (!Double.isNaN(target_disparity[nt])){
num_tomeas++;
}
System.out.println ("nrefine pass = "+nrefine+", remaining "+num_tomeas+" tiles to re-measure");
}
}
int mcorr_sel = Correlation2d.corrSelEncode(clt_parameters.img_dtt,num_sensors);
// FIXME: null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
double [][] disparity_map =
correlateInterscene(
clt_parameters, // final CLTParameters clt_parameters,
scenes, // final QuadCLT [] scenes,
indx_ref, // final int indx_ref,
target_disparity, // combo_dsn_change[combo_dsn_indx_disp], // final double [] disparity_ref, // disparity in the reference view tiles (Double.NaN - invalid)
selection, // final boolean [] selection, // may be null, if not null do not process unselected tiles
margin, // final int margin,
nrefine, // final int nrefine, // just for debug title
false, // ( nrefine == (max_refines - 1)) && clt_parameters.inp.show_final_2d, // final boolean show_2d_corr,
......@@ -4154,7 +4435,8 @@ public class OpticalFlow {
double [] map_strength = disparity_map[ImageDtt.DISPARITY_STRENGTH_INDEX]; // 10
double [] map_disparity_lma = disparity_map[ImageDtt.DISPARITY_INDEX_POLY]; // 8
int num_tomeas = 0; // number of tiles to measure
Arrays.fill(target_disparity, Double.NaN);
// Arrays.fill(target_disparity, Double.NaN);
Arrays.fill(selection, false);
for (int nTile =0; nTile < combo_dsn_change[0].length; nTile++) {
if (defined_tiles[nTile]) { // originally defined, maybe not measured last time
if (!Double.isNaN(map_disparity[nTile])) { // re-measured
......@@ -4197,6 +4479,7 @@ public class OpticalFlow {
if (Math.abs(combo_dsn_change[combo_dsn_indx_change][nTile]) >= min_disp_change) {
target_disparity[nTile] = combo_dsn_change[combo_dsn_indx_disp][nTile] ;
selection[nTile] = true;
num_tomeas ++;
} else {
num_tomeas+=0;
......@@ -4379,11 +4662,13 @@ public class OpticalFlow {
if (save_accum) {
int mcorr_sel = Correlation2d.corrSelEncodeAll(0); // all sensors
float [][][] facc_2d_img = new float [1][][];
// FIXME: null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
correlateInterscene(
clt_parameters, // final CLTParameters clt_parameters,
scenes, // final QuadCLT [] scenes,
indx_ref, // final int indx_ref,
combo_dsn_change[0], // final double [] disparity_ref, // disparity in the reference view tiles (Double.NaN - invalid)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
margin, // final int margin,
-1, // final int nrefine, // just for debug title
false, // final boolean show_2d_corr,
......@@ -4869,12 +5154,14 @@ public class OpticalFlow {
/// Runtime.getRuntime().gc();
/// System.out.println("--- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
int mcorr_sel = Correlation2d.corrSelEncode(clt_parameters.img_dtt,scenes[indx_ref].getNumSensors());
// FIXME: null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
double [][] disparity_map =
correlateInterscene(
clt_parameters, // final CLTParameters clt_parameters,
scenes, // final QuadCLT [] scenes,
indx_ref, // final int indx_ref,
combo_dsn_change[0], // final double [] disparity_ref, // disparity in the reference view tiles (Double.NaN - invalid)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
margin, // final int margin,
nrefine, // final int nrefine, // just for debug title
( nrefine == (max_refines - 1)) && clt_parameters.inp.show_final_2d, // final boolean show_2d_corr,
......@@ -6088,6 +6375,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
final QuadCLT [] scenes,
final int indx_ref,
final double [] disparity_ref, // disparity in the reference view tiles (Double.NaN - invalid)
final boolean [] selection, // may be null, if not null do not process unselected tiles
final int margin,
final int nrefine, // just for debug title
final boolean show_2d_corr,
......@@ -6155,12 +6443,29 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
scene_ers_xyz_dt, // double [] ers_xyz_dt,
scene_ers_atr_dt); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
//setupERS() will be inside transformToScenePxPyD()
scene_pXpYD = transformToScenePxPyD( // will be null for disparity == NaN
double [][] scene_pXpYD_prefilter = transformToScenePxPyD( // will be null for disparity == NaN, total size - tilesX*tilesY
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scenes[nscene], // final QuadCLT scene_QuadClt,
ref_scene); // final QuadCLT reference_QuadClt)
double max_overlap = 0.6;
double [] disparity_cam = null; // for now
// double min_str_cam = 0.1;
double min_adisp_cam = 0.2;
double min_rdisp_cam = 0.03;
scene_pXpYD = filterBG (
scenes[indx_ref].getTileProcessor(), // final TileProcessor tp,
scene_pXpYD_prefilter, // final double [][] pXpYD,
max_overlap, // final double max_overlap,
disparity_cam, // final double [] disparity_cam,
min_adisp_cam, // final double min_adisp_cam,
min_rdisp_cam, // final double min_rdisp_cam,
clt_parameters.tileX, // final int dbg_tileX,
clt_parameters.tileY, // final int dbg_tileY,
0); // 1); //debug_level); // final int debug_level);
}
scenes[nscene].saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU)
final double gpu_sigma_corr = clt_parameters.getGpuCorrSigma(scenes[nscene].isMonochrome());
......@@ -6173,11 +6478,12 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
scenes[nscene].getErsCorrection().getSensorWH()[0],
!scenes[nscene].hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
scene_pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
selection, // final boolean [] selection, // may be null, if not null do not process unselected tiles
scenes[nscene].getErsCorrection(), // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
threadsMax); // final int threadsMax) // maximal number of threads to launch
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
threadsMax); // final int threadsMax) // maximal number of threads to launch
if (nscene == indx_ref) {
tp_tasks_ref = tp_tasks; // will use coordinates data for LMA ? disp_dist
}
......@@ -6552,6 +6858,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
ref_scene.getErsCorrection().getSensorWH()[0],
!ref_scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
scene_pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
ref_scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
......@@ -7295,6 +7602,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
scene.getGeometryCorrection().getSensorWH()[0],
!scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
scene.getGeometryCorrection(), // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
......
......@@ -3892,6 +3892,7 @@ public class QuadCLT extends QuadCLTCPU {
TpTask[] tp_tasks = gpuQuad.setInterTasks(
false, // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
geometryCorrection, // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
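Several call sites in this commit still pass null for the new selection argument (see the FIXME notes above). A minimal sketch of deriving a per-tile mask from a target disparity array, mirroring the selection setup in the refine loop earlier, that could be passed instead (illustrative only; names and data are placeholders):

public class SelectionMaskSketch {
    // Build the boolean selection mask from a per-tile target disparity array
    // (NaN == tile not requested), the same way the refine loop above does.
    public static boolean [] selectionFromDisparity(double [] target_disparity) {
        boolean [] selection = new boolean [target_disparity.length];
        for (int i = 0; i < target_disparity.length; i++) {
            selection[i] = !Double.isNaN(target_disparity[i]);
        }
        return selection;
    }
    public static void main(String[] args) {
        double [] target_disparity = {1.5, Double.NaN, 0.0, Double.NaN}; // placeholder data
        boolean [] selection = selectionFromDisparity(target_disparity);
        // 'selection' could then be passed instead of null to setInterTasks()
        // or correlateInterscene() so unselected tiles are skipped.
        System.out.println(java.util.Arrays.toString(selection)); // [true, false, true, false]
    }
}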
......
......@@ -8253,6 +8253,12 @@ public class QuadCLTCPU {
threadsMax, // maximal number of threads to launch
updateStatus,
debugLevel);
if (clt_parameters.img_dtt.lmamask_dbg) {
System.out.println("Remove me - QCC8257");
return false;
}
tp.clt_3d_passes.add(bgnd_data);
// if (show_init_refine)
// if ((debugLevel > -2) && clt_parameters.show_first_bg) {
......@@ -12026,7 +12032,8 @@ public class QuadCLTCPU {
d = ImageDtt.setPairMask(d,0xf);
d = ImageDtt.setForcedDisparity(d,true);
int [][] tile_op = tp.setSameTileOp(clt_parameters, d, debugLevel);
double [][] disparity_array = tp.setSameDisparity(0.0); // [tp.tilesY][tp.tilesX] - individual per-tile expected disparity
double disparity0 = 0.0;
double [][] disparity_array = tp.setSameDisparity(disparity0); // [tp.tilesY][tp.tilesX] - individual per-tile expected disparity
scan.disparity = disparity_array;
scan.tile_op = tile_op;
CLTPass3d scan_rslt = CLTMeas( // perform single pass according to prepared tiles operations and disparity // USED in lwir
......@@ -12976,6 +12983,7 @@ public class QuadCLTCPU {
// When clt_mismatch is non-zero, no far objects extraction will be attempted
//optional, may be null
disparity_map, // final double [][] disparity_map, // [8][tilesY][tilesX], only [6][] is needed on input or null - do not calculate
// REMOVE 'true'
clt_parameters.correlate_lma, // final boolean run_lma, // calculate LMA, false - CM only
// define combining of all 2D correlation pairs for CM (LMA does not use them)
clt_parameters.img_dtt.mcorr_comb_width, //final int mcorr_comb_width, // combined correlation tile width (set <=0 to skip combined correlations)
......@@ -12986,7 +12994,7 @@ public class QuadCLTCPU {
clt_parameters.tileX, // final int debug_tileX,
clt_parameters.tileY, // final int debug_tileY,
threadsMax, // final int threadsMax, // maximal number of threads to launch
debugLevel + 2+0); // -1 ); // final int globalDebugLevel)
debugLevel + 2+1); // -1 ); // final int globalDebugLevel)
} else {
image_dtt.clt_process_tl_correlations( // convert to pixel domain and process correlations already prepared in fcorr_td and/or fcorr_combo_td
......
......@@ -11766,6 +11766,11 @@ if (debugLevel > -100) return true; // temporarily !
threadsMax, // maximal number of threads to launch
updateStatus,
debugLevelInner);
if (clt_parameters.img_dtt.lmamask_dbg) {
System.out.println("Remove me - TQC11770");
return;
}
if (quadCLT_main.correctionsParameters.clt_batch_dsi_aux_full) {
if (updateStatus) IJ.showStatus("Expanding DSI for the aux camera image set "+quadCLT_main.image_name+" (for DSI export)");
quadCLT_aux.expandCLTQuad3d( // returns ImagePlus, but it already should be saved/shown
......