Commit 2a11eb19 authored by Andrey Filippov

Debugging LMA-based LY measurement, added initial LY estimation, more model functions
parent 6293946d
......@@ -300,7 +300,14 @@ public class CLTParameters {
private double lym_change = 5e-5; // Parameter vector difference to exit 4e-6 - OK
private double lym_change_aux = 1e-4; // same for aux camera (currently LWIR)
public double lym_poly_change = 0.002; // Parameter vector difference to exit from polynomial correction
public boolean lym_mod_map = true; // Modify preliminary disparity map before running LY
public boolean lym_top_bg = true; // All above found BG is also BG (valid for most, but not all scenes)
public int lym_fill_gaps_bg = 0; // fill small gaps in found background
public int lym_fill_gaps_combo = 2; // 1 - in 4 directions by 1, 2 - in 8 directions by 1,
public boolean lym_use_strength = true; // Use strength when averaging disparity from neighbors
public double lym_scale_deriv_str=0.5; // Scale strength of the interpolated tiles from average neighbors strength
public boolean lyf_filter = false; // Filter lazy eye pairs by their values
public int lyf_smpl_side = 3; // 8x8 masked, 16x16 sampled
public double lyf_rms_max = 0.1; // Maximal RMS (all components to components average)
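As a reading aid for the new lym_fill_gaps_* parameters: the value selects the neighborhood of a one-step gap fill (0 - disabled, 1 - 4-neighbor, 2 - 8-neighbor). Below is a minimal, hypothetical sketch of that convention applied to a boolean tile selection; it is not the project's actual fill routine, only an illustration of the parameter meaning.

// Hypothetical illustration only - not part of this commit.
// Grow a boolean tile selection by one step to fill small gaps:
// mode 1 - 4 directions (no diagonals), mode 2 - 8 directions, 0 - no fill.
static boolean [] fillGapsOnce(boolean [] sel, int tilesX, int mode) {
	if (mode <= 0) return sel.clone();
	int tilesY = sel.length / tilesX;
	boolean [] out = sel.clone();
	for (int ty = 0; ty < tilesY; ty++) {
		for (int tx = 0; tx < tilesX; tx++) {
			if (sel[ty * tilesX + tx]) continue;
			boolean fill = false;
			for (int dy = -1; (dy <= 1) && !fill; dy++) {
				for (int dx = -1; (dx <= 1) && !fill; dx++) {
					if ((dx == 0) && (dy == 0)) continue;
					if ((mode < 2) && (dx != 0) && (dy != 0)) continue; // skip diagonals in 4-direction mode
					int x = tx + dx, y = ty + dy;
					fill = (x >= 0) && (x < tilesX) && (y >= 0) && (y < tilesY) && sel[y * tilesX + x];
				}
			}
			if (fill) out[ty * tilesX + tx] = true;
		}
	}
	return out;
}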
......@@ -1190,15 +1197,20 @@ public class CLTParameters {
properties.setProperty(prefix+"ly_disp_rvar_gt", this.ly_disp_rvar_gt +"");
properties.setProperty(prefix+"ly_norm_disp", this.ly_norm_disp +"");
properties.setProperty(prefix+"lym_overexp", this.lym_overexp +"");
properties.setProperty(prefix+"lym_dbg_path", this.lym_dbg_path +"");
properties.setProperty(prefix+"lym_update_disp", this.lym_update_disp+"");
properties.setProperty(prefix+"lym_iter", this.lym_iter+"");
properties.setProperty(prefix+"lym_change", this.lym_change +"");
properties.setProperty(prefix+"lym_change_aux", this.lym_change_aux +"");
properties.setProperty(prefix+"lym_poly_change", this.lym_poly_change +"");
properties.setProperty(prefix+"lym_mod_map", this.lym_mod_map +"");
properties.setProperty(prefix+"lym_top_bg", this.lym_top_bg +"");
properties.setProperty(prefix+"lym_fill_gaps_bg", this.lym_fill_gaps_bg +"");
properties.setProperty(prefix+"lym_fill_gaps_combo", this.lym_fill_gaps_combo +"");
properties.setProperty(prefix+"lym_use_strength", this.lym_use_strength +"");
properties.setProperty(prefix+"lym_scale_deriv_str", this.lym_scale_deriv_str +"");
properties.setProperty(prefix+"lyf_filter", this.lyf_filter+"");
properties.setProperty(prefix+"lyf_smpl_side", this.lyf_smpl_side+"");
properties.setProperty(prefix+"lyf_rms_max", this.lyf_rms_max +"");
......@@ -2007,9 +2019,15 @@ public class CLTParameters {
if (properties.getProperty(prefix+"lym_iter")!=null) this.lym_iter=Integer.parseInt(properties.getProperty(prefix+"lym_iter"));
if (properties.getProperty(prefix+"lym_change")!=null) this.lym_change=Double.parseDouble(properties.getProperty(prefix+"lym_change"));
if (properties.getProperty(prefix+"lym_change_aux")!=null) this.lym_change_aux=Double.parseDouble(properties.getProperty(prefix+"lym_change_aux"));
if (properties.getProperty(prefix+"lym_poly_change")!=null) this.lym_poly_change=Double.parseDouble(properties.getProperty(prefix+"lym_poly_change"));
if (properties.getProperty(prefix+"lym_mod_map")!=null) this.lym_mod_map=Boolean.parseBoolean(properties.getProperty(prefix+"lym_mod_map"));
if (properties.getProperty(prefix+"lym_top_bg")!=null) this.lym_top_bg=Boolean.parseBoolean(properties.getProperty(prefix+"lym_top_bg"));
if (properties.getProperty(prefix+"lym_fill_gaps_bg")!=null) this.lym_fill_gaps_bg=Integer.parseInt(properties.getProperty(prefix+"lym_fill_gaps_bg"));
if (properties.getProperty(prefix+"lym_fill_gaps_combo")!=null) this.lym_fill_gaps_combo=Integer.parseInt(properties.getProperty(prefix+"lym_fill_gaps_combo"));
if (properties.getProperty(prefix+"lym_use_strength")!=null) this.lym_use_strength=Boolean.parseBoolean(properties.getProperty(prefix+"lym_use_strength"));
if (properties.getProperty(prefix+"lym_scale_deriv_str")!=null) this.lym_scale_deriv_str=Double.parseDouble(properties.getProperty(prefix+"lym_scale_deriv_str"));
if (properties.getProperty(prefix+"lyf_filter")!=null) this.lyf_filter=Boolean.parseBoolean(properties.getProperty(prefix+"lyf_filter"));
if (properties.getProperty(prefix+"lyf_smpl_side")!=null) this.lyf_smpl_side=Integer.parseInt(properties.getProperty(prefix+"lyf_smpl_side"));
if (properties.getProperty(prefix+"lyf_rms_max")!=null) this.lyf_rms_max=Double.parseDouble(properties.getProperty(prefix+"lyf_rms_max"));
......@@ -2908,9 +2926,23 @@ public class CLTParameters {
gd.addNumericField("Maximal number of iterations", this.lym_iter, 0);
gd.addNumericField("Parameter vector difference to exit (main camera)", this.lym_change, 10,12,"");
gd.addNumericField("Parameter vector difference to exit (aux camera)", this.lym_change_aux, 10,12,"");
gd.addNumericField("Parameter vector difference to exit from polynomial correction", this.lym_poly_change, 10);
gd.addMessage ("--- Lazy eye data preparation ---");
gd.addCheckbox ("Modify preliminary disparity map before running LY", this.lym_mod_map,
"LY may tolerate some disparity errors when just pure LY is needed");
gd.addCheckbox ("Consider all above infinity to be infinity", this.lym_top_bg,
"Valid for many, but not all scenes, such as tree branches over sky background");
gd.addNumericField("Fill gaps in background selection", this.lym_fill_gaps_bg, 0, 2, "pix",
"1 - 1 pixel in 4 directions, 2 - 1 step in 8 directions, ...");
gd.addNumericField("Fill gaps in combo selection", this.lym_fill_gaps_combo, 0, 2, "pix",
"1 - 1 pixel in 4 directions, 2 - 1 step in 8 directions, ...");
gd.addCheckbox ("Use strength when averaging disparity from neighbors", this.lym_use_strength,
"Weight by strength when averaging missing disparity tiles, false - use equal weights");
gd.addNumericField("Scale strength of the interpolated tiles from average neighbors strength", this.lym_scale_deriv_str, 3, 6, "",
"Multiply average strength of neighbors when assigning to a missing tile");
gd.addMessage ("--- Lazy eye samples filter ---");
gd.addCheckbox ("Filter lazy eye pairs by their values", this.lyf_filter);
gd.addNumericField("Fileter sample side (if 8, 8 x8 masked, 16x16 sampled)", this.lyf_smpl_side, 0);
......@@ -3850,6 +3882,13 @@ public class CLTParameters {
this.lym_change_aux= gd.getNextNumber();
this.lym_poly_change= gd.getNextNumber();
this.lym_mod_map= gd.getNextBoolean();
this.lym_top_bg= gd.getNextBoolean();
this.lym_fill_gaps_bg = (int) gd.getNextNumber();
this.lym_fill_gaps_combo = (int) gd.getNextNumber();
this.lym_use_strength= gd.getNextBoolean();
this.lym_scale_deriv_str = gd.getNextNumber();
this.lyf_filter= gd.getNextBoolean();
this.lyf_smpl_side= (int) gd.getNextNumber();
......
......@@ -504,6 +504,11 @@ public class CLTPass3d{
}
return strength;
}
public void setStrength(double [] strength) {
this.strength = strength;
}
/**
* Get four pairs (original) correlation strength. Not a copy
* @return line-scan array of per-tile horizontal pairs correlation strength by reference (not a copy)
......
......@@ -78,7 +78,7 @@ public class Corr2dLMA {
/// final static int NUM_CAMS = 4; // not all have to be used, so it is maximal number of cameras
/// final static int NUM_PAIRS = NUM_CAMS* (NUM_CAMS -1)/2; // number of possible pairs
final static int NTILE0 = 0;
final static int DISP_INDEX = 0; // common/average disparity
final static int A_INDEX = 1; // A*(x-x0)^2
final static int B_INDEX = 2; // 2*B*(x-x0)*(y-y0)
......@@ -132,7 +132,7 @@ public class Corr2dLMA {
private int [] npairs; // number of used pairs per tile
private int last_cam; // index of the last camera (special treatment for disparity correction)
private int pre_last_cam; // index of the pre-last camera (special treatment for disparity correction)
private Matrix [][] m_disp;
private Matrix [][] m_disp;
// private Matrix [][][] m_pairs;
// private Matrix [][][] m_pairs_inv; // inverted m_pairs to calculate x,y -> dd,nd for initial disparity calculation
......@@ -145,7 +145,8 @@ public class Corr2dLMA {
private int numTiles = 1;
private Matrix mddnd; // Matrix to calculate 2 last corrections in disparity direction and 1 ortho from the first ones (normally 2+3=5)
private boolean gaussian_mode = true;
// private boolean gaussian_mode = true;
private int gaussian_mode = 1; // 0 - parabola, 1 - Gaussian, 2 - limited parabola, 3 - limited squared parabola
private boolean lazy_eye; // calculate parameters/derivatives for the "lazy eye" parameters
private double [][] rXY;
private int [] dd_indices; //normally 5-long (2 * ncam -3), absolute parameter indices for dd_pre_last, dd_last and nd_last
......@@ -196,7 +197,7 @@ public class Corr2dLMA {
int ts, // null - use default table
double [][] corr_wnd, // may be null
double [][] rXY, // non-distorted X,Y offset per nominal pixel of disparity
boolean gaussian_mode
int gaussian_mode // 0 - parabola, 1 - Gaussian, 2 - limited parabola, 3 - limited squared parabola
) {
this.correlation2d = correlation2d;
this.num_cams = correlation2d.getNumSensors();
......@@ -437,19 +438,8 @@ public class Corr2dLMA {
return np;
}
@Deprecated
public int getPairIndex(int f, int s) {
if (f > s) {
int t = f;
f = s;
s = t;
}
return (num_cams * f) - (f + 1)*f/2 - f - 1 + s ; // return n*i - i*(i+1)//2 - i + j -1
}
public void setMatrices(double [][] am_disp) {
m_disp = new Matrix[1][num_cams];
for (int n = 0; n < num_cams; n++) {
......@@ -515,6 +505,119 @@ public class Corr2dLMA {
}
}
public boolean setInitialLYOffsets(
double [][] pair_centers,
double step_weight, // scale corrections
double min_correction, // exit when maximal XY correction is below
boolean debug) {
if (pair_centers == null) {
return false;
}
// Verify that all cameras are used
for (int ncam = 0; ncam <num_cams; ncam++) if (!used_cameras[ncam]){
return false; // not all cameras present
}
boolean apply = true; // false;
double min_corr2 = min_correction * min_correction;
Matrix[] xy_cam_disp = new Matrix [num_cams]; // per camera - x,y without LY - just from disparity
// Matrix ddnd_disp = new Matrix(new double [] {all_pars[DISP_INDEX + NTILE0*tile_params], 0.0}, 2);
Matrix ddnd_disp = new Matrix(new double [] {-all_pars[DISP_INDEX + NTILE0*tile_params], 0.0}, 2);
for (int ncam = 0; ncam < num_cams; ncam++) {
xy_cam_disp[ncam] = m_disp[NTILE0][ncam].times(ddnd_disp);
}
Matrix[] m_pair_centers = new Matrix[pair_centers.length];
for (int pair = 0; pair < num_pairs; pair++) if (pair_centers[pair] != null) {
m_pair_centers[pair] = new Matrix (new double[] {pair_centers[pair][0], pair_centers[pair][1]},2);
int [] se =correlation2d.getPair(pair);
m_pair_centers[pair].plusEquals(xy_cam_disp[se[0]]).minusEquals(xy_cam_disp[se[1]]); // TODO: verify signs
}
// m_pair_centers - measured xy pair centers, corrected for disparity, so sum(dd) should be 0;
Matrix [] xy_ly = new Matrix [num_cams];
Matrix [] xy_ly_new = new Matrix [num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) {
xy_ly[ncam] = new Matrix(2,1); // zero [x,Y] column
}
int max_try = 101;
double [] sum_w = new double [num_cams];
for (int ntry = 0; ntry < max_try; ntry++) {
// reset new vectors
for (int ncam = 0; ncam < num_cams; ncam++) {
xy_ly_new[ncam] = new Matrix(2,1); // zero [x,Y] column
}
for (int pair = 0; pair < num_pairs; pair++) if (m_pair_centers[pair] != null){
int [] se =correlation2d.getPair(pair);
xy_ly_new[se[1]].plusEquals(xy_ly[se[0]].plus (m_pair_centers[pair]).times(pair_centers[pair][2]));
xy_ly_new[se[0]].plusEquals(xy_ly[se[1]].minus(m_pair_centers[pair]).times(pair_centers[pair][2]));
if (ntry == 0) {
sum_w[se[1]] += pair_centers[pair][2];
sum_w[se[0]] += pair_centers[pair][2];
}
}
double max_diff2 = 0;
for (int ncam = 0; ncam < num_cams; ncam++) {
xy_ly_new[ncam] =xy_ly[ncam].times(1.0 - step_weight).plus(xy_ly_new[ncam].times(step_weight/sum_w[ncam]));
double dx = xy_ly_new[ncam].get(0, 0) - xy_ly[ncam].get(0, 0);
double dy = xy_ly_new[ncam].get(1, 0) - xy_ly[ncam].get(1, 0);
double d2 = dx*dx + dy*dy;
if (d2 > max_diff2) {
max_diff2 = d2;
}
xy_ly[ncam] = xy_ly_new[ncam];
}
if (max_diff2 < min_corr2) {
break;
}
}
// correct to zero dx, dy
Matrix s_xy = new Matrix(2,1); // zero [x,Y] column
for (int ncam = 0; ncam < num_cams; ncam++) {
s_xy.plusEquals(xy_ly[ncam]);
}
s_xy.timesEquals(1.0/num_cams);
for (int ncam = 0; ncam < num_cams; ncam++) {
xy_ly[ncam].minusEquals(s_xy);
}
if (debug) {
System.out.println(String.format("%2s: %6s %6s",""," X", " Y"));
for (int ncam = 0; ncam <num_cams; ncam++) {
System.out.println(String.format("%2d: %6.3f %6.3f", ncam, xy_ly[ncam].get(0, 0), xy_ly[ncam].get(1, 0)));
}
}
// convert to ddnd
Matrix [] ddnd = new Matrix [num_cams];
Matrix s_dd = new Matrix(2,1); // zero [DD,ND] column
for (int ncam = 0; ncam < num_cams; ncam++) {
ddnd[ncam] = m_disp[NTILE0][ncam].inverse().times(xy_ly[ncam]);
s_dd.plusEquals(ddnd[ncam]);
}
s_dd.set(1, 0, 0);
s_dd.timesEquals(1.0/num_cams);
for (int ncam = 0; ncam < num_cams; ncam++) {
ddnd[ncam].minusEquals(s_dd) ;
}
if (debug) {
System.out.println(String.format("%2s: %6s %6s",""," DD", " ND"));
for (int ncam = 0; ncam <num_cams; ncam++) {
System.out.println(String.format("%2d: %6.3f %6.3f", ncam, ddnd[ncam].get(0, 0), ddnd[ncam].get(1, 0)));
}
}
// apply corrections
if (apply) {
for (int ncam = 0; ncam <num_cams; ncam++) {
this.all_pars[ddisp_index + ncam] = -ddnd[ncam].get(0, 0);
this.all_pars[ndisp_index + ncam] = -ddnd[ncam].get(1, 0);
}
toVector();
}
return true;
}
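The loop in setInitialLYOffsets() above is a weighted fixed-point relaxation: each camera's [X,Y] offset is pulled toward (other camera's offset ± measured pair center), blended with its previous value by step_weight, and the common mean is then subtracted so the offsets sum to zero. A standalone sketch of the same idea with plain arrays instead of Jama matrices follows; the variable names are illustrative, not from this commit.

// Sketch only: relax per-camera XY offsets from disparity-compensated pair centers.
// pairs[p] = {first_cam, second_cam}; centers[p] = {x, y, weight}; every camera must appear in some pair.
static double [][] relaxOffsets(int num_cams, int [][] pairs, double [][] centers,
		double step_weight, double min_correction, int max_try) {
	double [][] xy = new double [num_cams][2];
	double [] sum_w = new double [num_cams];
	for (int p = 0; p < pairs.length; p++) if (centers[p] != null) {
		sum_w[pairs[p][0]] += centers[p][2];
		sum_w[pairs[p][1]] += centers[p][2];
	}
	for (int ntry = 0; ntry < max_try; ntry++) {
		double [][] upd = new double [num_cams][2];
		for (int p = 0; p < pairs.length; p++) if (centers[p] != null) {
			int f = pairs[p][0], s = pairs[p][1];
			for (int k = 0; k < 2; k++) {
				upd[s][k] += centers[p][2] * (xy[f][k] + centers[p][k]); // second camera follows first + center
				upd[f][k] += centers[p][2] * (xy[s][k] - centers[p][k]); // first camera follows second - center
			}
		}
		double max_diff2 = 0.0;
		for (int c = 0; c < num_cams; c++) {
			double nx = (1.0 - step_weight) * xy[c][0] + step_weight * upd[c][0] / sum_w[c];
			double ny = (1.0 - step_weight) * xy[c][1] + step_weight * upd[c][1] / sum_w[c];
			max_diff2 = Math.max(max_diff2, (nx - xy[c][0]) * (nx - xy[c][0]) + (ny - xy[c][1]) * (ny - xy[c][1]));
			xy[c][0] = nx;
			xy[c][1] = ny;
		}
		if (max_diff2 < min_correction * min_correction) break;
	}
	for (int k = 0; k < 2; k++) { // remove common offset so per-camera corrections sum to zero
		double mean = 0.0;
		for (int c = 0; c < num_cams; c++) mean += xy[c][k] / num_cams;
		for (int c = 0; c < num_cams; c++) xy[c][k] -= mean;
	}
	return xy;
}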
public boolean initVector( // USED in lwir
boolean adjust_width, // adjust width of the maximum - lma_adjust_wm
boolean adjust_scales, // adjust 2D correlation scales - lma_adjust_ag
......@@ -522,6 +625,8 @@ public class Corr2dLMA {
boolean adjust_lazyeye_par, // adjust disparity corrections parallel to disparities lma_adjust_wxy
boolean adjust_lazyeye_ortho, // obsolete - make == adjust_lazyeye_par adjust disparity corrections orthogonal to disparities lma_adjust_ly1
double [][] disp_str, // initial value of disparity
// double [][] ly_offsets_pairs, // common for all tiles: initial per sensor x,y LY offsets (or null)
// double [][] ly_offsets_pairs, // common for all tiles: initial per pair x,y LY offsets (or null)
double half_width, // A=1/(half_widh)^2 lma_half_width
double cost_lazyeye_par, // cost for each of the non-zero disparity corrections lma_cost_wy
double cost_lazyeye_odtho // cost for each of the non-zero ortho disparity corrections lma_cost_wxy
......@@ -875,8 +980,11 @@ public class Corr2dLMA {
private double [] getFxJt(
double [] vector,
double [][] jt) { // should be either [vector.length][samples.size()] or null - then only fx is calculated
if (this.gaussian_mode) return getFxJt_gaussian(vector, jt);
else return getFxJt_parabola(vector, jt);
if (this.gaussian_mode == 0) return getFxJt_parabola (vector, jt);
if (this.gaussian_mode == 1) return getFxJt_gaussian (vector, jt);
if (this.gaussian_mode == 2) return getFxJt_parabola_lim (vector, jt);
if (this.gaussian_mode == 3) return getFxJt_parabola_squared (vector, jt);
else return getFxJt_parabola (vector, jt);
}
private double [] getFxJt_parabola( // USED in lwir
......@@ -911,8 +1019,8 @@ public class Corr2dLMA {
//corr_wnd
for (int ns = 0; ns < num_samples; ns++) {
Sample s = samples.get(ns);
// int pair = pindx[s.fcam][s.scam]; // all pairs, noit just used?
int pair = s.pair; // all pairs, noit just used?
// int pair = pindx[s.fcam][s.scam]; // all pairs, not just used?
int pair = s.pair; // all pairs, not just used?
int [] fs = correlation2d.getPair(pair);
double A = AT[s.tile];
double B = BT[s.tile];
......@@ -1056,6 +1164,361 @@ public class Corr2dLMA {
return fx;
}
private double [] getFxJt_parabola_lim( // USED in lwir
double [] vector,
double [][] jt) { // should be either [vector.length][samples.size()] or null - then only fx is calculated
if (vector == null) return null;
double [] av = fromVector(vector);
Matrix [][] xcam_ycam = new Matrix[numTiles][num_cams];
double [][][][] xp_yp = new double[numTiles][num_cams][num_cams][];
double [] axc_yc = {transform_size - 1.0, transform_size-1.0};
Matrix xc_yc = new Matrix(axc_yc, 2);
double [] AT = new double [numTiles]; // av[A_INDEX];
double [] BT = new double [numTiles]; // av[B_INDEX];
double [] CT = new double [numTiles]; // A + av[CMA_INDEX];
for (int nTile = 0; nTile < numTiles; nTile++) if (used_tiles[nTile]){
for (int i = 0; i < num_cams; i++) if (used_cameras[i]) {
double [] add_dnd = {av[DISP_INDEX+ nTile * tile_params]+ av[ddisp_index + i], av[ndisp_index + i]};
xcam_ycam[nTile][i] = m_disp[nTile][i].times(new Matrix(add_dnd,2));
}
for (int f = 0; f < num_cams; f++) if (used_cameras[f]) {
for (int s = 0; s < num_cams; s++) if (used_cameras[s]) {
xp_yp[nTile][f][s] =xcam_ycam[nTile][f].minus(xcam_ycam[nTile][s]).plus(xc_yc).getColumnPackedCopy();
}
}
AT[nTile] = av[A_INDEX + nTile * tile_params];
BT[nTile] = av[B_INDEX + nTile * tile_params];
CT[nTile] = AT[nTile] + av[CMA_INDEX + nTile * tile_params];
}
int num_samples = samples.size();
double [] fx= new double [num_samples + 2 * num_cams];
//corr_wnd
for (int ns = 0; ns < num_samples; ns++) {
Sample s = samples.get(ns);
// int pair = pindx[s.fcam][s.scam]; // all pairs, not just used?
int pair = s.pair; // all pairs, not just used?
int [] fs = correlation2d.getPair(pair);
double A = AT[s.tile];
double B = BT[s.tile];
double C = CT[s.tile];
double Gp = av[G0_INDEX + pair + s.tile * tile_params];
double Wp = corr_wnd[s.ix][s.iy];
double WGp = Wp * Gp;
// double xmxp = s.ix - xp_yp[s.tile][s.fcam][s.scam][0];
// double ymyp = s.iy - xp_yp[s.tile][s.fcam][s.scam][1];
double xmxp = s.ix - xp_yp[s.tile][fs[0]][fs[1]][0]; // TODO - change format of xp_yp
double ymyp = s.iy - xp_yp[s.tile][fs[0]][fs[1]][1];
double xmxp2 = xmxp * xmxp;
double ymyp2 = ymyp * ymyp;
double xmxp_ymyp = xmxp * ymyp;
double d = Wp*(1.0 - (A*xmxp2 + 2 * B * xmxp_ymyp + C * ymyp2));
double lim_negative = (d < 0)? 0.0 : 1.0;
d *= lim_negative;
fx[ns] = d * Gp;
if (Double.isNaN(fx[ns])) {
System.out.println("fx["+ns+"]="+fx[ns]);
}
if (s.tile > 0) {
System.out.print("");
}
if (jt != null) {
if (par_map[DISP_INDEX + s.tile*tile_params] >= 0) {
jt[par_map[DISP_INDEX + s.tile*tile_params]][ns] = lim_negative* 2 * WGp *
// ((A * xmxp + B * ymyp) * m_pairs[s.tile][s.fcam][s.scam].get(0, 0)+
// (B * xmxp + C * ymyp) * m_pairs[s.tile][s.fcam][s.scam].get(1, 0));
((A * xmxp + B * ymyp) * m_pairs[s.tile][s.pair].get(0, 0)+
(B * xmxp + C * ymyp) * m_pairs[s.tile][s.pair].get(1, 0));
}
if (par_map[A_INDEX + s.tile*tile_params] >= 0) {
jt[par_map[A_INDEX + s.tile*tile_params]][ns] = -WGp * (xmxp2 + ymyp2) * lim_negative;
}
if (par_map[B_INDEX + s.tile*tile_params] >= 0) {
jt[par_map[B_INDEX + s.tile*tile_params]][ns] = -WGp * 2 * xmxp_ymyp * lim_negative;
}
if (par_map[CMA_INDEX + s.tile*tile_params] >= 0) {
jt[par_map[CMA_INDEX + s.tile*tile_params]][ns] = -WGp * ymyp2 * lim_negative;
}
// for (int p = 0; p < npairs[s.tile]; p++) { // par_mask[G0_INDEX + p] as all pairs either used, or not - then npairs == 0
for (int p = 0; p < num_pairs; p++) { // par_mask[G0_INDEX + p] as all pairs either used, or not - then npairs == 0
if (par_map[G0_INDEX + p + s.tile*tile_params] >= 0) {
jt[par_map[G0_INDEX + p + s.tile*tile_params]][ns] = (p== pair)? d : 0.0; // (par_mask[G0_INDEX + pair])? d;
}
}
if (lazy_eye) {
for (int f = 0; f < num_cams; f++) { // -1 for the last_cam and pre_last_cam
if (par_map[ddisp_index + f] >= 0) jt[par_map[ddisp_index + f]][ns] = 0.0;
if (par_map[ndisp_index + f] >= 0) jt[par_map[ndisp_index + f]][ns] = 0.0;
}
double [] dd_deriv = new double[3]; // derivatives by dependent dd_pre_last, dd_last and nd_last (calculated on demand) with sign according to first/second in a pair
if ((fs[0] == pre_last_cam)) {
dd_deriv[0] = 2 * WGp * lim_negative *
( (A * xmxp + B * ymyp) * m_disp[s.tile][pre_last_cam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][pre_last_cam].get(1, 0));
} else if ((fs[1] == pre_last_cam)) {
dd_deriv[0] = -2 * WGp * lim_negative *
( (A * xmxp + B * ymyp) * m_disp[s.tile][pre_last_cam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][pre_last_cam].get(1, 0));
}
if ((fs[0] == last_cam)) {
dd_deriv[1] = 2 * WGp * lim_negative *
( (A * xmxp + B * ymyp) * m_disp[s.tile][last_cam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][last_cam].get(1, 0));
dd_deriv[2] = 2 * WGp * lim_negative *
( (A * xmxp + B * ymyp) * m_disp[s.tile][last_cam].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.tile][last_cam].get(1, 1));
} else if ((fs[1] == last_cam)) {
dd_deriv[1] = -2 * WGp * lim_negative *
( (A * xmxp + B * ymyp) * m_disp[s.tile][last_cam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][last_cam].get(1, 0));
dd_deriv[2] = -2 * WGp * lim_negative *
( (A * xmxp + B * ymyp) * m_disp[s.tile][last_cam].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.tile][last_cam].get(1, 1));
}
// now accumulate derivatives:
// first calculate contributions of the dd, nd directly:
if (par_map[ddisp_index + fs[0]] >= 0){ // par_map[ddisp_index + last_cam] always <0
jt[par_map[ddisp_index + fs[0]]][ns] += 2 * WGp * lim_negative *
((A * xmxp + B * ymyp) * m_disp[s.tile][fs[0]].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][fs[0]].get(1, 0));
}
if (par_map[ddisp_index + fs[1]]>= 0){ // par_map[ddisp_index + last_cam] always <0
jt[par_map[ddisp_index + fs[1]]][ns] -= 2 * WGp * lim_negative *
((A * xmxp + B * ymyp) * m_disp[s.tile][fs[1]].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][fs[1]].get(1, 0));
}
if (par_map[ndisp_index + fs[0]] >=0){
jt[par_map[ndisp_index + fs[0]]][ns] += 2 * WGp * lim_negative *
( (A * xmxp + B * ymyp) * m_disp[s.tile][fs[0]].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.tile][fs[0]].get(1, 1));
}
if (par_map[ndisp_index + fs[1]] >= 0) {
jt[par_map[ndisp_index + fs[1]]][ns] -= 2 * WGp * lim_negative *
( (A * xmxp + B * ymyp) * m_disp[s.tile][fs[1]].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.tile][fs[1]].get(1, 1));
}
// now calculate indirect ones through derivatives by dd_pre_last (dd_deriv[0]), dd_last (dd_deriv[1]) and nd_last (dd_deriv[2])
//// private int [] dd_indices; //normally 5-long (2 * ncam -3), absolute parameter indices for dd_pre_last, dd_last and nd_last
for (int ddn = 0; ddn < 3; ddn++) if (dd_deriv[ddn] != 0.0) {
for (int i = 0; i < dd_indices.length; i++) {
jt[dd_indices[i]][ns] += dd_deriv[ddn] * mddnd.get(ddn, i); // already includes lim_negative *
// if (Double.isNaN(jt[dd_indices[i]][ns])){
// System.out.println("getFxJt_parabola(): jt[dd_indices["+i+"]]["+ns+"] == NaN, dd_indices["+i+"]="+dd_indices[i]);
// }
}
}
}
}
}
if (lazy_eye) { // do not include lim_negative *
for (int n = 0; n < num_cams; n++) { // av[ddisp_index +last_cam] and other 2 are already populated
fx[num_samples + n] = av[ddisp_index + n];
fx[num_samples + num_cams + n] = av[ndisp_index + n];
}
// and derivatives
if (jt != null) {
for (int i = 0; i < num_cams; i++) {
if ((i != last_cam) && (i != pre_last_cam) && (par_map[ddisp_index + i] >= 0)) {
jt[par_map[ddisp_index + i]][num_samples + i] = 1.0;
}
if ((i != last_cam) && (par_map[ndisp_index + i] >= 0)) {
jt[par_map[ndisp_index + i] ][num_samples + num_cams + i] = 1.0;
}
}
for (int i = 0; i < dd_indices.length; i++) {
jt[dd_indices[i]][num_samples + pre_last_cam] = mddnd.get(0, i);
jt[dd_indices[i]][num_samples + last_cam] = mddnd.get(1, i);
jt[dd_indices[i]][num_samples + num_cams + last_cam] = mddnd.get(2, i);
}
}
}
return fx;
}
private double [] getFxJt_parabola_squared( // USED in lwir
double [] vector,
double [][] jt) { // should be either [vector.length][samples.size()] or null - then only fx is calculated
if (vector == null) return null;
double [] av = fromVector(vector);
Matrix [][] xcam_ycam = new Matrix[numTiles][num_cams];
double [][][][] xp_yp = new double[numTiles][num_cams][num_cams][];
double [] axc_yc = {transform_size - 1.0, transform_size-1.0};
Matrix xc_yc = new Matrix(axc_yc, 2);
double [] AT = new double [numTiles]; // av[A_INDEX];
double [] BT = new double [numTiles]; // av[B_INDEX];
double [] CT = new double [numTiles]; // A + av[CMA_INDEX];
for (int nTile = 0; nTile < numTiles; nTile++) if (used_tiles[nTile]){
for (int i = 0; i < num_cams; i++) if (used_cameras[i]) {
double [] add_dnd = {av[DISP_INDEX+ nTile * tile_params]+ av[ddisp_index + i], av[ndisp_index + i]};
xcam_ycam[nTile][i] = m_disp[nTile][i].times(new Matrix(add_dnd,2));
}
for (int f = 0; f < num_cams; f++) if (used_cameras[f]) {
for (int s = 0; s < num_cams; s++) if (used_cameras[s]) {
xp_yp[nTile][f][s] =xcam_ycam[nTile][f].minus(xcam_ycam[nTile][s]).plus(xc_yc).getColumnPackedCopy();
}
}
AT[nTile] = av[A_INDEX + nTile * tile_params];
BT[nTile] = av[B_INDEX + nTile * tile_params];
CT[nTile] = AT[nTile] + av[CMA_INDEX + nTile * tile_params];
}
int num_samples = samples.size();
double [] fx= new double [num_samples + 2 * num_cams];
//corr_wnd
for (int ns = 0; ns < num_samples; ns++) {
Sample s = samples.get(ns);
int pair = s.pair; // all pairs, not just used?
int [] fs = correlation2d.getPair(pair);
double A = AT[s.tile];
double B = BT[s.tile];
double C = CT[s.tile];
double Gp = av[G0_INDEX + pair + s.tile * tile_params];
double Wp = corr_wnd[s.ix][s.iy]; // *********
double WGp = Wp * Gp;
double xmxp = s.ix - xp_yp[s.tile][fs[0]][fs[1]][0]; // TODO - change format of xp_yp
double ymyp = s.iy - xp_yp[s.tile][fs[0]][fs[1]][1];
double xmxp2 = xmxp * xmxp;
double ymyp2 = ymyp * ymyp;
double xmxp_ymyp = xmxp * ymyp;
double d = Wp*(1.0 - (A*xmxp2 + 2 * B * xmxp_ymyp + C * ymyp2));
double lim_negative = (d < 0)? 0.0 : 1.0;
d *= lim_negative;
fx[ns] = d * d * Gp;
// d(d^2)/dp = 2*d *dd/dp
double lim_negative_2d = 2 * d * lim_negative;
if (Double.isNaN(fx[ns])) {
System.out.println("fx["+ns+"]="+fx[ns]);
}
if (s.tile > 0) {
System.out.print("");
}
if (jt != null) {
if (par_map[DISP_INDEX + s.tile*tile_params] >= 0) {
jt[par_map[DISP_INDEX + s.tile*tile_params]][ns] = lim_negative_2d* 2 * WGp *
((A * xmxp + B * ymyp) * m_pairs[s.tile][s.pair].get(0, 0)+
(B * xmxp + C * ymyp) * m_pairs[s.tile][s.pair].get(1, 0));
}
if (par_map[A_INDEX + s.tile*tile_params] >= 0) {
jt[par_map[A_INDEX + s.tile*tile_params]][ns] = -WGp * (xmxp2 + ymyp2) * lim_negative_2d;
}
if (par_map[B_INDEX + s.tile*tile_params] >= 0) {
jt[par_map[B_INDEX + s.tile*tile_params]][ns] = -WGp * 2 * xmxp_ymyp * lim_negative_2d;
}
if (par_map[CMA_INDEX + s.tile*tile_params] >= 0) {
jt[par_map[CMA_INDEX + s.tile*tile_params]][ns] = -WGp * ymyp2 * lim_negative_2d;
}
for (int p = 0; p < num_pairs; p++) { // par_mask[G0_INDEX + p] as all pairs either used, or not - then npairs == 0
if (par_map[G0_INDEX + p + s.tile*tile_params] >= 0) {
jt[par_map[G0_INDEX + p + s.tile*tile_params]][ns] = (p== pair)? (d * d) : 0.0; // (par_mask[G0_INDEX + pair])? d;
}
}
if (lazy_eye) {
for (int f = 0; f < num_cams; f++) { // -1 for the last_cam and pre_last_cam
if (par_map[ddisp_index + f] >= 0) jt[par_map[ddisp_index + f]][ns] = 0.0;
if (par_map[ndisp_index + f] >= 0) jt[par_map[ndisp_index + f]][ns] = 0.0;
}
double [] dd_deriv = new double[3]; // derivatives by dependent dd_pre_last, dd_last and nd_last (calculated on demand) with sign according to first/second in a pair
if ((fs[0] == pre_last_cam)) {
dd_deriv[0] = 2 * WGp * lim_negative_2d *
( (A * xmxp + B * ymyp) * m_disp[s.tile][pre_last_cam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][pre_last_cam].get(1, 0));
} else if ((fs[1] == pre_last_cam)) {
dd_deriv[0] = -2 * WGp * lim_negative_2d *
( (A * xmxp + B * ymyp) * m_disp[s.tile][pre_last_cam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][pre_last_cam].get(1, 0));
}
if ((fs[0] == last_cam)) {
dd_deriv[1] = 2 * WGp * lim_negative_2d *
( (A * xmxp + B * ymyp) * m_disp[s.tile][last_cam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][last_cam].get(1, 0));
dd_deriv[2] = 2 * WGp * lim_negative_2d *
( (A * xmxp + B * ymyp) * m_disp[s.tile][last_cam].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.tile][last_cam].get(1, 1));
} else if ((fs[1] == last_cam)) {
dd_deriv[1] = -2 * WGp * lim_negative_2d *
( (A * xmxp + B * ymyp) * m_disp[s.tile][last_cam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][last_cam].get(1, 0));
dd_deriv[2] = -2 * WGp * lim_negative_2d *
( (A * xmxp + B * ymyp) * m_disp[s.tile][last_cam].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.tile][last_cam].get(1, 1));
}
// now accumulate derivatives:
// first calculate contributions of the dd, nd directly:
if (par_map[ddisp_index + fs[0]] >= 0){ // par_map[ddisp_index + last_cam] always <0
jt[par_map[ddisp_index + fs[0]]][ns] += 2 * WGp * lim_negative_2d *
((A * xmxp + B * ymyp) * m_disp[s.tile][fs[0]].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][fs[0]].get(1, 0));
}
if (par_map[ddisp_index + fs[1]]>= 0){ // par_map[ddisp_index + last_cam] always <0
jt[par_map[ddisp_index + fs[1]]][ns] -= 2 * WGp * lim_negative_2d *
((A * xmxp + B * ymyp) * m_disp[s.tile][fs[1]].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][fs[1]].get(1, 0));
}
if (par_map[ndisp_index + fs[0]] >=0){
jt[par_map[ndisp_index + fs[0]]][ns] += 2 * WGp * lim_negative_2d *
( (A * xmxp + B * ymyp) * m_disp[s.tile][fs[0]].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.tile][fs[0]].get(1, 1));
}
if (par_map[ndisp_index + fs[1]] >= 0) {
jt[par_map[ndisp_index + fs[1]]][ns] -= 2 * WGp * lim_negative_2d *
( (A * xmxp + B * ymyp) * m_disp[s.tile][fs[1]].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.tile][fs[1]].get(1, 1));
}
// now calculate indirect ones through derivatives by dd_pre_last (dd_deriv[0]), dd_last (dd_deriv[1]) and nd_last (dd_deriv[2])
//// private int [] dd_indices; //normally 5-long (2 * ncam -3), absolute parameter indices for dd_pre_last, dd_last and nd_last
for (int ddn = 0; ddn < 3; ddn++) if (dd_deriv[ddn] != 0.0) {
for (int i = 0; i < dd_indices.length; i++) {
jt[dd_indices[i]][ns] += dd_deriv[ddn] * mddnd.get(ddn, i); // already includes lim_negative *
// if (Double.isNaN(jt[dd_indices[i]][ns])){
// System.out.println("getFxJt_parabola(): jt[dd_indices["+i+"]]["+ns+"] == NaN, dd_indices["+i+"]="+dd_indices[i]);
// }
}
}
}
}
}
if (lazy_eye) { // do not include lim_negative *
for (int n = 0; n < num_cams; n++) { // av[ddisp_index +last_cam] and other 2 are already populated
fx[num_samples + n] = av[ddisp_index + n];
fx[num_samples + num_cams + n] = av[ndisp_index + n];
}
// and derivatives
if (jt != null) {
for (int i = 0; i < num_cams; i++) {
if ((i != last_cam) && (i != pre_last_cam) && (par_map[ddisp_index + i] >= 0)) {
jt[par_map[ddisp_index + i]][num_samples + i] = 1.0;
}
if ((i != last_cam) && (par_map[ndisp_index + i] >= 0)) {
jt[par_map[ndisp_index + i] ][num_samples + num_cams + i] = 1.0;
}
}
for (int i = 0; i < dd_indices.length; i++) {
jt[dd_indices[i]][num_samples + pre_last_cam] = mddnd.get(0, i);
jt[dd_indices[i]][num_samples + last_cam] = mddnd.get(1, i);
jt[dd_indices[i]][num_samples + num_cams + last_cam] = mddnd.get(2, i);
}
}
}
return fx;
}
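For reference, the per-sample value functions implied by the two new Jacobian methods above (plus the plain parabola) can be summarized as scalar functions of q = A*(x-x0)^2 + 2*B*(x-x0)*(y-y0) + C*(y-y0)^2, the window value W and the pair gain G. A minimal sketch of the gaussian_mode selection; mode 1, the Gaussian, is implemented in getFxJt_gaussian() below and is not shown in this hunk.

// Sketch only: value of one correlation sample for each model selected by gaussian_mode.
// q = A*dx*dx + 2*B*dx*dy + C*dy*dy, W = corr_wnd[ix][iy], G = pair gain (G0).
static double modelValue(int mode, double q, double W, double G) {
	double d = W * (1.0 - q);            // mode 0: plain parabola
	if (mode == 0) return d * G;
	double dl = (d > 0.0) ? d : 0.0;     // clip the negative lobe (lim_negative in the code above)
	if (mode == 2) return dl * G;        // limited parabola
	if (mode == 3) return dl * dl * G;   // limited parabola, squared
	throw new IllegalArgumentException("mode 1 (Gaussian) is handled by getFxJt_gaussian()");
}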
private double [] getFxJt_gaussian( // USED in lwir
double [] vector,
double [][] jt) { // should be either [vector.length][samples.size()] or null - then only fx is calculated
......@@ -1296,8 +1759,6 @@ public class Corr2dLMA {
System.out.println();
}
}
}
public double [][] getTileStats(){
double [] rms = getRmsTile();
......@@ -1334,8 +1795,8 @@ public class Corr2dLMA {
if (i < samples.size()) {
s = samples.get(i);
int [] fs = correlation2d.getPair(s.pair);
System.out.println(String.format("%3d: x=%2d y=%2d v=%9.6f fx=%9.6f w=%9.7f pair=%2d scam=%2d tile=%d",
i, s.ix, s.iy, s.v, fx_pos, s.w, fs[0], fs[1], s.tile));
System.out.println(String.format("%3d: x=%2d y=%2d v=%9.6f fx=%9.6f w=%9.7f pair=%3d fcam=%2d scam=%2d tile=%d",
i, s.ix, s.iy, s.v, fx_pos, s.w, s.pair, fs[0], fs[1], s.tile));
}
else {
System.out.println(String.format("%3d: %2s %2s v=%9.6f fx=%9.6f w=%9.7f", i, "-", "-", this.values[i], fx_pos, this.weights[i]));
......@@ -1345,8 +1806,8 @@ public class Corr2dLMA {
int ns =0;
for (Sample s:samples){
int [] fs = correlation2d.getPair(s.pair);
System.out.println(String.format("%3d: x=%2d y=%2d v=%9.6f w=%9.7f fcam=%1d scam=%1d tile=%d",
ns++, s.ix, s.iy, s.v, s.w, fs[0], fs[1], s.tile));
System.out.println(String.format("%3d: x=%2d y=%2d v=%9.6f w=%9.7f pair= %3d fcam=%2d scam=%2d tile=%d",
ns++, s.ix, s.iy, s.v, s.w, s.pair, fs[0], fs[1], s.tile));
}
}
}
......@@ -1424,9 +1885,12 @@ public class Corr2dLMA {
}
Matrix m5 = new Matrix(a5,a5.length); // single column, normally 5 rows
Matrix m3 = mddnd.times(m5);
all_pars[ddisp_index + used_cams_rmap[pre_last_cam]] = m3.get(0, 0);
all_pars[ddisp_index + used_cams_rmap[last_cam]] = m3.get(1, 0);
all_pars[ndisp_index + used_cams_rmap[last_cam]] = m3.get(2, 0);
// all_pars[ddisp_index + used_cams_rmap[pre_last_cam]] = m3.get(0, 0);
// all_pars[ddisp_index + used_cams_rmap[last_cam]] = m3.get(1, 0);
// all_pars[ndisp_index + used_cams_rmap[last_cam]] = m3.get(2, 0);
all_pars[ddisp_index + pre_last_cam] = m3.get(0, 0);
all_pars[ddisp_index + last_cam] = m3.get(1, 0);
all_pars[ndisp_index + last_cam] = m3.get(2, 0);
}
......@@ -1446,9 +1910,12 @@ public class Corr2dLMA {
}
Matrix m5 = new Matrix(a5,a5.length); // single column, normally 5 rows
Matrix m3 = mddnd.times(m5);
ap[ddisp_index + used_cams_rmap[pre_last_cam]] = m3.get(0, 0);
ap[ddisp_index + used_cams_rmap[last_cam]] = m3.get(1, 0);
ap[ndisp_index + used_cams_rmap[last_cam]] = m3.get(2, 0);
// ap[ddisp_index + used_cams_rmap[pre_last_cam]] = m3.get(0, 0);
// ap[ddisp_index + used_cams_rmap[last_cam]] = m3.get(1, 0);
// ap[ndisp_index + used_cams_rmap[last_cam]] = m3.get(2, 0);
ap[ddisp_index + pre_last_cam] = m3.get(0, 0);
ap[ddisp_index + last_cam] = m3.get(1, 0);
ap[ndisp_index + last_cam] = m3.get(2, 0);
return ap;
}
......@@ -1666,7 +2133,10 @@ public class Corr2dLMA {
if (Double.isNaN(maxmin_amp[tile][0])) {
continue;
}
double avg = 0.50*(maxmin_amp[tile][0]+maxmin_amp[tile][1]);
if (maxmin_amp[tile][1] < 0.0) {
continue; // inverse maximum - discard tile
}
double avg = 0.50*(maxmin_amp[tile][0]+maxmin_amp[tile][1]); // max_min[1] can be negative - filter it out?
double rrms = rms[tile]/avg;
if (((lma_max_rel_rms > 0.0) && (rrms > lma_max_rel_rms)) ||
(Math.max(abc[tile][0], abc[tile][2]) < lma_min_max_ac)
......@@ -1692,7 +2162,11 @@ public class Corr2dLMA {
if ((strength < lma_min_strength) || Double.isNaN(disparity)) {
continue;
}
strength = Math.sqrt(strength * Math.sqrt(abc[tile][0] * abc[tile][2])); // / area ); // new strength
double ac = abc[tile][0] * abc[tile][2];
if (ac < 0) {
continue;
}
strength = Math.sqrt(strength * Math.sqrt(ac)); // / area ); // new strength
ds[tile][0] = disparity;
ds[tile][1] = (strength * lma_str_scale) + lma_str_offset;
}
......@@ -1947,9 +2421,12 @@ public class Corr2dLMA {
public double [][] getABCTile(){
double [][] abc = new double[numTiles][3];
for (int tile = 0; tile < numTiles; tile++) {
abc[tile][0] = (par_mask[A_INDEX+ tile * tile_params])? all_pars[A_INDEX+ tile * tile_params] :Double.NaN;
abc[tile][1] = (par_mask[B_INDEX+ tile * tile_params])? all_pars[B_INDEX+ tile * tile_params] :Double.NaN;
abc[tile][2] = abc[tile][0] + ( (par_mask[CMA_INDEX+ tile * tile_params])? all_pars[CMA_INDEX+ tile * tile_params] :Double.NaN);
// abc[tile][0] = (par_mask[A_INDEX+ tile * tile_params])? all_pars[A_INDEX+ tile * tile_params] :Double.NaN;
// abc[tile][1] = (par_mask[B_INDEX+ tile * tile_params])? all_pars[B_INDEX+ tile * tile_params] :Double.NaN;
// abc[tile][2] = abc[tile][0] + ( (par_mask[CMA_INDEX+ tile * tile_params])? all_pars[CMA_INDEX+ tile * tile_params] :Double.NaN);
abc[tile][0] = all_pars[A_INDEX+ tile * tile_params];
abc[tile][1] = all_pars[B_INDEX+ tile * tile_params];
abc[tile][2] = abc[tile][0] + all_pars[CMA_INDEX+ tile * tile_params];
}
return abc;
}
......@@ -2048,7 +2525,7 @@ public class Corr2dLMA {
this.good_or_bad_rms = this.last_rms.clone();
if (debug_level > 3) {
debugJt(
0.000001, // double delta,
0.000001, // double delta, // 0.2, //
this.vector); // double [] vector);
}
}
......@@ -2073,7 +2550,7 @@ public class Corr2dLMA {
return rslt;
}
if (debug_level>2) {
System.out.println("(JtJ + lambda*diag(JtJ).inv()");
System.out.println("(JtJ + lambda*diag(JtJ)).inv()");
jtjl_inv.print(18, 6);
}
//last_jt has NaNs
......
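The corrected debug label above refers to the standard damped (Levenberg-Marquardt) normal-equation step this class solves. Below is a minimal sketch with the Jama Matrix API already used in this file, with sample weights omitted for brevity; jt is the transposed Jacobian ([parameters][samples]), residual is a column of y - fx values, and the names are illustrative rather than taken from the commit.

import Jama.Matrix;

class LmaStepSketch {
	// Sketch only: one damped Gauss-Newton step, delta = (JtJ + lambda*diag(JtJ))^-1 * Jt * residual
	static Matrix lmaStep(Matrix jt, Matrix residual, double lambda) {
		Matrix jtjl = jt.times(jt.transpose());               // JtJ, [num_pars][num_pars]
		for (int i = 0; i < jtjl.getRowDimension(); i++) {
			jtjl.set(i, i, jtjl.get(i, i) * (1.0 + lambda));  // add lambda * diag(JtJ)
		}
		Matrix jty = jt.times(residual);                      // [num_pars][1]
		return jtjl.inverse().times(jty);                     // parameter update (column vector)
	}
}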
......@@ -597,8 +597,11 @@ public class CorrVector{ // TODO: Update to non-quad (extract to a file first)?
}
@Override
public String toString() // USED in lwir
{
public String toString() { // USED in lwir
return toString(false);
}
public String toString(boolean short_out) {
String s;
double [] sym_vect = toSymArray(null);
double [] v = new double [vector.length];
......@@ -648,7 +651,10 @@ public class CorrVector{ // TODO: Update to non-quad (extract to a file first)?
s+= "tilt (up): "; for (int i = 0; i < n; i++) s += String.format(" %8.4fpx", tilts[i]); s+=" (shift of the image center)\n";
s+= "azimuth (right):"; for (int i = 0; i < n; i++) s += String.format(" %8.4fpx", azimuths[i]); s+=" (shift of the image center)\n";
s+= "roll (CW): "; for (int i = 0; i < n; i++) s += String.format(" %8.4fpx", rolls[i]); s+=" (shift at the image half-width from the center)\n";
s+= "diff zoom (in): "; for (int i = 0; i < n; i++) s += String.format(" %8.4fpx", zooms[i]); s+=" (shift at the image half-width from the center)\n";
s+= "diff zoom (in): "; for (int i = 0; i < n; i++) s += String.format(" %8.4fpx", zooms[i]); s+=" (shift at the image half-width from the center)\n";
if (short_out) {
return s;
}
s += "Symmetrical vector:\n";
if (getNumSensors() == 4) { // Use arrows for quad camera only (but update to match new
// ← → ↑ ↓ ⇖ ⇗ ⇘ ⇙ ↔ ↺ ↻
......
......@@ -2120,6 +2120,45 @@ public class Correlation2d {
}
return rslt;
}
/**
 * Get the fractional maximum location as a "center of mass" over all pixels.
 * Data should be masked by the caller.
 * @param data correlation data [(2 * transform_size - 1) * (2 * transform_size - 1)]
 * @param data_width width of the data tile (2 * transform_size - 1)
 * @param center_row row index corresponding to zero vertical offset
 * @param debug print the result if true
 * @return {x, y} centroid relative to the tile center
 */
public double [] getMaxXYCm( // not used in lwir
double [] data,
int data_width, // = 2 * transform_size - 1;
int center_row,
boolean debug)
{
int data_height = data.length/data_width;
int center_x = (data_width - 1)/2; // = transform_size - 1;
//calculate as "center of mass"
double s0 = 0, sx=0,sy = 0;
for (int iy = 0; iy < data_height; iy++) {
double y = iy - center_row;
for (int ix = 0; ix < data_width; ix++) {
double x = ix - center_x;
double d = data[iy * data_width + ix];
s0 += d;
sx += d * x;
sy += d * y;
}
}
double [] rslt = {sx / s0, sy / s0};
if (debug){
System.out.println("getMaxXYCm() -> "+rslt[0]+":"+rslt[1]);
}
return rslt;
}
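A small, hypothetical usage of the new getMaxXYCm(): the caller masks/weights the tile first (as getPairsCenters() does later in this file), and the result is the centroid in pixels relative to the tile center. Assuming transform_size = 8 and an initialized Correlation2d instance named correlation2d:

// Hypothetical usage only - a single peak 2 px right of center on a 15x15 masked tile.
int width = 2 * 8 - 1;                            // 2 * transform_size - 1 = 15
double [] masked = new double [width * width];
masked[7 * width + 9] = 1.0;                      // iy = 7 (center row), ix = 9 (center + 2)
double [] xy = correlation2d.getMaxXYCm(
		masked,                                   // double [] data, masked by the caller
		width,                                    // int data_width
		(width - 1) / 2,                          // int center_row (centered layout)
		true);                                    // boolean debug
// expected xy ~ {2.0, 0.0}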
/**
* Calculate 1-d maximum location, strength and half-width for the special strip (odd rows shifted by 0.5
* Negative values are ignored!
......@@ -3430,7 +3469,6 @@ public class Correlation2d {
imgdtt_params.lma_adjust_wy, // boolean adjust_ellipse, // allow non-circular correlation maximums lma_adjust_wy
true, // imgdtt_params.lma_adjust_wxy, // boolean adjust_lazyeye_par, // adjust disparity corrections parallel to disparities lma_adjust_wxy
true, // imgdtt_params.lma_adjust_ly1, // boolean adjust_lazyeye_ortho, // adjust disparity corrections orthogonal to disparities lma_adjust_ly1
ds, // disp_str, // xcenter_str, // double [][] disp_str, // initial value of disparity/strength/?
imgdtt_params.lma_half_width, // double half_width, // A=1/(half_widh)^2 lma_half_width
imgdtt_params.lma_cost_wy, // double cost_lazyeye_par, // cost for each of the non-zero disparity corrections lma_cost_wy
......@@ -3600,7 +3638,7 @@ public class Correlation2d {
double [][] dbg_corr = debug_graphic ? new double [corrs.length][] : null;
double [][] dbg_weights = debug_graphic ? new double [corrs.length][] : null;
// double [][] dbg_weights = debug_graphic ? new double [corrs.length][] : null;
if (debug_graphic) {
(new ShowDoubleFloatArrays()).showArrays(
corrs,
......@@ -3614,6 +3652,7 @@ public class Correlation2d {
// for (int npair = 0; npair < corrs.length; npair++) if ((corrs[npair] != null) && (((pair_mask >> npair) & 1) !=0)){
double [][] filtWeight = new double [corrs.length][];
for (int npair = 0; npair < corrs.length; npair++) if ((corrs[npair] != null) && (pair_mask[npair])){
// double[] corr = corrs[npair].clone();
......@@ -3638,7 +3677,7 @@ public class Correlation2d {
// filter convex
int ix0 = (imx % corr_size) - center; // signed, around center to match filterConvex
int iy0 = (imx / corr_size) - center; // signed, around center to match filterConvex
double [] filtWeight = filterConvex(
filtWeight[npair] = filterConvex(
corr_blur, // double [] corr_data,
imgdtt_params.cnvx_hwnd_size, // int hwin,
ix0, // int x0,
......@@ -3647,17 +3686,17 @@ public class Correlation2d {
imgdtt_params.cnvx_weight, // double nc_cost,
(debug_level > 2)); // boolean debug);
if (dbg_corr != null) dbg_corr [npair] = corr_blur;
if (dbg_weights != null) dbg_weights[npair] = filtWeight;
// if (dbg_weights != null) dbg_weights[npair] = filtWeight[npair];
// Normalize weight for each pair to compensate for different number of convex samples?
// int fcam = PAIRS[npair][0];
// int scam = PAIRS[npair][1];
for (int i = 1; i < filtWeight.length; i++) if (filtWeight[i] > 0.0) {
for (int i = 1; i < filtWeight[npair].length; i++) if (filtWeight[npair][i] > 0.0) {
int ix = i % corr_size; // >=0
int iy = i / corr_size; // >=0
double v = corrs[npair][i]; // not blurred
double w = filtWeight[i];
double w = filtWeight[npair][i];
if (vasw_pwr != 0) {
w *= Math.pow(Math.abs(v), vasw_pwr);
}
......@@ -3685,7 +3724,7 @@ public class Correlation2d {
getCorrTitles());
(new ShowDoubleFloatArrays()).showArrays(
dbg_weights,
filtWeight,
corr_size,
corr_size,
true,
......@@ -3699,22 +3738,45 @@ public class Correlation2d {
disp_str2[0] = disp_str;
}
// double [][] disp_str2 = {{0.0, 1.0}}; // temporary // will be calculated/set later
boolean lmaSuccess = false;
double [] disp = null;
// adjust_ly
double [][] ly_offsets_pairs = null;
if (adjust_ly) {
ly_offsets_pairs = getPairsCenters(
corrs, // double [][] corrs,
filtWeight); // double [][] weights)
}
double step_weight = 0.5; // scale corrections
double min_correction = 0.1; // exit when maximal XY correction is below
while (!lmaSuccess) {
lma.initVector( // USED in lwir
lma.initVector(
imgdtt_params.lmas_adjust_wm, // boolean adjust_width, // adjust width of the maximum - lma_adjust_wm
imgdtt_params.lmas_adjust_ag, // boolean adjust_scales, // adjust 2D correlation scales - lma_adjust_ag
imgdtt_params.lmas_adjust_wy, // boolean adjust_ellipse, // allow non-circular correlation maximums lma_adjust_wy
(adjust_ly ? imgdtt_params.lma_adjust_wxy : false), //imgdtt_params.lma_adjust_wxy, // boolean adjust_lazyeye_par, // adjust disparity corrections parallel to disparities lma_adjust_wxy
(adjust_ly ? imgdtt_params.lma_adjust_ly1: false), // imgdtt_params.lma_adjust_ly1, // boolean adjust_lazyeye_ortho, // adjust disparity corrections orthogonal to disparities lma_adjust_ly1
disp_str2, // xcenter, // double disp0, // initial value of disparity
disp_str2, // xcenter,
imgdtt_params.lma_half_width, // double half_width, // A=1/(half_widh)^2 lma_half_width
(adjust_ly ? imgdtt_params.lma_cost_wy : 0.0), // imgdtt_params.lma_cost_wy, // double cost_lazyeye_par, // cost for each of the non-zero disparity corrections lma_cost_wy
(adjust_ly ? imgdtt_params.lma_cost_wxy : 0.0) //imgdtt_params.lma_cost_wxy // double cost_lazyeye_odtho // cost for each of the non-zero ortho disparity corrections lma_cost_wxy
);
lma.setMatrices(disp_dist);
lma.initMatrices(); // should be called after initVector and after setMatrices
boolean all_sensors_used = lma.setInitialLYOffsets(
ly_offsets_pairs, // double [][] pair_centers,
step_weight, // double step_weight, // scale corrections
min_correction, // double min_correction ){ // exit when maximal XY correction is below
(debug_level > 0)); //
if (adjust_ly && !all_sensors_used) {
return null; //LY requested, but not all sensors present
}
//center
disp = null;
if (need_poly) {
......@@ -3724,8 +3786,15 @@ public class Correlation2d {
debug_graphic?dbg_title:null); // double [] rslt = {-approx2d[0], approx2d[2], hwx, hwy};
if (disp == null) {
if (debug_level > 0) {
System.out.println("Poly disparity=NULL");
if (imgdtt_params.lmas_poly_continue && (disp == null)) {
disp = disp_str2[0];
if (debug_level > 0) {
System.out.println("Poly disparity=NULL, using tile center for initial LMA");
}
} else {
if (debug_level > 0) {
System.out.println("Poly disparity=NULL, set lmas_poly_continue to true to use tile center instead");
}
}
} else {
disp[1] *= imgdtt_params.lmas_poly_str_scale;
......@@ -3748,6 +3817,9 @@ public class Correlation2d {
} else {
disp = disp_str;
}
if (disp != null) {
disp_str2[0] = disp;
lma.initDisparity( // USED in lwir
......@@ -3785,7 +3857,7 @@ public class Correlation2d {
lma.updateFromVector();
double [][] dispStr = lma.lmaDisparityStrength(
double [][] dispStr = lma.lmaDisparityStrength( //TODO: add parameter to filter out negative minimums ?
imgdtt_params.lmas_max_rel_rms, // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
imgdtt_params.lmas_min_strength, // minimal composite strength (sqrt(average amp squared over absolute RMS)
imgdtt_params.lmas_min_ac, // minimal of A and C coefficients maximum (measures sharpest point/line)
......@@ -3846,7 +3918,32 @@ public class Correlation2d {
return lmaSuccess? lma: null;
}
public double [][] getPairsCenters(
double [][] corrs,
double [][] weights){
double [][] xy_offsets_pairs = new double[corrs.length][]; //
for (int np = 0; np < corrs.length; np++) if (corrs[np]!= null) {
double [] wcorr = new double [corrs[np].length];
double pair_weights = 0.0;
for (int i = 0; i < wcorr.length; i++) if (weights[np][i] > 0.0){
wcorr[i] = corrs[np][i]*weights[np][i];
pair_weights += wcorr [i];
}
double [] dxy = getMaxXYCm(
wcorr, // double [] data,
getCombWidth(), // data_width,
getCombHeight()/2 - getCombOffset(), // int center_row
false); // boolean debug)
xy_offsets_pairs[np] = new double[3];
xy_offsets_pairs[np][0] = dxy[0];
xy_offsets_pairs[np][1] = dxy[1];
xy_offsets_pairs[np][2] = pair_weights;
}
return xy_offsets_pairs;
}
public Correlations2dLMA corrLMA( // USED in lwir
ImageDttParameters imgdtt_params,
double [][] corrs,
......
......@@ -200,19 +200,12 @@ public class ExtrinsicAdjustment {
public void showInput(double[][] data, String title) {
int clusters = clustersX * clustersY;
// double [][] pixels = new double [ExtrinsicAdjustment.INDX_LENGTH+4][clusters];
// String [] titles = new String[ExtrinsicAdjustment.INDX_LENGTH+4];
// for (int i = 0; i < ExtrinsicAdjustment.INDX_LENGTH; i++) {
double [][] pixels = new double [indx_length + 4][clusters];
String [] titles = new String[indx_length + 4];
for (int i = 0; i < indx_length; i++) {
// titles[i] = ExtrinsicAdjustment.DATA_TITLES[i];
titles[i] = data_titles[i];
}
// titles[ExtrinsicAdjustment.INDX_LENGTH+0]="Force_disparity";
// titles[ExtrinsicAdjustment.INDX_LENGTH+1]="dx-sum";
// titles[ExtrinsicAdjustment.INDX_LENGTH+2]="dy_sum";
// titles[ExtrinsicAdjustment.INDX_LENGTH+3]="dd_sum";
titles[indx_length+0]="Force_disparity";
titles[indx_length+1]="dx-sum";
titles[indx_length+2]="dy_sum";
......@@ -223,13 +216,10 @@ public class ExtrinsicAdjustment {
for (int c = 0; c < data[cluster].length; c++) {
pixels[c][cluster] = data[cluster][c];
}
for (int i = 0;i <4; i++) {
// pixels[ExtrinsicAdjustment.INDX_LENGTH+1][cluster] += 0.25 * data[cluster][ExtrinsicAdjustment.INDX_X0 + 2 * i];
// pixels[ExtrinsicAdjustment.INDX_LENGTH+2][cluster] += 0.25 * data[cluster][ExtrinsicAdjustment.INDX_X0 + 2 * i + 1];
// pixels[ExtrinsicAdjustment.INDX_LENGTH+3][cluster] += 0.25 * data[cluster][ExtrinsicAdjustment.INDX_DD0 + i];
pixels[indx_length+1][cluster] += 0.25 * data[cluster][INDX_X0 + 2 * i];
pixels[indx_length+2][cluster] += 0.25 * data[cluster][INDX_X0 + 2 * i + 1];
pixels[indx_length+3][cluster] += 0.25 * data[cluster][indx_dd0 + i];
for (int i = 0;i < num_sensors; i++) {
pixels[indx_length+1][cluster] += data[cluster][INDX_X0 + 2 * i]/num_sensors;
pixels[indx_length+2][cluster] += data[cluster][INDX_X0 + 2 * i + 1]/num_sensors;
pixels[indx_length+3][cluster] += data[cluster][indx_dd0 + i]/num_sensors;
}
} else {
for (int c = 0; c < pixels.length; c++) {
......@@ -238,7 +228,6 @@ public class ExtrinsicAdjustment {
}
}
if (force_disparity!=null) {
// pixels[ExtrinsicAdjustment.INDX_LENGTH][cluster] = force_disparity[cluster]?1.0:0.0;
pixels[indx_length][cluster] = force_disparity[cluster]?1.0:0.0;
}
}
......@@ -250,6 +239,67 @@ public class ExtrinsicAdjustment {
title,
titles); //ExtrinsicAdjustment.DATA_TITLES);
}
public double [] weightedLY (
double [][] data, // may be difference between two LY
double [][] ref_data, // to use strength from it (or the same as data)
double min_strength) {
if (ref_data == null) ref_data = data;
double [] avg = new double [indx_length];
double [] weights = new double [data.length];
double sw = 0;
for (int cluster = 0; cluster < weights.length; cluster++) if ((ref_data[cluster] != null) && (data[cluster] != null) && !Double.isNaN (ref_data[cluster][INDX_STRENGTH])){
double w = ref_data[cluster][INDX_STRENGTH] - min_strength;
if (w < 0.0) w = 0.0;
weights[cluster] = w;
sw += w;
}
if (sw <= 0.0) {
return null;
}
for (int cluster = 0; cluster < weights.length; cluster++) {
weights[cluster] /= sw;
}
for (int n = 0; n < avg.length; n ++ ) {
for (int cluster = 0; cluster < weights.length; cluster++) {
if ((weights[cluster] > 0.0) && !Double.isNaN(data[cluster][n])){
avg[n] += weights[cluster] * data[cluster][n];
}
}
}
return avg;
}
public String stringWeightedLY(
double [][] data, // may be difference between two LY
double [][] ref_data, // to use strength from it
double min_strength,
int [] format,
String suffix)
{
String dfmt = "%"+format[0]+"."+format[1]+"f,";
String dfmta = "%"+format[0]+"."+(2*format[1])+"f";
String hfmt = "%"+(format[0]/2)+"d%"+(format[0]-format[0]/2)+"s,";
double [] avg = weightedLY (data, ref_data, min_strength);
String s = "";
double avg_dd = 0.0, avg_nd = 0.0, avg_x = 0.0, avg_y = 0.0;
for (int p = 0; p < num_sensors; p++) avg_dd += avg[indx_dd0 + p]/ num_sensors;
for (int p = 0; p < num_sensors; p++) avg_nd += avg[indx_nd0 + p]/ num_sensors;
for (int p = 0; p < num_sensors; p++) avg_x += avg[INDX_X0 + 2 * p + 0]/ num_sensors;
for (int p = 0; p < num_sensors; p++) avg_y += avg[INDX_X0 + 2 * p + 1]/ num_sensors;
s +="diff_disparity = "+String.format(dfmt, avg[INDX_DIFF])+"\n";
s += "Port"+suffix+": [";
for (int p = 0; p < num_sensors; p++) s += String.format(hfmt, p, "");s+="]\n";
s += "DD"+suffix+"= [";
for (int p = 0; p < num_sensors; p++) s += String.format(dfmt, avg[indx_dd0 + p]);s+="]" + String.format(" # avg = "+dfmta+"\n", avg_dd);
s += "ND"+suffix+"= [";
for (int p = 0; p < num_sensors; p++) s += String.format(dfmt, avg[indx_nd0 + p]);s+="]" + String.format(" # avg = "+dfmta+"\n", avg_nd);
s += "DX"+suffix+"= [";
for (int p = 0; p < num_sensors; p++) s += String.format(dfmt, avg[INDX_X0 + 2 * p + 0]);s+="]" + String.format(" # avg = "+dfmta+"\n", avg_x);
s += "DY"+suffix+"= [";
for (int p = 0; p < num_sensors; p++) s += String.format(dfmt, avg[INDX_X0 + 2 * p + 1]);s+="]" + String.format(" # avg = "+dfmta+"\n", avg_y);
return s;
}
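A hypothetical call-site sketch for the two new helpers: weightedLY() averages a lazy-eye data set (or a difference of two sets) weighted by per-cluster INDX_STRENGTH minus min_strength, and stringWeightedLY() prints per-port DD/ND/DX/DY rows with their averages. The data variables, the {8, 3} format (field width, decimals) and the suffix are assumptions, not from this commit.

// Hypothetical usage (inside ExtrinsicAdjustment, where the indx_* layout is defined);
// ly_data and ly_ref are placeholders for per-cluster LY arrays in that layout.
double [] avg = weightedLY(
		ly_data,                  // double [][] data - may be a difference of two LY measurements
		ly_ref,                   // double [][] ref_data - provides INDX_STRENGTH weights
		0.1);                     // double min_strength
if (avg != null) {                // null means no cluster exceeded min_strength
	System.out.print(stringWeightedLY(
			ly_data,
			ly_ref,
			0.1,                  // double min_strength
			new int [] {8, 3},    // int [] format: {field width, decimals}
			"_diff"));            // String suffix appended to the row labels
}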
/*
public static void showLYInput(
double[][] data,
......
......@@ -1703,19 +1703,14 @@ public class ImageDttCPU {
final int globalDebugLevel)
{
final boolean debug_distort= (globalDebugLevel >0); // .false; // true;
// final double [][] debug_offsets = null;
//lma_dbg_scale
// final double [][] debug_offsets = new double[imgdtt_params.lma_dbg_offset.length][2];
final double [][] debug_offsets = null;
/*
final double [][] debug_offsets = new double[getNumSensors()][2];
for (int i = 0; i < imgdtt_params.lma_dbg_offset.length; i++) for (int j = 0; j < debug_offsets[i].length; j++) {
debug_offsets[i][j] = imgdtt_params.lma_dbg_offset[i][j]*imgdtt_params.lma_dbg_scale;
}
// final int quad = 4; // number of subcameras
// final int nSens = geometryCorrection.getNumSensors();
*/
final int numcol = 3; // number of colors // keep the same, just do not use [0] and [1], [2] - green
// final int numColors = image_data[0].length;
final int height=image_data[0][0].length/width;
final int tilesX=width/transform_size;
final int tilesY=height/transform_size;
......@@ -2557,6 +2552,10 @@ public class ImageDttCPU {
final int tileStep, // process tileStep x tileStep cluster of tiles when adjusting lazy eye parameters
final int mcorr_sel, // +1 - all, +2 - dia, +4 - sq, +8 - neibs, +16 - hor + 32 - vert
final int mcorr_comb_width, // combined correlation tile width
final int mcorr_comb_height, // combined correlation tile full height
final int mcorr_comb_offset, // combined correlation tile height offset: 0 - centered (-height/2 to height/2), height/2 - only positive (0 to height)
final double mcorr_comb_disp, // Combined tile per-pixel disparity for baseline == side of a square
final int debug_tileX,
final int debug_tileY,
......@@ -2564,12 +2563,13 @@ public class ImageDttCPU {
final int globalDebugLevel)
{
final boolean debug_distort= (globalDebugLevel >0); // .false; // true;
// final double [][] debug_offsets = new double[imgdtt_params.lma_dbg_offset.length][2];
final double [][] debug_offsets = null;
/*
final double [][] debug_offsets = new double[getNumSensors()][2];
for (int i = 0; i < imgdtt_params.lma_dbg_offset.length; i++) for (int j = 0; j < debug_offsets[i].length; j++) {
debug_offsets[i][j] = imgdtt_params.lma_dbg_offset[i][j]*imgdtt_params.lma_dbg_scale;
}
*/
final int numcol = 3; // number of colors // keep the same, just do not use [0] and [1], [2] - green
final int height=image_data[0][0].length/width;
......@@ -2579,6 +2579,7 @@ public class ImageDttCPU {
final int clustersX= (tilesX + tileStep - 1) / tileStep;
final int clustersY= (tilesY + tileStep - 1) / tileStep;
final double [][] lazy_eye_data = new double [clustersY*clustersX][];
final double [][] dbg_num_good_tiles = new double [2][clustersY*clustersX];
final int nClustersInChn=clustersX * clustersY;
final int clustSize = tileStep*tileStep;
......@@ -2596,6 +2597,12 @@ public class ImageDttCPU {
if (isCorrHor (mcorr_sel)) corr_calculate = correlation2d.selectHorizontal (corr_calculate);
if (isCorrVert (mcorr_sel)) corr_calculate = correlation2d.selectVertical (corr_calculate);
correlation2d.setCorrPairs(corr_calculate); // will limit correlation pairs calculation
correlation2d.generateResample( // should be called before
mcorr_comb_width, // combined correlation tile width
mcorr_comb_height, // combined correlation tile full height
mcorr_comb_offset, // combined correlation tile height offset: 0 - centered (-height/2 to height/2), height/2 - only positive (0 to height)
mcorr_comb_disp);
}
final double [][][][][][] clt_data = new double[numSensors][numcol][tilesY][tilesX][][];
final Thread[] threads = newThreadArray(threadsMax);
......@@ -2746,6 +2753,8 @@ public class ImageDttCPU {
double [][][] corrs = new double [clustSize][][];
double [][] disp_str = new double [clustSize][];
double [][] pxpy = new double [clustSize][2];
// double [] tile_weights = new double [clustSize];
boolean debugCluster = (clustX == debug_clustX) && (clustY == debug_clustY);
if (debugCluster) {
System.out.println("debugCluster");
......@@ -2754,7 +2763,7 @@ public class ImageDttCPU {
// filter only tiles with similar disparity to enable lazy eye for the ERS.
int num_good_tiles = 0;
while (true) {
// num_good_tiles = 0; // FIXME: Was missing - uncomment?
num_good_tiles = 0; // was missing - now reset at the start of each pass
int mnTx = -1, mnTy = -1, mxTx = -1, mxTy = -1;
double mn = Double.NaN;
double mx = Double.NaN;
......@@ -2798,6 +2807,9 @@ public class ImageDttCPU {
if (num_good_tiles == 0) {
continue;
}
dbg_num_good_tiles[0][nCluster] = num_good_tiles;
int dbg_num_good_lma= 0;
// num_good_tiles in a cluster > 0
for (int cTileY = 0; cTileY < tileStep; cTileY++) {
tileY = clustY * tileStep + cTileY ;
......@@ -3010,11 +3022,44 @@ public class ImageDttCPU {
if (tile_lma_debug_level > 0) {
System.out.println("Will run getMaxXSOrtho( ) for tileX="+tileX+", tileY="+tileY);
}
// get CM disparity/strength
if (true) {
double [] corr_combo_tile = correlation2d.accumulateInit();
double sumw = correlation2d.accummulatePairs(
corr_combo_tile, // double [] accum_tile,
corrs[cTile], // double [][] corr_tiles,
correlation2d.selectAll(), // boolean [] selection,
1.0); // double weight);
correlation2d.normalizeAccumulatedPairs(
corr_combo_tile,
sumw);
//tile_weights[cTile] = sumw;
// double [] disp_str_combo = new double[2];
int [] ixy = correlation2d.getMaxXYInt( // find integer pair or null if below threshold // USED in lwir
corr_combo_tile, // double [] data, // [data_size * data_size]
null, // disp_str_combo,
correlation2d.getCombWidth(), // data_width,
correlation2d.getCombHeight()/2 - correlation2d.getCombOffset(), // int center_row (TODO: verify offset convention)
true, // boolean axis_only,
-1.0, // imgdtt_params.min_corr, // ???? double minMax, // minimal value to consider (at integer location, not interpolated)
false); // debugCluster); // tile_lma_debug_level > 0); // boolean debug);
double [] corr_stat = correlation2d.getMaxXCm( // get fractional center as a "center of mass" inside circle/square from the integer max
corr_combo_tile, // double [] data, // [data_size * data_size]
correlation2d.getCombWidth(), // int data_width, // = 2 * transform_size - 1;
correlation2d.getCombHeight()/2 - correlation2d.getCombOffset(),// int center_row,
ixy[0], // int ixcenter, // integer center x
false); // debugCluster); // (tile_lma_debug_level > 0)); // boolean debug);
if (corr_stat != null) { // almost always
disp_str[cTile] = new double [] {-corr_stat[0], corr_stat[1]};
}
}
// debug new LMA correlations
int tdl = debugCluster ? tile_lma_debug_level : -3;
// find disp_str for each tile in a cluster
if (true) { // debugCluster1) {
if (globalDebugLevel > 1000) { // true) { // debugCluster1) {
if (debugCluster && (globalDebugLevel > -1)) { // -2)) {
System.out.println("Will run new LMA for tileX="+tileX+", tileY="+tileY);
}
......@@ -3036,9 +3081,14 @@ public class ImageDttCPU {
tileX, // int tileX, // just for debug output
tileY); // int tileY
*/
if (debugCluster) {
System.out.println("Before single: clustX="+clustX+", clustY="+clustY+", tileX="+tileX+
", tileY="+tileY+", cTile="+cTile+", cTileX="+cTileX+", cTileY="+cTileY);
}
Corr2dLMA lma2 = correlation2d.corrLMA2Single(
imgdtt_params, // ImageDttParameters imgdtt_params,
false, // false, // boolean adjust_ly, // adjust Lazy Eye
imgdtt_params.lmas_LY_single_LY, // false, // false, // boolean adjust_ly, // adjust Lazy Eye
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs[cTile], // corrs, // double [][] corrs,
......@@ -3055,6 +3105,7 @@ public class ImageDttCPU {
disp_str[cTile] = null;
if (lma2 != null) {
dbg_num_good_lma ++;
disp_str[cTile] = lma2.lmaDisparityStrength(
imgdtt_params.lmas_max_rel_rms, // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
imgdtt_params.lmas_min_strength, // minimal composite strength (sqrt(average amp squared over absolute RMS)
......@@ -3064,53 +3115,176 @@ public class ImageDttCPU {
imgdtt_params.lma_str_scale, // convert lma-generated strength to match previous ones - scale
imgdtt_params.lma_str_offset // convert lma-generated strength to match previous ones - add to result
)[0];
if (tile_lma_debug_level > 0) {
if ((disp_str[cTile]!=null) && Double.isNaN(disp_str[cTile][1])) {
System.out.println();
}
if (debugCluster || (tile_lma_debug_level > 0)) {
double [][] ds_dbg = {disp_str[cTile]};
System.out.println("multi: clustX="+clustX+", clustY="+clustY+", tileX="+tileX+", tileY="+tileY+", dbg_num_good_lma="+dbg_num_good_lma);
lma2.printStats(ds_dbg,1);
}
} else {
if (debugCluster || (tile_lma_debug_level > 0)) {
System.out.println("LMA failed, dbg_num_good_lma="+dbg_num_good_lma);
}
}
}
} // if (globalDebugLevel > 1000) - skip single-tile LMA, similar to GPU version
}
}
}
} //for (int cTileY = 0; cTileY < tileStep; cTileY++) {
dbg_num_good_tiles[1][nCluster] = dbg_num_good_lma;
// above - scanned each tile in a cluster
if (true) { //debugCluster1) {
if (globalDebugLevel > 0) {
if (debugCluster || (globalDebugLevel > 0)) {
System.out.println("Will run new LMA for clustX="+clustX+", clustY="+clustY);
}
/*
Corr2dLMA lma2 = corr2d.corrLMA2Multi(
imgdtt_params, // ImageDttParameters imgdtt_params,
tileStep, // int clust_width,
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_inv_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs, // [tIndex], // double [][] corrs,
disp_dist, // [tIndex],
rXY, // double [][] rXY, // non-distorted X,Y offset per nominal pixel of disparity
corr2d.longToArray(imgdtt_params.dbg_pair_mask), // imgdtt_params.dbg_pair_mask // int pair_mask, // which pairs to process
disp_str, // corr_stat, // double[][] xcenter_str, // preliminary center x in pixels for largest baseline
imgdtt_params.ortho_vasw_pwr, // double vasw_pwr, // value as weight to this power,
clust_lma_debug_level + 0, // 2, // int debug_level, // for a single cluster
clustX, // int tileX, // just for debug output
clustY ); // int tileY
*/
Corr2dLMA lma2 = correlation2d.corrLMA2Multi(
imgdtt_params, // ImageDttParameters imgdtt_params,
tileStep, // int clust_width,
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs, // corrs, // double [][][] corrs,
disp_dist,
rXY, // double [][] rXY, // non-distorted X,Y offset per nominal pixel of disparity
// all that are not null in corr_tiles
correlation2d.selectAll(), // longToArray(imgdtt_params.dbg_pair_mask), // int pair_mask, // which pairs to process
disp_str, //corr_stat[0], // double xcenter, // preliminary center x in pixels for largest baseline
imgdtt_params.ortho_vasw_pwr, // double vasw_pwr, // value as weight to this power,
clust_lma_debug_level + 0, // // +2, // int debug_level,
clustX, // int tileX, // just for debug output
clustY ); // int tileY
Corr2dLMA lma2 = null;
if (imgdtt_params.lma_multi_cons) {
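// Consolidated mode (lma_multi_cons): instead of a multi-tile LMA, average the correlation pairs and
// disp_dist matrices over all tiles of the cluster, weighted by per-tile strength, then run a
// single-tile LMA (with LY adjustment) on the averaged data.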
double [][] corrs_cons = new double [correlation2d.getNumPairs()][];
double [][] disp_dist_cons = new double[numSensors][]; // used to correct 3D correlations
double [] disp_str_cons = {0.0,0.0};
int num_tiles = 0;
double sum_w = 0.0;
double sum_wd = 0.0;
for (int nTile = 0; nTile < disp_str.length; nTile++) if (corrs[nTile] != null) { //(disp_str[nTile] != null) {
// Init if it is a first non-null tile
if (num_tiles == 0) {
for (int np = 0; np < corrs_cons.length; np++) if (corrs[nTile][np] != null){
corrs_cons[np] = new double [corrs[nTile][np].length];
}
for (int nsens = 0; nsens < numSensors; nsens++) {
for (int i = 0; i < disp_dist[nTile][nsens].length; i++) {
disp_dist_cons[nsens] = new double [disp_dist[nTile][nsens].length];
}
}
}
double w = disp_str[nTile][1];
sum_w += w;
sum_wd += w * disp_str[nTile][0];
for (int np = 0; np < corrs_cons.length; np++) if (corrs[nTile][np] != null) {
for (int i = 0; i < corrs_cons[np].length; i++) {
corrs_cons[np][i] += w * corrs[nTile][np][i];
}
}
for (int nsens = 0; nsens < numSensors; nsens++) {
for (int i = 0; i < disp_dist_cons[nsens].length; i++) {
disp_dist_cons[nsens][i] += w * disp_dist[nTile][nsens][i];
}
}
num_tiles ++;
}
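// Normalize the weighted sums: consolidated disparity = sum(w*d)/sum(w), consolidated strength =
// sum(w)/num_tiles; the accumulated correlation pairs and disp_dist are divided by sum(w).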
if ((num_tiles > 0) && (sum_w > 0.0)) {
// if (sum_w > 0.0) {
disp_str_cons[0] = sum_wd/sum_w;
disp_str_cons[1] = sum_w/num_tiles;
// }
double s = 1.0/sum_w; // num_tiles;
for (int np = 0; np < corrs_cons.length; np++) {
if (corrs_cons[np] != null) {
for (int i = 0; i < corrs_cons[np].length; i++) {
corrs_cons[np][i] *= s;
}
}
}
for (int nsens = 0; nsens < numSensors; nsens++) {
for (int i = 0; i < disp_dist_cons[nsens].length; i++) {
disp_dist_cons[nsens][i] *=s;
}
}
// Estimate disparity - consolidate all correlation pairs and find maximum
double [] corr_combo_all = correlation2d.accumulateInit();
double sumw = correlation2d.accummulatePairs(
corr_combo_all, // double [] accum_tile,
corrs_cons, // double [][] corr_tiles,
correlation2d.selectAll(), // boolean [] selection,
1.0); // double weight);
correlation2d.normalizeAccumulatedPairs(
corr_combo_all,
sumw);
// find argmax on y==0
double [] disp_str_combo = new double[2];
int [] ixy = correlation2d.getMaxXYInt( // find integer pair or null if below threshold // USED in lwir
corr_combo_all, // double [] data, // [data_size * data_size]
disp_str_combo,
correlation2d.getCombWidth(), // data_width,
correlation2d.getCombHeight()/2 - correlation2d.getCombOffset(), // int center_row, ??????????????
true, // boolean axis_only,
imgdtt_params.min_corr, // ???? double minMax, // minimal value to consider (at integer location, not interpolated)
false); // debugCluster); // tile_lma_debug_level > 0); // boolean debug);
if (debugCluster) { // && (globalDebugLevel > -1)) { // -2)) {
System.out.println("Will run new LMA for clustX="+clustX+", clustY="+clustY);
if (disp_dist_cons != null) {
System.out.println("disp_dist_cons[0]="+disp_dist_cons[0]+", disp_dist_cons[0]="+disp_dist_cons[0]+" (this tile - weighted average will be discarded)");
}
(new ShowDoubleFloatArrays()).showArrays(corrs_cons, 15, 15, true, "corrs_cons_CX"+clustX+"-CY"+clustY,correlation2d.getCorrTitles());
// mcorr_comb_width, // combined correlation tile width
//mcorr_comb_height, // combined correlation tile full height
(new ShowDoubleFloatArrays()).showArrays(corr_combo_all, mcorr_comb_width, mcorr_comb_height, "corr_combo_all_CX"+clustX+"-CY"+clustY);
}
if (ixy != null) { //TODO - for CM use magic!
double [] corr_stat = correlation2d.getMaxXCm( // get fractional center as a "center of mass" inside circle/square from the integer max
corr_combo_all, // double [] data, // [data_size * data_size]
correlation2d.getCombWidth(), // int data_width, // = 2 * transform_size - 1;
correlation2d.getCombHeight()/2 - correlation2d.getCombOffset(),// int center_row,
ixy[0], // int ixcenter, // integer center x
// corr_wndy, // double [] window_y, // (half) window function in y-direction(perpendicular to disparity: for row0 ==1
// corr_wndx, // double [] window_x, // half of a window function in x (disparity) direction
false); // debugCluster); // (tile_lma_debug_level > 0)); // boolean debug);
if (corr_stat != null) {
disp_str_combo[0] = -corr_stat[0]; // -ixy[0]; for CM use magic???
disp_str_combo[1] = corr_stat[1];
}
if (debugCluster) { // && (globalDebugLevel > -1)) { // -2)) {
System.out.println("Will run new LMA for clustX="+clustX+", clustY="+clustY);
if (disp_str_combo != null) {
System.out.println("disp_str_combo[0]="+disp_str_combo[1]+", disp_str_combo[0]="+disp_str_combo[1]);
}
(new ShowDoubleFloatArrays()).showArrays(corrs_cons, 15, 15, true, "corrs_cons_CX"+clustX+"-CY"+clustY,correlation2d.getCorrTitles());
}
lma2 = correlation2d.corrLMA2Single(
imgdtt_params, // ImageDttParameters imgdtt_params,
true,// false, // false, // boolean adjust_ly, // adjust Lazy Eye
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs_cons, // corrs, // double [][] corrs,
disp_dist_cons,
rXY, // double [][] rXY, // non-distorted X,Y offset per nominal pixel of disparity
// all that are not null in corr_tiles
correlation2d.selectAll(), // longToArray(imgdtt_params.dbg_pair_mask), // int pair_mask, // which pairs to process
// TODO: Verify sign is correct (disparity, not X0)?
disp_str_combo, // null, // disp_str, //corr_stat[0], // double xcenter, // preliminary center x in pixels for largest baseline
disp_str_combo, // disp_str_cons, // double[] poly_ds, // null or pair of disparity/strength
imgdtt_params.ortho_vasw_pwr, // double vasw_pwr, // value as weight to this power,
clust_lma_debug_level + 0, // tdl, // tile_lma_debug_level, // +2, // int debug_level,
clustX, // int tileX, // just for debug output
clustY ); // int tileY
}
}
} else {
lma2 = correlation2d.corrLMA2Multi( // slow
imgdtt_params, // ImageDttParameters imgdtt_params,
tileStep, // int clust_width,
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs, // corrs, // double [][][] corrs,
disp_dist,
rXY, // double [][] rXY, // non-distorted X,Y offset per nominal pixel of disparity
// all that are not null in corr_tiles
correlation2d.selectAll(), // longToArray(imgdtt_params.dbg_pair_mask), // int pair_mask, // which pairs to process
disp_str, //corr_stat[0], // double xcenter, // preliminary center x in pixels for largest baseline
imgdtt_params.ortho_vasw_pwr, // double vasw_pwr, // value as weight to this power,
clust_lma_debug_level + 0, // // +2, // int debug_level,
clustX, // int tileX, // just for debug output
clustY ); // int tileY
}
if (lma2 != null) {
double [][] ddnd = lma2.getDdNd();
double [] stats = lma2.getStats(num_good_tiles);
......@@ -3123,29 +3297,85 @@ public class ImageDttCPU {
1.0, // imgdtt_params.lma_str_scale, // convert lma-generated strength to match previous ones - scale
0.0); // imgdtt_params.lma_str_offset); // convert lma-generated strength to match previous ones - add to result
// double [][] extra_stats = lma2.getTileStats();
if (debugCluster) {
System.out.println("ClustX="+clustX+", clustY="+clustY);
lma2.printStats(lma_ds, 1);
if (ddnd != null) {
double [][] dxy= new double [ddnd.length][2];
for (int i = 0; i < dxy.length; i++) {
dxy[i][0] = ddnd[i][0] * rXY[i][0] - ddnd[i][1] * rXY[i][1];
dxy[i][1] = ddnd[i][0] * rXY[i][1] + ddnd[i][1] * rXY[i][0];
}
System.out.print(" Port: ");
for (int i = 0; i < dxy.length; i++) System.out.print(String.format(" %2d ", i)); System.out.println();
System.out.print("Radial_in = [");
for (int i = 0; i < dxy.length; i++) System.out.print(String.format(" %6.3f,", ddnd[i][0])); System.out.println("]");
System.out.print("Tangent_CW = [");
for (int i = 0; i < dxy.length; i++) System.out.print(String.format(" %6.3f,", ddnd[i][1])); System.out.println("]");
System.out.print("X = [");
for (int i = 0; i < dxy.length; i++) System.out.print(String.format(" %6.3f,", dxy[i][0])); System.out.println("]");
System.out.print("Y = [");
for (int i = 0; i < dxy.length; i++) System.out.print(String.format(" %6.3f,", dxy[i][1])); System.out.println("]");
System.out.println();
}
}
// final double [][] lazy_eye_data = new double [clustersY*clustersX][];
// calculate average disparity per cluster using a sum of the disparity_array and the result of the LMA
lazy_eye_data[nCluster] = new double [ExtrinsicAdjustment.get_INDX_LENGTH(numSensors)];
double sum_w = 0;
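// Two accumulation modes below: consolidated (lma_multi_cons) applies the single cluster-wide
// lma_ds[0] result to every tile that has disp_str, weighting by per-tile strength; otherwise each
// tile contributes its own lma_ds[cTile] result and weight.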
for (int cTileY = 0; cTileY < tileStep; cTileY++) {
tileY = clustY * tileStep + cTileY ;
if (tileY < tilesY) {
for (int cTileX = 0; cTileX < tileStep; cTileX++) {
tileX = clustX * tileStep + cTileX ;
if (tileX < tilesX) {
cTile = cTileY * tileStep + cTileX;
if ((lma_ds[cTile] != null) && (lma_ds[cTile][1]> 0.0)) {
double w = lma_ds[cTile][1];
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_DISP] += (lma_ds[cTile][0] + disparity_array[tileY][tileX] + disparity_corr) * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_TARGET] += (disparity_array[tileY][tileX] + disparity_corr) * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_DIFF] += lma_ds[cTile][0] * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_PX + 0] += pxpy[cTile][0] * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_PX + 1] += pxpy[cTile][1] * w;
for (int cam = 0; cam < numSensors; cam++) {
lazy_eye_data[nCluster][ExtrinsicAdjustment.get_INDX_DYDDISP0(numSensors) + cam] += disp_dist[cTile][cam][2] * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.get_INDX_PYDIST(numSensors) + cam] += centersXY[cTile][cam][1] * w;
if (imgdtt_params.lma_multi_cons) {
if ((lma_ds[0] != null) && (lma_ds[0][1]> 0.0)) {
for (int cTileY = 0; cTileY < tileStep; cTileY++) {
tileY = clustY * tileStep + cTileY ;
if (tileY < tilesY) {
for (int cTileX = 0; cTileX < tileStep; cTileX++) {
tileX = clustX * tileStep + cTileX ;
if (tileX < tilesX) {
cTile = cTileY * tileStep + cTileX;
// if ((lma_ds[cTile] != null) && (lma_ds[cTile][1]> 0.0)) {
if (disp_str[cTile] != null) {
// double w = lma_ds[cTile][1];
double w = disp_str[cTile][1];
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_DISP] += (lma_ds[0][0] + disparity_array[tileY][tileX] + disparity_corr) * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_TARGET] += (disparity_array[tileY][tileX] + disparity_corr) * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_DIFF] += lma_ds[0][0] * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_PX + 0] += pxpy[cTile][0] * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_PX + 1] += pxpy[cTile][1] * w;
for (int cam = 0; cam < numSensors; cam++) {
lazy_eye_data[nCluster][ExtrinsicAdjustment.get_INDX_DYDDISP0(numSensors) + cam] += disp_dist[cTile][cam][2] * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.get_INDX_PYDIST(numSensors) + cam] += centersXY[cTile][cam][1] * w;
}
sum_w += w;
}
}
}
}
}
}
} else {
for (int cTileY = 0; cTileY < tileStep; cTileY++) {
tileY = clustY * tileStep + cTileY ;
if (tileY < tilesY) {
for (int cTileX = 0; cTileX < tileStep; cTileX++) {
tileX = clustX * tileStep + cTileX ;
if (tileX < tilesX) {
cTile = cTileY * tileStep + cTileX;
if ((lma_ds[cTile] != null) && (lma_ds[cTile][1]> 0.0)) {
double w = lma_ds[cTile][1];
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_DISP] += (lma_ds[cTile][0] + disparity_array[tileY][tileX] + disparity_corr) * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_TARGET] += (disparity_array[tileY][tileX] + disparity_corr) * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_DIFF] += lma_ds[cTile][0] * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_PX + 0] += pxpy[cTile][0] * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.INDX_PX + 1] += pxpy[cTile][1] * w;
for (int cam = 0; cam < numSensors; cam++) {
lazy_eye_data[nCluster][ExtrinsicAdjustment.get_INDX_DYDDISP0(numSensors) + cam] += disp_dist[cTile][cam][2] * w;
lazy_eye_data[nCluster][ExtrinsicAdjustment.get_INDX_PYDIST(numSensors) + cam] += centersXY[cTile][cam][1] * w;
}
sum_w += w;
}
sum_w += w;
}
}
}
......@@ -3185,7 +3415,9 @@ public class ImageDttCPU {
};
}
startAndJoin(threads);
if (dbg_num_good_tiles != null) {
// (new ShowDoubleFloatArrays()).showArrays(dbg_num_good_tiles, clustersX, clustersY, true, "num_good_tiles"); // , dbg_titles);
}
if ((dbg_distort != null) &&(globalDebugLevel >=0)) {
(new ShowDoubleFloatArrays()).showArrays(dbg_distort, tilesX, tilesY, true, "disparity_distortions"); // , dbg_titles);
}
......@@ -3326,14 +3558,13 @@ public class ImageDttCPU {
}
final boolean [][] combo_sels = pcombo_sels;
final boolean debug_distort= globalDebugLevel > 0; ///false; // true;
// final double [][] debug_offsets = new double[imgdtt_params.lma_dbg_offset.length][2];
final double [][] debug_offsets = null;
/*
final double [][] debug_offsets = new double[getNumSensors()][2];
for (int i = 0; i < imgdtt_params.lma_dbg_offset.length; i++) for (int j = 0; j < debug_offsets[i].length; j++) {
debug_offsets[i][j] = imgdtt_params.lma_dbg_offset[i][j]*imgdtt_params.lma_dbg_scale;
}
// final double [] dbg_corr_shift =((imgdtt_params.pcorr_dbg_offsx != 0.0) || (imgdtt_params.pcorr_dbg_offsy != 0.0))?
// (new double [] {imgdtt_params.pcorr_dbg_offsx,imgdtt_params.pcorr_dbg_offsy}):null;
*/
final boolean macro_mode = macro_scale != 1; // correlate tile data instead of the pixel data
final int numcol = 3; // number of colors // keep the same, just do not use [0] and [1], [2] - green
final int nTilesInChn=tilesX*tilesY;
......@@ -4009,7 +4240,7 @@ public class ImageDttCPU {
double [] poly_disp = {Double.NaN, 0.0};
Corr2dLMA lma2 = correlation2d.corrLMA2Single(
imgdtt_params, // ImageDttParameters imgdtt_params,
true, // false, // boolean adjust_ly, // adjust Lazy Eye
imgdtt_params.lmas_LY_single, // false, // boolean adjust_ly, // adjust Lazy Eye
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corr_tiles, // corrs, // double [][] corrs,
......@@ -12725,21 +12956,18 @@ public class ImageDttCPU {
final boolean debug_distort= globalDebugLevel > 0; ///false; // true;
// final double [][] debug_offsets = new double[imgdtt_params.lma_dbg_offset.length][2];
final double [][] debug_offsets = null; // new double[imgdtt_params.lma_dbg_offset.length][2];
/*
final double [][] debug_offsets = new double[getNumSensors()][2];
for (int i = 0; i < imgdtt_params.lma_dbg_offset.length; i++) for (int j = 0; j < debug_offsets[i].length; j++) {
debug_offsets[i][j] = imgdtt_params.lma_dbg_offset[i][j]*imgdtt_params.lma_dbg_scale;
}
*/
final double [] dbg_corr_shift =((imgdtt_params.pcorr_dbg_offsx != 0.0) || (imgdtt_params.pcorr_dbg_offsy != 0.0))?
(new double [] {imgdtt_params.pcorr_dbg_offsx,imgdtt_params.pcorr_dbg_offsy}):null;
final boolean macro_mode = macro_scale != 1; // correlate tile data instead of the pixel data
// final int quad = 4; // number of subcameras
// final int nSens = geometryCorrection.getNumSensors();
final int numcol = 3; // number of colors // keep the same, just do not use [0] and [1], [2] - green
// final int numColors = image_data[0].length;
final int height=image_data[0][0].length/width;
final int tilesX=width/transform_size;
final int tilesY=height/transform_size;
......
......@@ -147,7 +147,8 @@ public class ImageDttParameters {
// LMA parameters
public double lma_disp_range = 5.0; // disparity range to combine in one cluster (to mitigate ERS)
// LMA single parameters
public boolean lmas_gaussian = false; // model correlation maximum as a Gaussian (false - as a parabola)
// public boolean lmas_gaussian = false; // model correlation maximum as a Gaussian (false - as a parabola)
public int lmas_gaussian = 0; // 0 - parabola, 1 - Gaussian, 2 - limited parabola, 3 - limited squared parabola
public boolean lmas_adjust_wm = true; // used in new for width
public boolean lmas_adjust_wy = true; // adjust non-circular
public boolean lmas_adjust_ag = true; // adjust gains
......@@ -155,6 +156,10 @@ public class ImageDttParameters {
public double lmas_poly_str_scale = 1.0; // scale pre-lma poly strength
public double lmas_poly_str_min = 0.05; // ignore tiles with poly strength (scaled) below
public boolean lmas_poly_continue = true; // use center if polynomial argmax fails
public boolean lmas_LY_single = true; // Adjust LY when performing single-tile LMA
public boolean lmas_LY_single_LY = false; // Adjust LY when performing single-tile LMA before LY
public double lmas_lambda_initial = 0.03; //
public double lmas_rms_diff = 0.0003; //
public int lmas_num_iter = 20; ///10
......@@ -165,9 +170,11 @@ public class ImageDttParameters {
public double lmas_min_min_ac = 0.007; // minimal of a and C coefficients minimum (measures sharpest point)
public double lmas_max_area = 0.0; // maximal half-area (if > 0.0)
public boolean lma_gaussian = false; // model correlation maximum as a Gaussian (false - as a parabola)
// public boolean lma_gaussian = false; // model correlation maximum as a Gaussian (false - as a parabola)
public int lma_gaussian = 0; // 0 - parabola, 1 - Gaussian, 2 - limited parabola, 3 - limited squared parabola
public boolean lma_second = true; // re-run LMA after removing weak/failed tiles
public boolean lma_second_gaussian = false; // re-run after removing weak/failed in Gaussian mode
// public boolean lma_second_gaussian = false; // re-run after removing weak/failed in Gaussian mode
public int lma_second_gaussian = 0; // false; // re-run after removing weak/failed in Gaussian mode
public boolean lma_adjust_wm = true; // used in new for width
public boolean lma_adjust_wy = true; // false; // used in new for ellipse
public boolean lma_adjust_wxy = true; // used in new for lazy eye adjust parallel-to-disparity correction
......@@ -198,6 +205,9 @@ public class ImageDttParameters {
public double lma_rms_diff = 0.003; //
public int lma_num_iter = 10; //
// Filtering and strength calculation
public boolean lma_multi_cons = true; // false - run multi-tile LMA on all tiles, true - average each pair over all tiles
public double lma_max_rel_rms = 0.25; // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
public double lma_min_strength = 1.0; // minimal composite strength (sqrt(average amp squared over absolute RMS)
public double lma_min_ac = 0.05; // minimal of a and C coefficients maximum (measures sharpest point/line)
......@@ -502,8 +512,11 @@ public class ImageDttParameters {
gd.addMessage("Single-tile (no lazy eye) only parameters (some are common");
gd.addNumericField("Cluster disparity range", this.lma_disp_range, 3, 6, "pix",
"Disparity range to combine in one cluster (to mitigate ERS");
gd.addCheckbox ("Correlation maximum as gaussian", this.lmas_gaussian,
"Model correlation maximum as a Gaussian exp(-r^2) (false - as a parabola - 1-r^2)");
// gd.addCheckbox ("Correlation maximum as gaussian", this.lmas_gaussian,
// "Model correlation maximum as a Gaussian exp(-r^2) (false - as a parabola - 1-r^2)");
gd.addNumericField ("Correlation maximum function type", this.lmas_gaussian, 0, 3, "",
"0 - parabola - 1-r^2, 1 - exp(-r^2), 2 - limited parabola (>=0), 3 - limited squared parabola (1-r^2)^2");
gd.addCheckbox ("Fit correlation defined half-width", this.lmas_adjust_wm,
"Allow fitting of the half-width common for all pairs, defined by the LPF filter of the phase correlation");
gd.addCheckbox ("Adjust ellipse parameters (was Fit extra vertical half-width)", this.lmas_adjust_wy,
......@@ -516,6 +529,15 @@ public class ImageDttParameters {
"Calculated as maximal value over average radius");
gd.addNumericField("Minimal pre-LMA poly strength (scaled)", this.lmas_poly_str_min, 3, 6, "",
"Ignore tiles with pre-LMA poly strength (scaled with above) below this value");
gd.addCheckbox ("Use center point for initial LMA", this.lmas_poly_continue,
"Use center po8int if polynomial argmax() failed");
gd.addCheckbox ("Adjust LY when performing single-tile LMA (non-LY operations)", this.lmas_LY_single,
"Adjust individual sensor misaligtnments when performing LMA-based argmax() when LY output itself is not requested");
gd.addCheckbox ("Adjust LY when performing single-tile LMA first LY stage", this.lmas_LY_single_LY,
"Adjust individual sensor misaligtnments when performing first pass of LMA-based argmax() before processing clusters for LY");
gd.addMessage("LMA (single) LMA fitting parameters");
gd.addNumericField("Initial value of LMA lambda", this.lmas_lambda_initial, 3, 6, "",
"The higher the lambda the more close it will be to the gradient descent (slower/safer)");
......@@ -536,12 +558,16 @@ public class ImageDttParameters {
"Maximal product of maximum half-width by half-height, ignore check if <=0");
gd.addMessage("Multi-tile (for lazy eye) LMA (some are used for with single-tile mode too)");
gd.addCheckbox ("Correlation maximum as gaussian", this.lma_gaussian,
"Model correlation maximum as a Gaussian exp(-r^2) (false - as a parabola - 1-r^2)");
gd.addNumericField ("Correlation maximum function type", this.lma_gaussian, 0, 3, "",
"0 - parabola - 1-r^2, 1 - exp(-r^2), 2 - limited parabola (>=0), 3 - limited squared parabola (1-r^2)^2");
gd.addCheckbox ("Re-run LMA after removing weak/failed tiles", this.lma_second,
"Re-run LMA with filtered tiles (see Correlation strength calculation section below)");
gd.addCheckbox ("Gaussian mode during LMA re-run", this.lma_second_gaussian,
"Parabola is more stable when using with un-filtered tiles, so it makes sense to use Gaussina only on filtered tiles");
// gd.addCheckbox ("Gaussian mode during LMA re-run", this.lma_second_gaussian,
// "Parabola is more stable when using with un-filtered tiles, so it makes sense to use Gaussina only on filtered tiles");
gd.addNumericField ("Correlation maximum function typedurinf re-run", this.lma_second_gaussian, 0, 3, "",
"0 - parabola - 1-r^2, 1 - exp(-r^2), 2 - limited parabola (>=0), 3 - limited squared parabola (1-r^2)^2."+
" Parabola is more stable when using with un-filtered tiles, so it makes sense to use Gaussina only on filtered tiles");
gd.addCheckbox ("Fit correlation defined half-width", this.lma_adjust_wm,
"Allow fitting of the half-width common for all pairs, defined by the LPF filter of the phase correlation");
......@@ -591,6 +617,9 @@ public class ImageDttParameters {
"Limit LMA cycles, so it will exit after certain number of small improvements");
gd.addMessage("LMA results filtering");
gd.addCheckbox ("Average correlation tiles instead of the multi-tile LMA", this.lma_multi_cons,
"False - run multi-tile LMA on all tiles, true - average each pair over all tiles (as in GPU)");
gd.addNumericField("Maximal relative RMS ", this.lma_max_rel_rms, 6, 8, "",
"Discard tile if ratio of RMS to average of min and max amplitude exceeds this value");
gd.addNumericField("Minimal composite strength", this.lma_min_strength, 6, 8, "",
......@@ -772,7 +801,7 @@ public class ImageDttParameters {
this.pcorr_dbg_offsy = gd.getNextNumber();
//LMA tab
this.lma_disp_range = gd.getNextNumber();
this.lmas_gaussian= gd.getNextBoolean();
this.lmas_gaussian= (int) gd.getNextNumber();
this.lmas_adjust_wm= gd.getNextBoolean();
this.lmas_adjust_wy= gd.getNextBoolean();
this.lmas_adjust_ag= gd.getNextBoolean();
......@@ -780,18 +809,22 @@ public class ImageDttParameters {
this.lmas_poly_str_scale = gd.getNextNumber();
this.lmas_poly_str_min = gd.getNextNumber();
this.lmas_poly_continue= gd.getNextBoolean();
this.lmas_LY_single= gd.getNextBoolean();
this.lmas_LY_single_LY= gd.getNextBoolean();
this.lmas_lambda_initial = gd.getNextNumber();
this.lmas_rms_diff = gd.getNextNumber();
this.lmas_num_iter= (int) gd.getNextNumber();
this.lmas_max_rel_rms = gd.getNextNumber();
this.lmas_min_strength = gd.getNextNumber();
this.lmas_min_ac = gd.getNextNumber();
this.lmas_min_min_ac = gd.getNextNumber();
this.lmas_min_min_ac = gd.getNextNumber();
this.lmas_max_area = gd.getNextNumber();
this.lma_gaussian= gd.getNextBoolean();
this.lma_gaussian= (int) gd.getNextNumber();
this.lma_second= gd.getNextBoolean();
this.lma_second_gaussian= gd.getNextBoolean();
this.lma_second_gaussian= (int) gd.getNextNumber();
this.lma_adjust_wm= gd.getNextBoolean();
this.lma_adjust_wy= gd.getNextBoolean();
this.lma_adjust_wxy= gd.getNextBoolean();
......@@ -817,6 +850,7 @@ public class ImageDttParameters {
this.lma_rms_diff = gd.getNextNumber();
this.lma_num_iter= (int) gd.getNextNumber();
this.lma_multi_cons = gd.getNextBoolean();
this.lma_max_rel_rms = gd.getNextNumber();
this.lma_min_strength = gd.getNextNumber();
this.lma_min_ac = gd.getNextNumber();
......@@ -966,6 +1000,10 @@ public class ImageDttParameters {
properties.setProperty(prefix+"lmas_poly_str_scale", this.lmas_poly_str_scale +"");
properties.setProperty(prefix+"lmas_poly_str_min", this.lmas_poly_str_min +"");
properties.setProperty(prefix+"lmas_poly_continue", this.lmas_poly_continue +"");
properties.setProperty(prefix+"lmas_LY_single", this.lmas_LY_single +"");
properties.setProperty(prefix+"lmas_LY_single_LY", this.lmas_LY_single_LY +"");
properties.setProperty(prefix+"lmas_lambda_initial", this.lmas_lambda_initial +"");
properties.setProperty(prefix+"lmas_rms_diff", this.lmas_rms_diff +"");
properties.setProperty(prefix+"lmas_num_iter", this.lmas_num_iter +"");
......@@ -1004,6 +1042,7 @@ public class ImageDttParameters {
properties.setProperty(prefix+"lma_rms_diff", this.lma_rms_diff +"");
properties.setProperty(prefix+"lma_num_iter", this.lma_num_iter +"");
properties.setProperty(prefix+"lma_multi_cons", this.lma_multi_cons +"");
properties.setProperty(prefix+"lma_max_rel_rms", this.lma_max_rel_rms +"");
properties.setProperty(prefix+"lma_min_strength", this.lma_min_strength +"");
properties.setProperty(prefix+"lma_min_ac", this.lma_min_ac +"");
......@@ -1148,7 +1187,14 @@ public class ImageDttParameters {
if (properties.getProperty(prefix+"pcorr_dbg_offsy")!=null) this.pcorr_dbg_offsy=Double.parseDouble(properties.getProperty(prefix+"pcorr_dbg_offsy"));
if (properties.getProperty(prefix+"lma_disp_range")!=null) this.lma_disp_range=Double.parseDouble(properties.getProperty(prefix+"lma_disp_range"));
if (properties.getProperty(prefix+"lmas_gaussian")!=null) this.lmas_gaussian=Boolean.parseBoolean(properties.getProperty(prefix+"lmas_gaussian"));
if (properties.getProperty(prefix+"lmas_gaussian")!=null) {
String lma_function = properties.getProperty(prefix+"lmas_gaussian");
if (lma_function.equals("true") || lma_function.equals("false")) {
this.lmas_gaussian = Boolean.parseBoolean(lma_function)? 1 : 0;
} else {
this.lmas_gaussian=Integer.parseInt(lma_function);
}
}
if (properties.getProperty(prefix+"lmas_adjust_wm")!=null) this.lmas_adjust_wm=Boolean.parseBoolean(properties.getProperty(prefix+"lmas_adjust_wm"));
if (properties.getProperty(prefix+"lmas_adjust_wy")!=null) this.lmas_adjust_wy=Boolean.parseBoolean(properties.getProperty(prefix+"lmas_adjust_wy"));
if (properties.getProperty(prefix+"lmas_adjust_ag")!=null) this.lmas_adjust_ag=Boolean.parseBoolean(properties.getProperty(prefix+"lmas_adjust_ag"));
......@@ -1156,6 +1202,10 @@ public class ImageDttParameters {
if (properties.getProperty(prefix+"lmas_poly_str_scale")!=null) this.lmas_poly_str_scale=Double.parseDouble(properties.getProperty(prefix+"lmas_poly_str_scale"));
if (properties.getProperty(prefix+"lmas_poly_str_min")!=null) this.lmas_poly_str_min=Double.parseDouble(properties.getProperty(prefix+"lmas_poly_str_min"));
if (properties.getProperty(prefix+"lmas_poly_continue")!=null) this.lmas_poly_continue=Boolean.parseBoolean(properties.getProperty(prefix+"lmas_poly_continue"));
if (properties.getProperty(prefix+"lmas_LY_single")!=null) this.lmas_LY_single=Boolean.parseBoolean(properties.getProperty(prefix+"lmas_LY_single"));
if (properties.getProperty(prefix+"lmas_LY_single_LY")!=null) this.lmas_LY_single_LY=Boolean.parseBoolean(properties.getProperty(prefix+"lmas_LY_single_LY"));
if (properties.getProperty(prefix+"lmas_lambda_initial")!=null) this.lmas_lambda_initial=Double.parseDouble(properties.getProperty(prefix+"lmas_lambda_initial"));
if (properties.getProperty(prefix+"lmas_rms_diff")!=null) this.lmas_rms_diff=Double.parseDouble(properties.getProperty(prefix+"lmas_rms_diff"));
if (properties.getProperty(prefix+"lmas_num_iter")!=null) this.lmas_num_iter=Integer.parseInt(properties.getProperty(prefix+"lmas_num_iter"));
......@@ -1165,9 +1215,24 @@ public class ImageDttParameters {
if (properties.getProperty(prefix+"lmas_min_min_ac")!=null) this.lmas_min_min_ac=Double.parseDouble(properties.getProperty(prefix+"lmas_min_min_ac"));
if (properties.getProperty(prefix+"lmas_max_area")!=null) this.lmas_max_area=Double.parseDouble(properties.getProperty(prefix+"lmas_max_area"));
if (properties.getProperty(prefix+"lma_gaussian")!=null) this.lma_gaussian=Boolean.parseBoolean(properties.getProperty(prefix+"lma_gaussian"));
if (properties.getProperty(prefix+"lma_gaussian")!=null) {
String lma_function = properties.getProperty(prefix+"lma_gaussian");
if (lma_function.equals("true") || lma_function.equals("false")) {
this.lma_gaussian = Boolean.parseBoolean(lma_function)? 1 : 0;
} else {
this.lma_gaussian=Integer.parseInt(lma_function);
}
}
if (properties.getProperty(prefix+"lma_second")!=null) this.lma_second=Boolean.parseBoolean(properties.getProperty(prefix+"lma_second"));
if (properties.getProperty(prefix+"lma_second_gaussian")!=null) this.lma_second_gaussian=Boolean.parseBoolean(properties.getProperty(prefix+"lma_second_gaussian"));
if (properties.getProperty(prefix+"lma_second_gaussian")!=null) {
String lma_function = properties.getProperty(prefix+"lma_second_gaussian");
if (lma_function.equals("true") || lma_function.equals("false")) {
this.lma_second_gaussian = Boolean.parseBoolean(lma_function)? 1 : 0;
} else {
this.lma_second_gaussian=Integer.parseInt(lma_function);
}
}
if (properties.getProperty(prefix+"lma_adjust_wm")!=null) this.lma_adjust_wm=Boolean.parseBoolean(properties.getProperty(prefix+"lma_adjust_wm"));
if (properties.getProperty(prefix+"lma_adjust_wy")!=null) this.lma_adjust_wy=Boolean.parseBoolean(properties.getProperty(prefix+"lma_adjust_wy"));
if (properties.getProperty(prefix+"lma_adjust_wxy")!=null) this.lma_adjust_wxy=Boolean.parseBoolean(properties.getProperty(prefix+"lma_adjust_wxy"));
......@@ -1192,6 +1257,7 @@ public class ImageDttParameters {
if (properties.getProperty(prefix+"lma_rms_diff")!=null) this.lma_rms_diff=Double.parseDouble(properties.getProperty(prefix+"lma_rms_diff"));
if (properties.getProperty(prefix+"lma_num_iter")!=null) this.lma_num_iter=Integer.parseInt(properties.getProperty(prefix+"lma_num_iter"));
if (properties.getProperty(prefix+"lma_multi_cons")!=null) this.lma_multi_cons=Boolean.parseBoolean(properties.getProperty(prefix+"lma_multi_cons"));
if (properties.getProperty(prefix+"lma_max_rel_rms")!=null) this.lma_max_rel_rms=Double.parseDouble(properties.getProperty(prefix+"lma_max_rel_rms"));
if (properties.getProperty(prefix+"lma_min_strength")!=null) this.lma_min_strength=Double.parseDouble(properties.getProperty(prefix+"lma_min_strength"));
if (properties.getProperty(prefix+"lma_min_ac")!=null) this.lma_min_ac=Double.parseDouble(properties.getProperty(prefix+"lma_min_ac"));
......@@ -1200,9 +1266,9 @@ public class ImageDttParameters {
if (properties.getProperty(prefix+"lma_str_scale")!=null) this.lma_str_scale=Double.parseDouble(properties.getProperty(prefix+"lma_str_scale"));
if (properties.getProperty(prefix+"lma_str_offset")!=null) this.lma_str_offset=Double.parseDouble(properties.getProperty(prefix+"lma_str_offset"));
if (properties.getProperty(prefix+"lma_diff_xy")!=null) this.lma_diff_xy=Boolean.parseBoolean(properties.getProperty(prefix+"lma_diff_xy"));
if (properties.getProperty(prefix+"lma_diff_minw")!=null) this.lma_diff_minw=Double.parseDouble(properties.getProperty(prefix+"lma_diff_minw"));
if (properties.getProperty(prefix+"lma_diff_sigma")!=null) this.lma_diff_sigma=Double.parseDouble(properties.getProperty(prefix+"lma_diff_sigma"));
if (properties.getProperty(prefix+"lma_diff_xy")!=null) this.lma_diff_xy=Boolean.parseBoolean(properties.getProperty(prefix+"lma_diff_xy"));
if (properties.getProperty(prefix+"lma_diff_minw")!=null) this.lma_diff_minw=Double.parseDouble(properties.getProperty(prefix+"lma_diff_minw"));
if (properties.getProperty(prefix+"lma_diff_sigma")!=null) this.lma_diff_sigma=Double.parseDouble(properties.getProperty(prefix+"lma_diff_sigma"));
if (properties.getProperty(prefix+"lma_debug_level")!=null) this.lma_debug_level=Integer.parseInt(properties.getProperty(prefix+"lma_debug_level"));
if (properties.getProperty(prefix+"lma_debug_level1")!=null) this.lma_debug_level1=Integer.parseInt(properties.getProperty(prefix+"lma_debug_level1"));
......@@ -1343,6 +1409,10 @@ public class ImageDttParameters {
idp.lmas_poly_str_scale = this.lmas_poly_str_scale;
idp.lmas_poly_str_min = this.lmas_poly_str_min;
idp.lmas_poly_continue = this.lmas_poly_continue;
idp.lmas_LY_single = this.lmas_LY_single;
idp.lmas_LY_single_LY = this.lmas_LY_single_LY;
idp.lmas_lambda_initial = this.lmas_lambda_initial;
idp.lmas_rms_diff = this.lmas_rms_diff;
idp.lmas_num_iter = this.lmas_num_iter;
......@@ -1379,6 +1449,7 @@ public class ImageDttParameters {
idp.lma_rms_diff = this.lma_rms_diff;
idp.lma_num_iter = this.lma_num_iter;
idp.lma_multi_cons = this.lma_multi_cons;
idp.lma_max_rel_rms= this.lma_max_rel_rms;
idp.lma_min_strength= this.lma_min_strength;
idp.lma_min_ac= this.lma_min_ac;
......
......@@ -5034,7 +5034,13 @@ public class QuadCLT extends QuadCLTCPU {
clt_parameters.shift_x, // final double shiftX, // shift image horizontally (positive - right) - just for testing
clt_parameters.shift_y, // final double shiftY, // shift image vertically (positive - down)
clt_parameters.tileStep, // final int tileStep, // process tileStep x tileStep cluster of tiles when adjusting lazy eye parameters
clt_parameters.img_dtt.getMcorrSelLY(getNumSensors()), // final int mcorr_sel, // +1 - all, +2 - dia, +4 - sq, +8 - neibs, +16 - hor + 32 - vert
clt_parameters.img_dtt.getMcorrSelLY(getNumSensors()), // final int mcorr_sel, // +1 - all, +2 - dia, +4 - sq, +8 - neibs, +16 - hor + 32 - vert
clt_parameters.img_dtt.mcorr_comb_width, // final int mcorr_comb_width, // combined correlation tile width
clt_parameters.img_dtt.mcorr_comb_height, // final int mcorr_comb_height, // combined correlation tile full height
clt_parameters.img_dtt.mcorr_comb_offset, // final int mcorr_comb_offset, // combined correlation tile height offset: 0 - centered (-height/2 to height/2), height/2 - only positive (0 to height)
clt_parameters.img_dtt.mcorr_comb_disp, // final double mcorr_comb_disp, // Combined tile per-pixel disparity for baseline == side of a square
clt_parameters.tileX, // final int debug_tileX,
clt_parameters.tileY, // final int debug_tileY,
threadsMax, // final int threadsMax, // maximal number of threads to launch
......
......@@ -5666,7 +5666,13 @@ public class QuadCLTCPU {
clt_parameters.shift_x, // final double shiftX, // shift image horizontally (positive - right) - just for testing
clt_parameters.shift_y, // final double shiftY, // shift image vertically (positive - down)
clt_parameters.tileStep, // final int tileStep, // process tileStep x tileStep cluster of tiles when adjusting lazy eye parameters
clt_parameters.img_dtt.getMcorrSelLY(getNumSensors()), // final int mcorr_sel, // +1 - all, +2 - dia, +4 - sq, +8 - neibs, +16 - hor + 32 - vert
clt_parameters.img_dtt.getMcorrSelLY(getNumSensors()), // final int mcorr_sel, // +1 - all, +2 - dia, +4 - sq, +8 - neibs, +16 - hor + 32 - vert
clt_parameters.img_dtt.mcorr_comb_width, // final int mcorr_comb_width, // combined correlation tile width
clt_parameters.img_dtt.mcorr_comb_height, // final int mcorr_comb_height, // combined correlation tile full height
clt_parameters.img_dtt.mcorr_comb_offset, // final int mcorr_comb_offset, // combined correlation tile height offset: 0 - centered (-height/2 to height/2), height/2 - only positive (0 to height)
clt_parameters.img_dtt.mcorr_comb_disp, // final double mcorr_comb_disp, // Combined tile per-pixel disparity for baseline == side of a square
clt_parameters.tileX, // final int debug_tileX,
clt_parameters.tileY, // final int debug_tileY,
threadsMax, // final int threadsMax, // maximal number of threads to launch
......@@ -8203,6 +8209,10 @@ public class QuadCLTCPU {
int op = ImageDtt.setImgMask(0, 0xf);
op = ImageDtt.setPairMask(op,0xf);
op = ImageDtt.setForcedDisparity(op,true);
bg_scan.setSelected(bg_sel);
combo_scan.setSelected(combo_use);
bg_scan.setStrength(data[1]); // will not be used
combo_scan.setStrength(data[3]);
for (int ty = 0; ty < height; ty++) {
for (int tx = 0; tx < width; tx++) {
int indx = ty*width+tx;
......@@ -8218,6 +8228,123 @@ public class QuadCLTCPU {
return true;
}
// for now works only from file (using
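// Modify the preliminary BG and combo scans before LY measurement: optionally grow the BG selection,
// mark everything above BG tiles as BG, remove BG tiles from the combo selection, and fill small gaps
// in the combo selection by interpolating disparity from already-selected neighbors (optionally
// strength-weighted, with the interpolated strength scaled by scale_derivative_strength).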
public void updateScansForLY(
int bg_scan_indx,
int combo_scan_indx,
boolean top_bg, // all above bg is bg
int fill_gaps_bg, // 1 - in 4 directions by 1, 2 - in 8 directions by 1,
int fill_gaps_combo, // 1 - in 4 directions by 1, 2 - in 8 directions by 1,
boolean use_strength, // weight average disparity by strength
double scale_derivative_strength // 1.0 - new strength - average of neibs, 0.5 - only 1/2 of neibs
) {
int op = ImageDtt.setImgMask(0, 0xf);
op = ImageDtt.setPairMask(op,0xf);
op = ImageDtt.setForcedDisparity(op,true);
int width = tp.getTilesX();
int height = tp.getTilesY();
CLTPass3d bg_scan = tp.clt_3d_passes.get(bg_scan_indx);
CLTPass3d combo_scan = tp.clt_3d_passes.get(combo_scan_indx);
TileNeibs tn = new TileNeibs(width,height);
// int [][] bg_tile_op = bg_scan.tile_op;
// int [][] combo_tile_op = combo_scan.tile_op;
boolean [] bg_sel = bg_scan.getSelected();
boolean [] combo_sel = combo_scan.getSelected();
double [] combo_disparity = new double [width*height];
{
int indx = 0;
for (int ty = 0; ty < height; ty++) {
for (int tx = 0; tx < width; tx++) {
combo_disparity[indx++] = combo_scan.disparity[ty][tx];
}
}
}
double [] combo_strength = null;
if (use_strength) {
combo_strength = combo_scan.getStrength();
}
if (fill_gaps_bg > 0) {
tp.growTiles(
fill_gaps_bg, // grow tile selection by 1 over non-background tiles 1: 4 directions, 2 - 8 directions, 3 - 8 by 1, 4 by 1 more
bg_sel,
null); // prohibit
}
if (top_bg) {
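// Walk up from every selected BG tile and mark all tiles directly above it (same column) as background.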
for (int indx = width; indx < bg_sel.length; indx++) {
if (bg_sel[indx]) {
int indx_up = tn.getNeibIndex(indx, TileNeibs.DIR_N);
if ((indx_up >= 0) && !bg_sel[indx_up]) {
while (indx_up >= 0) {
bg_sel[indx_up] = true;
indx_up = tn.getNeibIndex(indx_up, TileNeibs.DIR_N);
}
}
}
}
}
bg_scan.setSelected(bg_sel); // maybe not needed, as it is already the same array
// remove from combo all bg
for (int indx = 0; indx < bg_sel.length; indx++) {
combo_sel[indx] &= !bg_sel[indx];
}
// Fill gaps
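// Each pass grows the combo selection by one tile (BG tiles are prohibited); a tile added in this pass
// gets the (optionally strength-weighted) average disparity of its already-selected neighbors, and its
// strength is the neighbors' average strength scaled by scale_derivative_strength.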
for (; fill_gaps_combo > 0; fill_gaps_combo--) {
boolean [] sel_new = combo_sel.clone();
int num_sel =0;
for (int i = 0; i < sel_new.length; i++) if (sel_new[i]) num_sel++;
System.out.println("num_sel = "+num_sel);
tp.growTiles(
1, // grow tile selection by 1 over non-background tiles 1: 4 directions, 2 - 8 directions, 3 - 8 by 1, 4 by 1 more
sel_new,
bg_sel); // prohibit
num_sel =0;
for (int i = 0; i < sel_new.length; i++) if (sel_new[i]) num_sel++;
System.out.println("num_sel grown = "+num_sel);
for (int indx = 0; indx < combo_sel.length; indx++) {
if (!combo_sel[indx] && sel_new[indx]) {
double sum_w = 0.0;
double sum_wd = 0.0;
double sum_d = 0.0;
int num_neibs = 0;
for (int dir = 0; dir< 8; dir++) {
int indx1 = tn.getNeibIndex(indx, dir);
if ((indx1 >= 0) && combo_sel[indx1]) {
double w = (combo_strength != null) ? combo_strength[indx1]:1.0;
sum_w += w;
sum_d += combo_disparity[indx1];
sum_wd += w * combo_disparity[indx1];
num_neibs++;
}
}
// num_neibs should be > 0;
if (combo_strength != null) {
combo_strength[indx] = scale_derivative_strength * sum_w/num_neibs;
}
if (sum_w > 0) {
combo_disparity[indx] = sum_wd/sum_w;
} else {
combo_disparity[indx] = sum_d/num_neibs;
}
}
}
combo_sel = sel_new;
}
combo_scan.setSelected(combo_sel); // maybe not needed, as it is already the same array
// prepare tile_op and disparity for
for (int ty = 0; ty < height; ty++) {
for (int tx = 0; tx < width; tx++) {
int indx = ty * width + tx;
bg_scan.tile_op[ty][tx] = bg_sel[indx] ? op: 0;
combo_scan.tile_op[ty][tx] = combo_sel[indx] ? op: 0;
bg_scan.disparity[ty][tx] = bg_sel[indx]? 0.0: Double.NaN;
combo_scan.disparity[ty][tx] = combo_sel[indx]? combo_disparity[indx]: Double.NaN;
}
}
}
public boolean extrinsicsCLT(
CLTParameters clt_parameters,
String dbg_path, // if not null - read extrinsics_bgnd_combo file instead of extrinsics_prepare
......@@ -8239,15 +8366,16 @@ public class QuadCLTCPU {
debugLevel);
}
final boolean batch_mode = false; // clt_parameters.batch_run;
int debugLevelInner = batch_mode ? -5: debugLevel;
boolean update_disp_from_latest = clt_parameters.lym_update_disp ; // true;
int max_tries = clt_parameters.lym_iter; // 25;
double min_sym_update = clt_parameters.getLymChange(is_aux); // 4e-6; // stop iterations if no angle changes more than this
double min_poly_update = clt_parameters.lym_poly_change; // Parameter vector difference to exit from polynomial correction
int bg_scan = 0;
int bg_scan = 0+0;
int combo_scan= tp.clt_3d_passes.size()-1;
AlignmentCorrection ac = null;
if (!clt_parameters.ly_lma_ers ) {
ac = new AlignmentCorrection(this);
......@@ -8261,11 +8389,37 @@ public class QuadCLTCPU {
tp.showScan(
tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
"combo_scan-"+combo_scan+"_post"); //String title)
// tp.showScan(
// tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
// "combo_measured_scan-"+combo_scan+"_post"); //String title)
}
// Increase density before LY
if (clt_parameters.lym_mod_map) { // may not work when running directly, w/o getPreparedExtrinsics(dbg_path)
updateScansForLY(
bg_scan, // int bg_scan_indx,
combo_scan, // int combo_scan_indx,
clt_parameters.lym_top_bg , // boolean top_bg, // all above bg is bg
clt_parameters.lym_fill_gaps_bg , // int fill_gaps_bg, // 1 - in 4 directions by 1, 2 - in 8 directions by 1,
clt_parameters.lym_fill_gaps_combo , // int fill_gaps_combo, // 1 - in 4 directions by 1, 2 - in 8 directions by 1,
clt_parameters.lym_use_strength , // boolean use_strength,
clt_parameters.lym_scale_deriv_str); // double scale_derivative_strength, // 1.0 - new strength - average of neibs, 0.5 - only 1/2 of neibs
}
if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel >-1)) {
//if (clt_parameters.show_extrinsic && (debugLevel > -1)) { // temporary
tp.showScan(
tp.clt_3d_passes.get(bg_scan), // CLTPass3d scan,
"bg_scan_post_mod"); //String title)
tp.showScan(
tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
"combo_measured_scan-"+combo_scan+"_post"); //String title)
"combo_scan-"+combo_scan+"_post_mod"); //String title)
// tp.showScan(
// tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
// "combo_measured_scan-"+combo_scan+"_post_mod"); //String title)
}
double comp_diff = min_sym_update + 1; // (> min_sym_update)
for (int num_iter = 0; num_iter < max_tries; num_iter++){
......@@ -8310,12 +8464,20 @@ public class QuadCLTCPU {
boolean apply_extrinsic = (clt_parameters.ly_corr_scale != 0.0);
CLTPass3d scan = tp.clt_3d_passes.get(combo_scan);
// for the second half of runs (always for single run) - limit infinity min/max
double min_strength = 0.1; // 0.23;
int [] pfmt = {8, 3};
if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel >-1)) {
ea.showInput(scan.getLazyEyeData(),"first_data");
System.out.println(ea.stringWeightedLY(
scan.getLazyEyeData(), // double [][] data,
null, // double [][] ref_data,
min_strength, // double min_strength,
pfmt, // int [] format,
"_00")); // String suffix))
}
boolean debug_actual_LY_derivs = debugLevel > 9; // true
boolean use_tarz = false;
if (debug_actual_LY_derivs) {
debugLYDerivatives(
ea, // ExtrinsicAdjustment ea,
......@@ -8324,7 +8486,8 @@ public class QuadCLTCPU {
false, // boolean update_disparity, // re-measure disparity before measuring LY
threadsMax, // final int threadsMax, // maximal number of threads to launch
updateStatus, //final boolean updateStatus,
1E-2, // 1E-3, // double delta,
0.01, // 0.001, // 3.3333E-3, // double delta,
use_tarz, // boolean use_tarz, // derivatives by tarz, not symmetrical vectors
debugLevel); // final int debugLevel)
}
......@@ -11720,7 +11883,13 @@ public class QuadCLTCPU {
clt_parameters.shift_x, // final double shiftX, // shift image horizontally (positive - right) - just for testing
clt_parameters.shift_y, // final double shiftY, // shift image vertically (positive - down)
clt_parameters.tileStep, // final int tileStep, // process tileStep x tileStep cluster of tiles when adjusting lazy eye parameters
clt_parameters.img_dtt.getMcorrSelLY(getNumSensors()), // final int mcorr_sel, // +1 - all, +2 - dia, +4 - sq, +8 - neibs, +16 - hor + 32 - vert
clt_parameters.img_dtt.getMcorrSelLY(getNumSensors()), // final int mcorr_sel, // +1 - all, +2 - dia, +4 - sq, +8 - neibs, +16 - hor + 32 - vert
clt_parameters.img_dtt.mcorr_comb_width, // final int mcorr_comb_width, // combined correlation tile width
clt_parameters.img_dtt.mcorr_comb_height, // final int mcorr_comb_height, // combined correlation tile full height
clt_parameters.img_dtt.mcorr_comb_offset, // final int mcorr_comb_offset, // combined correlation tile height offset: 0 - centered (-height/2 to height/2), height/2 - only positive (0 to height)
clt_parameters.img_dtt.mcorr_comb_disp, // final double mcorr_comb_disp, // Combined tile per-pixel disparity for baseline == side of a square
clt_parameters.tileX, // final int debug_tileX,
clt_parameters.tileY, // final int debug_tileY,
threadsMax, // final int threadsMax, // maximal number of threads to launch
......@@ -12533,9 +12702,10 @@ public class QuadCLTCPU {
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
double delta,
boolean use_tarz, // derivatives by tarz, not symmetrical vectors
final int debugLevel)
{
delta = 0.0003;
// delta = 0.001;
/*double [] parameter_scales4 = { // multiply delay for each parameter
0.3, // 0.014793657667505566, // 00 10 tilt0
0.3, // 0.015484017460841183, // 01 10 tilt1
......@@ -12581,7 +12751,7 @@ public class QuadCLTCPU {
int num_sensors=getNumSensors();
double [] parameter_scales = new double [corr_vector.getLength()];
for (int i = 0; i < num_sensors; i++) {
parameter_scales [corr_vector.getRollIndex()+ i] = (i > 0) ? scale_rl : scale_rl0;
parameter_scales [corr_vector.getRollIndex()+ i] = ((i > 0) || use_tarz)? scale_rl : scale_rl0;
if (i < num_sensors - 1) {
parameter_scales[corr_vector.getTiltIndex()+ i]=scale_tl;
parameter_scales[corr_vector.getAzimuthIndex()+i]=scale_az;
......@@ -12615,22 +12785,47 @@ public class QuadCLTCPU {
ly_initial, // double[][] data,
"drv_reference");// String title);
}
String [] titles = corr_vector.getCorrNames(); // new String [num_pars]; //ea.getSymNames(); // why "S" here, while it is tarz???
// for (int i = 0; i < num_pars; i++) {
// titles[i] = "S"+i;
// }
// String [] titles = corr_vector.getCorrNames(); // new String [num_pars]; //ea.getSymNames(); // why "S" here, while it is tarz???
// geometryCorrection.getCorrVector(par_inc,null) converts sym -> tarz
String [] titles;
if (use_tarz) {
titles = corr_vector.getCorrNames();
} else {
titles = new String [num_pars]; //ea.getSymNames(); // why "S" here, while it is tarz???
for (int i = 0; i < num_pars; i++) {
titles[i] = "S"+i;
}
}
System.out.println("Initial:\n"+corr_vector.toString(true)); // true - short out
double min_strength = 0.1; // 0.23
int [] pfmt = {8,3};
if (debugLevel > -3) {
System.out.println(ea.stringWeightedLY(
scan.getLazyEyeData(), // double [][] data,
null, // double [][] ref_data,
min_strength, // double min_strength,
pfmt, // int [] format,
"_00")); // String suffix))
}
for (int npar = 0; npar < num_pars; npar++) {
// perform asymmetric delta
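// Forward difference: increment a single parameter by delta*parameter_scales[npar], re-measure the LY
// data, and estimate the per-cluster derivative as (LY(p+delta) - LY(p)) * rdelta, rdelta = 1/increment.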
double [] par_inc = new double [num_pars];
par_inc[npar] = delta * parameter_scales[npar];
CorrVector corr_delta = geometryCorrection.getCorrVector(par_inc,null); // , par_mask); all parameters
CorrVector corr_delta;
if (use_tarz) {
corr_delta = new CorrVector (geometryCorrection,par_inc);
} else {
corr_delta = geometryCorrection.getCorrVector(par_inc,null); // , par_mask); all parameters
}
CorrVector corr_vectorp = corr_vector.clone();
corr_vectorp.incrementVector(corr_delta, 1.0); // 0.5 for p/m
geometryCorrection.setCorrVector(corr_vectorp) ;
double rdelta = 1.0/ par_inc[npar];
// System.out.println("S"+npar+" scale="+rdelta); // +"\n"+(geometryCorrection.getCorrVector().toString()));
System.out.println(npar+": "+ titles[npar]+", scale="+rdelta); // +"\n"+(geometryCorrection.getCorrVector().toString()));
System.out.println("delta:\n"+corr_delta.toString(true)); // true - short out
System.out.println("vector:\n"+corr_vectorp.toString(true)); // true - short out
gpuResetCorrVector();
if (update_disparity) {
CLTMeasureCorr( // perform single pass according to prepared tiles operations and disparity
......@@ -12670,12 +12865,38 @@ public class QuadCLTCPU {
ea.showInput(
ly, // double[][] data,
"drv_par"+npar+"-B");// String title);
*/
*/
// double min_strength = 0.23;
// int [] pfmt = {8,3};
if (debugLevel > -3) {
System.out.println(ea.stringWeightedLY(
ly, // double [][] data,
null, // double [][] ref_data,
min_strength, // double min_strength,
pfmt, // int [] format,
"_"+titles[npar])); // String suffix))
}
for (int cluster = 0; cluster < clusters; cluster++) if ((ly_initial[cluster] != null) && (ly[cluster]!=null)){
for (int nl = 0; nl < ly_initial[cluster].length; nl++) {
ly_diff[npar][nl][cluster] = rdelta * (ly[cluster][nl] - ly_initial[cluster][nl]);
}
}
if (debugLevel > -3) {
double [][] ly_diff1 = new double [ly_initial.length][];
for (int cluster = 0; cluster < clusters; cluster++) if ((ly_initial[cluster] != null) && (ly[cluster]!=null)){
ly_diff1[cluster] = new double [ly_initial[cluster].length];
for (int nl = 0; nl < ly_initial[cluster].length; nl++) {
ly_diff1[cluster][nl] = rdelta * (ly[cluster][nl] - ly_initial[cluster][nl]);
}
}
System.out.println(ea.stringWeightedLY(
ly_diff1, // double [][] data,
ly_initial, // double [][] ref_data,
min_strength, // double min_strength,
pfmt, // int [] format,
"_d"+titles[npar])); // String suffix))
}
}
geometryCorrection.setCorrVector(corr_vector) ; // restore
gpuResetCorrVector();
......