Commit f580e71e authored by Andrey Filippov

tested new field calibration with ERS

parent b47dba20
......@@ -25,7 +25,7 @@ public class CLTParameters {
public double shift_y = 0.0;
public int tileStep = 4; // process tileStep x tileStep cluster of tiles when adjusting lazy eye parameters
public int iclt_mask = 15; // which transforms to combine
public int tileX = 258; // number of kernel tile (0..163)
public int tileX = -258; // number of kernel tile (0..163)
public int tileY = 133; // number of kernel tile (0..122)
public int dbg_mode = 0; // 0 - normal, +1 - no DCT/IDCT
public int ishift_x = 0; // debug feature - shift source image by this pixels left
......@@ -165,32 +165,37 @@ public class CLTParameters {
public boolean inf_restore_disp = true; // Add disparity back to d{x,y}[i] (debug feature)
// Lazy eye parameters
public double ly_gt_strength = 0.18; // use some configurable parameters
public boolean ly_gt_use_wnd = true;
public double ly_gt_rms = 0.2; // split small source samples to FG/BG if all aux tile RMS exceeds this value
public boolean ly_lma_ers = true; // Use 2020 LMA-based measurement of mismatch
public double ly_gt_strength = 0.18; // use some configurable parameters
public boolean ly_gt_use_wnd = true; //
public double ly_gt_rms = 0.2; // split small source samples to FG/BG if all aux tile RMS exceeds this value
// boolean split_fg_bg = true;
// boolean for_adjust = false;
public double ly_marg_fract = 0.2; // part of half-width, and half-height to reduce weights
public boolean ly_on_scan = true; // Calculate and apply lazy eye correction after disparity scan (poly or extrinsic)
public boolean ly_inf_en = true; // Simultaneously correct disparity at infinity (both poly and extrinsic)
public int ly_min_forced = 20; // Minimal number of clusters with forced disparity to use it
public boolean ly_aztilt_en = true; // Adjust azimuths and tilts
public boolean ly_diff_roll_en = true; // Adjust differential rolls (3 of 4 angles)
public boolean ly_focalLength= true; // Correct scales (focal length temperature? variations)
public boolean ly_com_roll= false; // Enable common roll (valid for high disparity range only)
public boolean ly_ers_rot= true; // Enable ERS correction of the camera rotation
public boolean ly_ers_lin= false; // Enable ERS correction of the camera linear movement
public boolean ly_ers_forw= true; // Enable ERS correction of the camera linear movement in z direction
public boolean ly_ers_side= false; // true; // Enable ERS correction of the camera linear movement in x direction
public boolean ly_ers_vert= false; // true; // Enable ERS correction of the camera linear movement in y direction
public int ly_par_sel = 0; // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use checkbox selections above)
public int ly_debug_level = 0; // LY debug level
public boolean ly_right_left= false; // equalize weights of right/left FoV (use with horizon in both halves and gross infinity correction)
public boolean ly_right_left= true; // equalize weights of right/left FoV (use with horizon in both halves and gross infinity correction)
public int ly_per_quad = 10; // minimal tiles per quadrant (not counting the worst) to proceed
public double ly_per_quad_r = 0.01; // minimal relative tiles per quadrant (not counting the worst) to proceed
public double ly_per_quad_r = 0.003; // minimal relative tiles per quadrant (not counting the worst) to proceed
public int ly_inf = 10; // minimal number of tiles at infinity to proceed
public double ly_inf_r = 0.01; // minimal relative number of tiles at infinity to proceed
public double ly_inf_r = 0.0; //0.01; // minimal relative number of tiles at infinity to proceed
public int ly_inf_scale = 20; // minimal number of tiles at infinity to apply weight scaling
public double ly_inf_scale_r = 0.02; // minimal relative number of tiles at infinity to apply weight scaling
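
ly_par_sel bypasses the individual checkboxes with a single bit mask (bit 0 - sym0, bit 1 - sym1, ...). A minimal sketch of how such a mask can be decoded into the per-parameter selection, mirroring the manual_par_sel branch in GeometryCorrection further down (the loop body there is outside this diff, so the decode shown here is an assumption):

    // Sketch only: decode a non-zero ly_par_sel / manual_par_sel bit mask.
    // The mask length (sym0..sym18 = 19 entries) is assumed from the context below.
    int manual_par_sel = 0b111 << 16;              // example: select sym16..sym18 (ERS linear terms)
    boolean [] par_mask = new boolean [19];
    if (manual_par_sel != 0) {                     // 0 means "use the boolean flags instead"
        for (int i = 0; i < par_mask.length; i++) {
            par_mask[i] = ((manual_par_sel >> i) & 1) != 0;   // bit i selects sym_i
        }
    }
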
......@@ -203,7 +208,7 @@ public class CLTParameters {
public int ly_smpl_side = 3; // Sample size (side of a square) disp/strength filter
public int ly_smpl_num = 5; // Number after removing worst (should be >1)
// public double ly_meas_disp = 1.5; // Maximal measured relative disparity - using (0.8*disp_scan_step)
public double ly_smpl_rms = 0.2; // 1; // Maximal RMS of the remaining tiles in a sample
public double ly_smpl_rms = 0.05; // 1; // Maximal RMS of the remaining tiles in a sample
public double ly_disp_var = 0.1; // Maximal full disparity difference to 8 neighbors
public double ly_disp_rvar = 0.01; // Maximal relative full disparity difference to 8 neighbors
......@@ -218,18 +223,18 @@ public class CLTParameters {
public double lym_overexp = 0.0001; // Any (near) saturated pixels - discard tile (see sat_level also)
public boolean lym_update_disp = true; // Update target disparity after each step
public int lym_iter = 25; // Maximal number of iterations
private double lym_change = 1e-5; // Parameter vector difference to exit 4e-6 - OK
private double lym_change = 0.5e-5; // Parameter vector difference to exit 4e-6 - OK
private double lym_change_aux = 1e-4; // same for aux camera (currently) lwir
public double lym_poly_change = 0.002; // Parameter vector difference to exit from polynomial correction
public boolean lyf_filter = true; // Filter lazy eye pairs by their values
public int lyf_smpl_side = 8; // 8 x8 masked, 16x16 sampled
public double lyf_rms_max = 0.25; // Maximal RMS (all components to components average)
public boolean lyf_filter = false; // Filter lazy eye pairs by their values
public int lyf_smpl_side = 3; // 8 x8 masked, 16x16 sampled
public double lyf_rms_max = 0.1; // Maximal RMS (all components to components average)
public double lyf_frac_keep = 0.5; // Keep best fit samples, discard worst
public int lyf_min_samples = 5; // Minimal number of tiles remaining in the sample
public boolean lyf_norm_center = true; // Replace samples with a single average with equal weight
public double ly_corr_scale = 1.0; // Scale calculated correction vector
public boolean lyr_filter_ds = false; // true;
public boolean lyr_filter_ds = false; // true;
public boolean lyr_filter_lyf = false; // ~clt_parameters.lyf_filter, but may be different, now off for a single cameras
......@@ -960,18 +965,26 @@ public class CLTParameters {
properties.setProperty(prefix+"ih_norm_center", this.ih_norm_center+"");
properties.setProperty(prefix+"inf_restore_disp", this.inf_restore_disp+"");
properties.setProperty(prefix+"ly_lma_ers", this.ly_lma_ers+"");
properties.setProperty(prefix+"ly_gt_strength", this.ly_gt_strength+"");
properties.setProperty(prefix+"ly_gt_use_wnd", this.ly_gt_use_wnd+"");
properties.setProperty(prefix+"ly_gt_rms", this.ly_gt_rms+"");
properties.setProperty(prefix+"ly_marg_fract", this.ly_marg_fract+"");
properties.setProperty(prefix+"ly_on_scan", this.ly_on_scan+"");
properties.setProperty(prefix+"ly_inf_en", this.ly_inf_en+"");
properties.setProperty(prefix+"ly_min_forced", this.ly_min_forced+"");
properties.setProperty(prefix+"ly_aztilt_en", this.ly_aztilt_en+"");
properties.setProperty(prefix+"ly_diff_roll_en", this.ly_diff_roll_en+"");
properties.setProperty(prefix+"ly_focalLength", this.ly_focalLength+"");
properties.setProperty(prefix+"ly_com_roll", this.ly_com_roll+"");
properties.setProperty(prefix+"ly_ers_rot", this.ly_ers_rot+"");
properties.setProperty(prefix+"ly_ers_lin", this.ly_ers_lin+"");
properties.setProperty(prefix+"ly_ers_forw", this.ly_ers_forw+"");
properties.setProperty(prefix+"ly_ers_side", this.ly_ers_side+"");
properties.setProperty(prefix+"ly_ers_vert", this.ly_ers_vert+"");
properties.setProperty(prefix+"ly_par_sel", this.ly_par_sel+"");
properties.setProperty(prefix+"ly_debug_level", this.ly_debug_level+"");
......@@ -1682,17 +1695,27 @@ public class CLTParameters {
if (properties.getProperty(prefix+"ih_norm_center")!=null) this.ih_norm_center=Boolean.parseBoolean(properties.getProperty(prefix+"ih_norm_center"));
if (properties.getProperty(prefix+"inf_restore_disp")!=null) this.inf_restore_disp=Boolean.parseBoolean(properties.getProperty(prefix+"inf_restore_disp"));
if (properties.getProperty(prefix+"ly_lma_ers")!=null) this.ly_lma_ers=Boolean.parseBoolean(properties.getProperty(prefix+"ly_lma_ers"));
if (properties.getProperty(prefix+"ly_gt_strength")!=null) this.ly_gt_strength=Double.parseDouble(properties.getProperty(prefix+"ly_gt_strength"));
if (properties.getProperty(prefix+"ly_gt_use_wnd")!=null) this.ly_gt_use_wnd=Boolean.parseBoolean(properties.getProperty(prefix+"ly_gt_use_wnd"));
if (properties.getProperty(prefix+"ly_gt_rms")!=null) this.ly_gt_rms=Double.parseDouble(properties.getProperty(prefix+"ly_gt_rms"));
if (properties.getProperty(prefix+"ly_marg_fract")!=null) this.ly_marg_fract=Double.parseDouble(properties.getProperty(prefix+"ly_marg_fract"));
if (properties.getProperty(prefix+"ly_on_scan")!=null) this.ly_on_scan=Boolean.parseBoolean(properties.getProperty(prefix+"ly_on_scan"));
if (properties.getProperty(prefix+"ly_inf_en")!=null) this.ly_inf_en=Boolean.parseBoolean(properties.getProperty(prefix+"ly_inf_en"));
if (properties.getProperty(prefix+"ly_min_forced")!=null) this.ly_min_forced=Integer.parseInt(properties.getProperty(prefix+"ly_min_forced"));
if (properties.getProperty(prefix+"ly_aztilt_en")!=null) this.ly_aztilt_en=Boolean.parseBoolean(properties.getProperty(prefix+"ly_aztilt_en"));
if (properties.getProperty(prefix+"ly_diff_roll_en")!=null) this.ly_diff_roll_en=Boolean.parseBoolean(properties.getProperty(prefix+"ly_diff_roll_en"));
if (properties.getProperty(prefix+"ly_focalLength")!=null) this.ly_focalLength=Boolean.parseBoolean(properties.getProperty(prefix+"ly_focalLength"));
if (properties.getProperty(prefix+"ly_com_roll")!=null) this.ly_com_roll=Boolean.parseBoolean(properties.getProperty(prefix+"ly_com_roll"));
if (properties.getProperty(prefix+"ly_ers_rot")!=null) this.ly_ers_rot=Boolean.parseBoolean(properties.getProperty(prefix+"ly_ers_rot"));
if (properties.getProperty(prefix+"ly_ers_lin")!=null) this.ly_ers_lin=Boolean.parseBoolean(properties.getProperty(prefix+"ly_ers_lin"));
if (properties.getProperty(prefix+"ly_ers_forw")!=null) this.ly_ers_forw=Boolean.parseBoolean(properties.getProperty(prefix+"ly_ers_forw"));
if (properties.getProperty(prefix+"ly_ers_side")!=null) this.ly_ers_side=Boolean.parseBoolean(properties.getProperty(prefix+"ly_ers_side"));
if (properties.getProperty(prefix+"ly_ers_vert")!=null) this.ly_ers_vert=Boolean.parseBoolean(properties.getProperty(prefix+"ly_ers_vert"));
if (properties.getProperty(prefix+"ly_par_sel")!=null) this.ly_par_sel=Integer.parseInt(properties.getProperty(prefix+"ly_par_sel"));
if (properties.getProperty(prefix+"ly_debug_level")!=null) this.ly_debug_level=Integer.parseInt(properties.getProperty(prefix+"ly_debug_level"));
......@@ -2444,25 +2467,35 @@ public class CLTParameters {
gd.addCheckbox ("Replace samples with a single average with equal weight", this.ih_norm_center);
gd.addCheckbox ("Add disparity back to d{x,y}[i] (debug feature)", this.inf_restore_disp);
gd.addTab ("Lazy eye", "Lazy eye parameters");
gd.addCheckbox ("Use 2020 LMA-based measurement of mismatch", this.ly_lma_ers);
gd.addMessage ("--- main-to-aux depth map parameters ---");
gd.addNumericField("Minimal reference (main) channel correlation strength", this.ly_gt_strength, 3);
gd.addNumericField("Minimal reference (main) channel correlation strength", this.ly_gt_strength, 3);
gd.addCheckbox ("Use window for AUX tiles to reduce weight of the hi-res tiles near low-res tile boundaries", this.ly_gt_use_wnd);
gd.addNumericField("Aux disparity thershold to split FG and BG (and disable AUX tile for adjustment)", this.ly_gt_rms, 3);
gd.addMessage ("--- others ---");
gd.addNumericField("Relative weight margins (0.0 - all 1.0, 1.0 sin^2", this.ly_marg_fract, 8,3,"",
"Reduce weigt of peripheral tiles");
gd.addCheckbox ("Calculate and apply lazy eye correction after disparity scan (poly or extrinsic), may repeat", this.ly_on_scan);
gd.addCheckbox ("Adjust disparity using objects at infinity by changing individual tilt and azimuth ", this.ly_inf_en," disable if there are no really far objects in the scene");
gd.addNumericField("Minimal number of clusters with forced disparity to use it (otherwise keep current)",this.ly_min_forced, 0);
gd.addCheckbox ("Adjust azimuths and tilts", this.ly_aztilt_en,"Adjust azimuths and tilts excluding those that change disparity");
gd.addCheckbox ("Adjust differential rolls", this.ly_diff_roll_en,"Adjust differential rolls (3 of 4 rolls, keeping average roll)");
gd.addCheckbox ("Correct scales (focal length temperature? variations)", this.ly_focalLength);
gd.addCheckbox ("Enable common roll adjustment (valid for high disparity range scans only)", this.ly_com_roll);
gd.addCheckbox ("Enable ERS correction of the camera rotation", this.ly_ers_rot);
gd.addCheckbox ("Enable ERS correction of the camera linear movement", this.ly_ers_lin);
gd.addCheckbox ("Enable ERS correction of the camera forward motion", this.ly_ers_forw);
gd.addCheckbox ("Enable ERS correction of the camera sideways motion", this.ly_ers_side);
gd.addCheckbox ("Enable ERS correction of the camera vertical motion", this.ly_ers_vert);
gd.addNumericField("Manual parameter mask selection (0 use checkboxes above)", this.ly_par_sel, 0, 5,"",
"bit 0 - sym0, bit1 - sym1, ...");
gd.addNumericField("Debug level for lazy eye/ers processing", this.ly_debug_level, 0, 5,"",
"Active when global debug level > -1");
"Active when global debug level > -1, 1 - min, 2 - lma steps, 3 - images");
......@@ -2477,14 +2510,14 @@ public class CLTParameters {
gd.addNumericField("Minimal number of tiles at infinity to apply weight scaling", this.ly_inf_scale, 0);
gd.addNumericField("Minimal number of tiles at infinity to apply weight scaling - fraction of all tiles", this.ly_inf_scale_r, 3);
gd.addNumericField("Relative weight of infinity calibration data", this.ly_inf_frac, 3);
gd.addNumericField("Relative weight of infinity calibration data", this.ly_inf_frac, 3);
gd.addNumericField("Maximal disparity to be treated as infinity when adjusting with the rig data", this.ly_inf_max_disparity, 8,3,"pix",
"Only used in guided (by rig data) mode");
gd.addCheckbox ("Correct disparity for infinity tiles )has to disable until code fixed)", this.ly_inf_disp);
gd.addCheckbox ("Force convergence correction during extrinsic, even with no infinity data", this.ly_inf_force);
gd.addCheckbox ("*Use polynomial correction, false - correct tilt/azimuth/roll of each sensor)", this.ly_poly);
gd.addCheckbox ("Correct disparity for infinity tiles )has to disable until code fixed)", this.ly_inf_disp);
gd.addCheckbox ("Force convergence correction during extrinsic, even with no infinity data", this.ly_inf_force);
gd.addCheckbox ("*Use polynomial correction, false - correct tilt/azimuth/roll of each sensor)", this.ly_poly);
gd.addMessage ("--- Lazy eye parameters ---");
gd.addNumericField("Sample size (side of a square)", this.ly_smpl_side, 0);
......@@ -3281,17 +3314,23 @@ public class CLTParameters {
this.ih_norm_center= gd.getNextBoolean();
this.inf_restore_disp= gd.getNextBoolean();
this.ly_lma_ers = gd.getNextBoolean();
this.ly_gt_strength= gd.getNextNumber();
this.ly_gt_use_wnd= gd.getNextBoolean();
this.ly_gt_rms= gd.getNextNumber();
this.ly_marg_fract= gd.getNextNumber();
this.ly_on_scan= gd.getNextBoolean();
this.ly_inf_en= gd.getNextBoolean();
this.ly_min_forced= (int) gd.getNextNumber();
this.ly_aztilt_en= gd.getNextBoolean();
this.ly_diff_roll_en= gd.getNextBoolean();
this.ly_focalLength= gd.getNextBoolean();
this.ly_com_roll= gd.getNextBoolean();
this.ly_ers_rot= gd.getNextBoolean();
this.ly_ers_lin= gd.getNextBoolean();
this.ly_ers_forw= gd.getNextBoolean();
this.ly_ers_side= gd.getNextBoolean();
this.ly_ers_vert= gd.getNextBoolean();
this.ly_par_sel= (int) gd.getNextNumber();
this.ly_debug_level= (int) gd.getNextNumber();
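
The read-back above relies on ImageJ's GenericDialog contract: getNextBoolean()/getNextNumber() return values strictly in the order the corresponding widgets were added, so the three new ERS checkboxes must be read in the same relative position as they were added. A self-contained illustration of that contract (not project code):

    import ij.gui.GenericDialog;

    GenericDialog gd = new GenericDialog("Lazy eye (sketch)");
    gd.addCheckbox("Enable ERS correction of the camera forward motion",  true);
    gd.addCheckbox("Enable ERS correction of the camera sideways motion", false);
    gd.addCheckbox("Enable ERS correction of the camera vertical motion", false);
    gd.showDialog();
    if (!gd.wasCanceled()) {
        boolean ers_forw = gd.getNextBoolean();   // first added -> first read
        boolean ers_side = gd.getNextBoolean();
        boolean ers_vert = gd.getNextBoolean();
    }
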
......
......@@ -85,7 +85,7 @@ import ij.process.ImageProcessor;
}
}
ImagePlus imp_stack = new ImagePlus(title, array_stack);
ImagePlus imp_stack = new ImagePlus(title, array_stack); // stack is empty
imp_stack.getProcessor().resetMinAndMax();
imp_stack.show();
return;
......
......@@ -34,7 +34,12 @@ public class CLTPass3d{
private double [][] disparity_sav; // saved disparity
private int [][] tile_op_sav; // saved tile_op
public double [][] disparity_map = null; // add 4 layers - worst difference for the port
public double [][] lazy_eye_data = null;
public int lma_cluster_size = -1;
public boolean [] lazy_eye_force_disparity = null;
double [] calc_disparity = null; // composite disparity, calculated from "disparity", and "disparity_map" fields
// using horizontal features and corr_magic_scale
// used directly in TileProcessor.compositeScan()
double [] calc_disparity_hor = null; // composite disparity, calculated from "disparity", and "disparity_map" fields
......@@ -55,7 +60,7 @@ public class CLTPass3d{
public boolean [] border_tiles = null; // these are border tiles, zero out alpha
public boolean [] selected = null; // which tiles are selected for this layer
public double [][][][] texture_tiles;
public double [][] max_tried_disparity = null; //[ty][tx] used for combined passes, shows maximal disparity for this tile, regardless of results
public double [][] max_tried_disparity = null; //[ty][tx] used for combined passes, shows maximal disparity for this tile, regardless of results
public boolean is_combo = false;
public boolean is_measured = false;
public String texture = null; // relative (to x3d) path
......@@ -235,6 +240,31 @@ public class CLTPass3d{
}
public boolean [] getLazyEyeForceDisparity() {
return lazy_eye_force_disparity;
}
public void setLazyEyeForceDisparity(boolean [] lazy_eye_force_disparity) {
this.lazy_eye_force_disparity = lazy_eye_force_disparity;
}
public double [][] getLazyEyeData() {
return lazy_eye_data;
}
public void setLazyEyeData(double [][] lazy_eye_data) {
this.lazy_eye_data = lazy_eye_data;
}
public int getLazyEyeClusterSize() {
return lma_cluster_size;
}
public void setLazyEyeClusterSize(int lma_cluster_size) {
this.lma_cluster_size = lma_cluster_size;
}
public boolean [] getSelected(){
return selected;
}
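
A hedged sketch of how the new lazy-eye accessors above could be used by the measurement/adjustment code (the variable names and access path are assumptions, not part of this commit):

    // Hypothetical caller storing lazy-eye LMA results on the measured pass:
    CLTPass3d scan = /* current measurement pass */ null;
    double [][] lazy_eye_data = /* per-cluster mismatch data from cltMeasureLazyEye() */ null;
    boolean [] force_disparity = /* per-cluster infinity flags */ null;
    scan.setLazyEyeData(lazy_eye_data);
    scan.setLazyEyeClusterSize(4);                  // e.g. clt_parameters.tileStep
    scan.setLazyEyeForceDisparity(force_disparity);
    // ... later, during extrinsic adjustment:
    double [][] dsxy = scan.getLazyEyeData();
    if (dsxy != null) { /* feed into ExtrinsicAdjustment.solveCorr() */ }
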
......
......@@ -1948,13 +1948,13 @@ public class Correlation2d {
}
if (debug_graphic) {
if (debug_graphic && (dbg_corr != null)) {
double [][] dbg_corrs = repackCluster(
dbg_corr,
clust_width);
(new ShowDoubleFloatArrays()).showArrays(
(new ShowDoubleFloatArrays()).showArrays( // empty array
dbg_corrs,
dbg_out_width,
dbg_out_height,
......
......@@ -62,20 +62,35 @@ public class ExtrinsicAdjustment {
private double [] weights; // normalized so sum is 1.0 for all - samples and extra regularization terms
private boolean [] force_disparity = null; // boolean [] force_disparity, // same dimension as dsdn, true if disparity should be controlled
private double pure_weight; // weight of samples only
private double [] values;
// private double [] values;
private GeometryCorrection.CorrVector corr_vector = null;
private boolean [] par_mask = null;
private boolean use_rig_offsets = false;
private double [][] measured_dsxy = null;
private double [][] dy_ddisparity = null; // conveniently extracted from dsdn
private double [][] x0y0 = null; //
private double [][] x0y0 = null; //
private double [][] world_xyz = null;
private double [] weight_window = null; // center area is more reliable
public GeometryCorrection geometryCorrection = null;
public int clusterSize;
public int clustersX;
public int clustersY;
public double [] getOldNewRMS() {
double [] on_rms = new double[2];
if (initial_rms != null) {
on_rms[0] = initial_rms[0];
} else {
on_rms[0] = Double.NaN;
}
if (last_rms != null) {
on_rms[1] = last_rms[1];
} else {
on_rms[1] = Double.NaN;
}
return on_rms;
}
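
getOldNewRMS() packages the fit quality before and after the LMA so the caller can decide whether to apply the correction; solveCorr() copies it into its old_new_rms argument (see below). A minimal hedged usage sketch:

    double [] old_new_rms = new double[2];          // filled by solveCorr() via getOldNewRMS()
    // GeometryCorrection.CorrVector corr = ea.solveCorr(..., old_new_rms, ...);
    if (!Double.isNaN(old_new_rms[1]) && (old_new_rms[1] < old_new_rms[0])) {
        // weighted RMS improved - reasonable to apply the correction (optionally scaled by ly_corr_scale)
    }
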
public ExtrinsicAdjustment (
GeometryCorrection gc,
......@@ -88,7 +103,7 @@ public class ExtrinsicAdjustment {
this.clustersY = clustersY;
}
private void showInput(double[][] data, String title) {
public void showInput(double[][] data, String title) {
int clusters = clustersX * clustersY;
double [][] pixels = new double [ExtrinsicAdjustment.INDX_LENGTH][clusters];
for (int cluster = 0; cluster < clusters; cluster++) {
......@@ -136,22 +151,26 @@ public class ExtrinsicAdjustment {
}
public GeometryCorrection.CorrVector solveCorr (
double marg_fract, // part of half-width, and half-height to reduce weights
boolean use_disparity, // adjust disparity-related extrinsics
boolean use_aztilts, // Adjust azimuths and tilts excluding disparity
boolean use_diff_rolls, // Adjust differential rolls (3 of 4 angles)
boolean force_convergence, // if true try to adjust convergence (disparity, symmetrical parameter 0) even with no disparity
// data, using just radial distortions
// boolean force_convergence, // if true try to adjust convergence (disparity, symmetrical parameter 0) even with no disparity
// // data, using just radial distortions
int min_num_forced, // minimal number of clusters with forced disparity to use it
boolean common_roll, // Enable common roll (valid for high disparity range only)
boolean corr_focalLength, // Correct scales (focal length temperature? variations)
boolean ers_rot, // Enable ERS correction of the camera rotation
boolean ers_lin, // Enable ERS correction of the camera linear movement
boolean ers_forw, // Enable ERS correction of the camera linear movement in z direction
boolean ers_side, // Enable ERS correction of the camera linear movement in x direction
boolean ers_vert, // Enable ERS correction of the camera linear movement in y direction
// add balancing-related here?
int manual_par_sel, // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use boolean flags, != 0 - ignore boolean flags)
double weight_disparity,
double weight_lazyeye,
double [][] measured_dsxy_in, //
boolean [] force_disparity_in, // boolean [] force_disparity,
GeometryCorrection geometryCorrection,
// GeometryCorrection geometryCorrection,
boolean use_main, // corr_rots_aux != null;
GeometryCorrection.CorrVector corr_vector_meas,
double [] old_new_rms, // should be double[2]
......@@ -162,19 +181,23 @@ public class ExtrinsicAdjustment {
this.use_rig_offsets = false;
this.measured_dsxy = measured_dsxy_in;
this.force_disparity = force_disparity_in;
final Matrix [] corr_rots_aux = null;
Matrix [][] deriv_rots_aux = null;
final Matrix [] corr_rots = use_main ? corr_rots_aux : corr_vector.getRotMatrices(); // get array of per-sensor rotation matrices
final Matrix [][] deriv_rots = use_main ? deriv_rots_aux : corr_vector.getRotDeriveMatrices();
boolean dbg_images = debugLevel > 0; // 1;
int clusters =clustersX * clustersY;
// dy_ddisparity = new double[clusters][];
// x0y0 = new double[clusters][];
boolean dbg_images = debugLevel > 1; // 2; // -3; // 2; // 1;
weight_window = getWeightWindow(marg_fract);
if (dbg_images) {
(new ShowDoubleFloatArrays()).showArrays(
weight_window,
clustersX,
clustersY,
"weight_window");
showInput(
measured_dsxy, // double[][] data,
"input data");// String title);
}
world_xyz = getWorldXYZ();
x0y0 = getXYNondistorted(
corr_vector,
true); // boolean set_dydisp)
......@@ -190,16 +213,19 @@ public class ExtrinsicAdjustment {
use_aztilts, // Adjust azimuths and tilts excluding disparity
use_diff_rolls, // Adjust differential rolls (3 of 4 angles)
common_roll,// boolean common_roll,
corr_focalLength, // boolean corr_focalLength);
corr_focalLength, // boolean corr_focalLength);
ers_rot, // boolean ers_rot, // Enable ERS correction of the camera rotation
ers_lin, // boolean ers_lin, // Enable ERS correction of the camera linear movement
manual_par_sel); // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use boolean flags, != 0 - ignore boolean flags)
ers_forw, // Enable ERS correction of the camera linear movement in z direction
ers_side, // Enable ERS correction of the camera linear movement in x direction
ers_vert, // Enable ERS correction of the camera linear movement in y direction
manual_par_sel); // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use boolean flags, != 0 - ignore boolean flags)
this.weights = getWeights(
measured_dsxy, // double [][] measured_dsxy,
// force_disparity, // boolean [] force_disparity, // same dimension as dsdn, true if disparity should be controlled
weight_disparity, // double weight_disparity,
weight_lazyeye); // double weight_lazyeye);
measured_dsxy, // double [][] measured_dsxy,
force_disparity, // boolean [] force_disparity, // same dimension as dsdn, true if disparity should be controlled
min_num_forced, // int min_num_forced,
weight_disparity, // double weight_disparity,
weight_lazyeye); // double weight_lazyeye);
double lambda = 0.1;
double lambda_scale_good = 0.5;
......@@ -216,6 +242,11 @@ public class ExtrinsicAdjustment {
rms_diff, // double rms_diff, // 0.001
num_iter, // int num_iter, // 20
debugLevel); // int debug_level)
if (old_new_rms != null) {
double [] on_rms = getOldNewRMS();
old_new_rms[0] = on_rms[0];
old_new_rms[1] = on_rms[1];
}
return lma_OK? corr_vector : null;
}
......@@ -230,7 +261,8 @@ public class ExtrinsicAdjustment {
double [][] xyND = new double[clusters][];
if (set_dydisp) {
dy_ddisparity = new double[clusters][];
} for (int cluster = 0; cluster < clusters; cluster++) {
}
for (int cluster = 0; cluster < clusters; cluster++) {
if (measured_dsxy[cluster] != null) {
if (set_dydisp) {
dy_ddisparity[cluster] = new double[NUM_SENSORS];
......@@ -246,15 +278,37 @@ public class ExtrinsicAdjustment {
null, // double [][] pXYNDderiv, // if not null, should be double[8][]
dy_ddisparity[cluster], // dy_ddisparity, // double [][] disp_dist, //disp_dist[i][2] or null
imu, // double [] imu,
world_xyz[cluster], // double [] xyz, // world XYZ for ERS correction
measured_dsxy[cluster][INDX_PX + 0], // double px,
measured_dsxy[cluster][INDX_PX + 1], // double py,
measured_dsxy[cluster][INDX_TARGET]); // double disparity);
}
}
return xyND;
}
}
private double [][] getWorldXYZ(){
int clusters =clustersX * clustersY;
double [][] world_xyz = new double[clusters][];
for (int cluster = 0; cluster < clusters; cluster++) {
if (measured_dsxy[cluster] != null) {
double disparity = measured_dsxy[cluster][INDX_TARGET];
if (disparity > 0.0) {
world_xyz[cluster] = geometryCorrection.getWorldCoordinates( // USED in lwir
measured_dsxy[cluster][INDX_PX + 0], // double px,
measured_dsxy[cluster][INDX_PX + 1], // double py,
disparity, // double disparity,
true); // boolean correctDistortions)
}
}
}
return world_xyz;
}
/*
private double [] getYminusFx(
GeometryCorrection.CorrVector corr_vector)
{
......@@ -273,11 +327,11 @@ public class ExtrinsicAdjustment {
dy_ddisparity[cluster], // double [] dy_ddisparity, // double [][] disp_dist, //disp_dist[i][2] or null
imu, // double [] imu,
x0y0[cluster], // double [] pXYND0, // per-port non-distorted coordinates corresponding to the correlation measurements
world_xyz[cluster], // double [] xyz, // world XYZ for ERS correction
measured_dsxy[cluster][ExtrinsicAdjustment.INDX_PX + 0], // double px,
measured_dsxy[cluster][ExtrinsicAdjustment.INDX_PX + 1], // double py,
measured_dsxy[cluster][ExtrinsicAdjustment.INDX_TARGET]); // double disparity);
//arraycopy(Object src, int srcPos, Object dest, int destPos, int length)
// System.arraycopy(src_pixels, 0, dst_pixels, 0, src_pixels.length); /* for the borders closer to 1/2 kernel size*/
ddnd[0] = -ddnd[0];
if ((force_disparity != null) && force_disparity[cluster]) {
ddnd[0] -= measured_dsxy[cluster][ExtrinsicAdjustment.INDX_DIFF];
......@@ -291,7 +345,7 @@ public class ExtrinsicAdjustment {
}
return y_minus_fx;
}
*/
private double [] getWYmFxRms( // USED in lwir
double [] fx) {
int clusters = clustersX * clustersY;
......@@ -324,22 +378,75 @@ public class ExtrinsicAdjustment {
return rslt;
}
private double [] getWeightWindow(double marg_fraction) { // 0.0 - no margins, 1.0 - pure cosine
double mf_hor = marg_fraction;
double mf_vert = marg_fraction;
double [] wx = new double [clustersX];
double [] wy = new double [clustersY];
double [] w = new double [clustersX * clustersY];
int [] boost_wnd = {33,15,40,35};
double boost_scale = 1.0; // 100.0;
double center = 0.5 * (clustersX - 1);
double marg = center * mf_hor;
for (int x = 0; x <= clustersX / 2; x++) {
if (x < marg) {
wx[x] = Math.sin(Math.PI * x / 2.0 / marg);
wx[x] *= wx[x];
} else {
wx[x] = 1.0;
}
wx[clustersX - 1 -x ] = wx[x];
}
center = 0.5 * (clustersY - 1);
marg = center * mf_vert;
for (int y = 0; y <= clustersY / 2; y++) {
if (y < marg) {
wy[y] = Math.sin(Math.PI * y / 2.0 / marg);
wy[y] *= wy[y]; // sin^2 taper, same as wx[x] above
} else {
wy[y] = 1.0;
}
wy[clustersY - 1 - y ] = wy[y];
}
for (int y = 0; y < clustersY; y++) {
for (int x = 0; x < clustersX; x++) {
w[y * clustersX + x] = wx[x]*wy[y];
if (boost_scale > 1.0) {
if ((x >= boost_wnd[0]) && (x < boost_wnd[2]) && (y >= boost_wnd[1]) && (y < boost_wnd[3])) {
w[y * clustersX + x] *= boost_scale;
}
}
}
}
return w;
}
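
getWeightWindow() tapers cluster weights near the frame margins with a sin^2 roll-off over a fraction marg_fraction of the half-width/half-height (the boost_wnd rectangle is inactive while boost_scale == 1.0). Summarizing the code above, with x the cluster column and m = marg_fraction * (clustersX - 1) / 2:

    w_x(x) = \sin^2\!\left(\frac{\pi x}{2 m}\right) \text{ for } x < m, \qquad w_x(x) = 1 \text{ otherwise},
    \qquad w_x(\mathrm{clustersX} - 1 - x) = w_x(x), \qquad w(x, y) = w_x(x)\, w_y(y).
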
private double [] getWeights(
double [][] measured_dsxy,
boolean [] force_disparity, // same dimension as dsdn, true if disparity should be controlled
int min_num_forced, // if number of forced samples exceeds this, zero out weights of non-forced
double weight_disparity,
double weight_lazyeye)
{
int clusters = clustersX * clustersY;
double [] weights = new double [clusters * POINTS_SAMPLE];
double sw = 0.0;
int num_forced = 0;
if (force_disparity != null) for (int cluster = 0; cluster < clusters; cluster++) if (force_disparity[cluster])num_forced ++;
boolean use_forced = num_forced >= min_num_forced;
for (int cluster = 0; cluster < clusters; cluster++) if (measured_dsxy[cluster] != null){
double w;
// if ((force_disparity != null) && force_disparity[cluster]) {
w = measured_dsxy[cluster][ExtrinsicAdjustment.INDX_STRENGTH] * weight_disparity;
double s = measured_dsxy[cluster][ExtrinsicAdjustment.INDX_STRENGTH] * weight_window[cluster];
double w = s * weight_disparity;
if (use_forced && !force_disparity[cluster]) {
w = 0.0;
}
weights[cluster * POINTS_SAMPLE + 0] = w;
sw += w;
// }
w = measured_dsxy[cluster][ExtrinsicAdjustment.INDX_STRENGTH] * weight_lazyeye;
w = s * weight_lazyeye;
for (int i = 1; i < POINTS_SAMPLE; i++) {
weights[cluster * POINTS_SAMPLE + i] = w;
}
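
The reworked getWeights() gates the disparity rows on force_disparity: once at least min_num_forced clusters are marked as forced (infinity) tiles, non-forced clusters contribute nothing to the disparity component of the fit, while every cluster keeps its lazy-eye weight. A small self-contained illustration of that gating (plain arrays and made-up numbers, not project code):

    // Sketch of the forced-disparity gating.
    boolean [] force_disparity = { true, false, true, true, false };
    double  [] strength        = { 0.5,  0.4,   0.6,  0.3,  0.7  };
    int min_num_forced = 3;
    int num_forced = 0;
    for (boolean f : force_disparity) if (f) num_forced++;
    boolean use_forced = (num_forced >= min_num_forced);        // here 3 >= 3 -> true
    double [] disparity_weight = new double [force_disparity.length];
    for (int c = 0; c < force_disparity.length; c++) {
        double w = strength[c];                                  // times weight_window and weight_disparity in the real code
        if (use_forced && !force_disparity[c]) w = 0.0;          // drop non-infinity clusters from the disparity rows
        disparity_weight[c] = w;
    }
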
......@@ -378,6 +485,7 @@ public class ExtrinsicAdjustment {
dy_ddisparity[cluster], // double [] dy_ddisparity, // double [][] disp_dist, //disp_dist[i][2] or null
imu, // double [] imu,
x0y0[cluster], // double [] pXYND0, // per-port non-distorted coordinates corresponding to the correlation measurements
world_xyz[cluster], // double [] xyz, // world XYZ for ERS correction
measured_dsxy[cluster][ExtrinsicAdjustment.INDX_PX + 0], // double px,
measured_dsxy[cluster][ExtrinsicAdjustment.INDX_PX + 1], // double py,
measured_dsxy[cluster][ExtrinsicAdjustment.INDX_TARGET]); // double disparity);
......@@ -616,6 +724,7 @@ public class ExtrinsicAdjustment {
deriv_rots, // Matrix [][] deriv_rots,
dy_ddisparity[cluster], // double [] dy_ddisparity, // double [][] disp_dist, //disp_dist[i][2] or null
imu, // double [] imu,
world_xyz[cluster], // double [] xyz, // world XYZ for ERS correction
measured_dsxy[cluster][ExtrinsicAdjustment.INDX_PX + 0], // double px,
measured_dsxy[cluster][ExtrinsicAdjustment.INDX_PX + 1], // double py,
measured_dsxy[cluster][ExtrinsicAdjustment.INDX_TARGET]); // double disparity);
......
......@@ -290,6 +290,7 @@ public class GeometryCorrection {
return extrinsic_corr;
}
public void setCorrVector(double [] dv){
setCorrVector(new CorrVector(dv));
}
......@@ -333,9 +334,10 @@ public class GeometryCorrection {
use_diff_rolls, // Adjust differential rolls (3 of 4 angles)
common_roll,
corr_focalLength,
false, // boolean ers_rot, // Enable ERS correction of the camera rotation
false, // boolean ers_lin, // Enable ERS correction of the camera linear movement
false, // boolean ers_rot, // Enable ERS correction of the camera rotation
false, // boolean ers_forw, // Enable ERS correction of the camera linear movement in z direction
false, // boolean ers_side, // Enable ERS correction of the camera linear movement in x direction
false, // boolean ers_vert, // Enable ERS correction of the camera linear movement in y direction
manual_par_sel); // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use boolean flags, != 0 - ignore boolean flags)
}
......@@ -347,7 +349,9 @@ public class GeometryCorrection {
boolean common_roll,
boolean corr_focalLength,
boolean ers_rot, // Enable ERS correction of the camera rotation
boolean ers_lin, // Enable ERS correction of the camera linear movement
boolean ers_forw, // Enable ERS correction of the camera linear movement in z direction
boolean ers_side, // Enable ERS correction of the camera linear movement in x direction
boolean ers_vert, // Enable ERS correction of the camera linear movement in y direction
int manual_par_sel) // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use boolean flags, != 0 - ignore boolean flags)
......@@ -360,7 +364,9 @@ public class GeometryCorrection {
common_roll,
corr_focalLength,
ers_rot, // Enable ERS correction of the camera rotation
ers_lin, // Enable ERS correction of the camera linear movement
ers_forw, // Enable ERS correction of the camera linear movement in z direction
ers_side, // Enable ERS correction of the camera linear movement in x direction
ers_vert, // Enable ERS correction of the camera linear movement in y direction
manual_par_sel); // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use boolean flags, != 0 - ignore boolean flags)
}
......@@ -1679,6 +1685,13 @@ public class GeometryCorrection {
return new CorrVector(athis);
}
public double getNorm() {
double s2 = 0;
for (int i = 0; i < vector.length; i++) {
s2 += vector[i]*vector[i];
}
return Math.sqrt(s2); // add weights to compare apples and oranges?
}
@Override
......@@ -1832,7 +1845,9 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
boolean common_roll,
boolean corr_focalLength,
boolean ers_rot, // Enable ERS correction of the camera rotation
boolean ers_lin, // Enable ERS correction of the camera linear movement
boolean ers_forw, // Enable ERS correction of the camera linear movement in z direction
boolean ers_side, // Enable ERS correction of the camera linear movement in x direction
boolean ers_vert, // Enable ERS correction of the camera linear movement in y direction
int manual_par_sel) // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use boolean flags, != 0 - ignore boolean flags)
{
......@@ -1853,9 +1868,9 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
ers_rot, //sym13
ers_rot, //sym14
ers_rot, //sym15
ers_lin, //sym16
ers_lin, //sym17
ers_lin //sym18
ers_side, //sym16
ers_vert, //sym17
ers_forw //sym18
};
if (manual_par_sel != 0) { // not used in lwir
for (int i = 0; i < par_mask.length; i++) {
......@@ -2679,6 +2694,11 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
double [] rad_coeff={this.distortionC,this.distortionB,this.distortionA,this.distortionA5,this.distortionA6,this.distortionA7,this.distortionA8};
double fl_pix = focalLength/(0.001*pixelSize); // focal length in pixels - this camera
double ri_scale = 0.001 * this.pixelSize / this.distortionRadius;
double [] xyz = (disparity > 0) ? getWorldCoordinates( // USED in lwir
px, // double px,
py, // double py,
disparity, // double disparity,
true) : null; // boolean correctDistortions)
for (int i = 0; i < numSensors; i++){
// non-distorted XY of the shifted location of the individual sensor
......@@ -2778,6 +2798,7 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
}
double delta_t = 0.0;
double [] imu = null;
double [][] dpXci_pYci_imu_lin = new double[2][3]; // null
if (disp_dist != null) {
disp_dist[i] = new double [4]; // dx/d_disp, dx_d_ccw_disp
// Not clear - what should be in Z direction before rotation here?
......@@ -2811,11 +2832,35 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
disp_dist[i][3] = dd2.get(1, 1);
imu = extrinsic_corr.getIMU(i); // currently it is common for all channels
delta_t = dd2.get(1, 0) * disparity * line_time; // positive for top cameras, negative - for bottom
double ers_Xci = delta_t* (dpXci_dtilt * imu[0] + dpXci_dazimuth * imu[1] + dpXci_droll * imu[2]);
double ers_Yci = delta_t* (dpYci_dtilt * imu[0] + dpYci_dazimuth * imu[1] + dpYci_droll * imu[2]);
pXY[i][0] += ers_Xci * rD2rND; // added correction to pixel X
pXY[i][1] += ers_Yci * rD2rND; // added correction to pixel Y
// ERS linear does not yet use per-port rotations, probably not needed
// double [][] dpXci_pYci_imu_lin = new double[2][3]; // null
if ((imu[0] != 0.0) || (imu[1] != 0.0) ||(imu[2] != 0.0) ||(imu[3] != 0.0) ||(imu[4] != 0.0) ||(imu[5] != 0.0)) {
delta_t = dd2.get(1, 0) * disparity * line_time; // positive for top cameras, negative - for bottom
double ers_Xci = delta_t* (dpXci_dtilt * imu[0] + dpXci_dazimuth * imu[1] + dpXci_droll * imu[2]);
double ers_Yci = delta_t* (dpYci_dtilt * imu[0] + dpYci_dazimuth * imu[1] + dpYci_droll * imu[2]);
if (xyz != null) {
double k = SCENE_UNITS_SCALE * this.disparityRadius;
// double wdisparity = -(k * this.focalLength / (0.001*this.pixelSize)) / xyz[2];
double wdisparity = disparity;
double dwdisp_dz = (k * this.focalLength / (0.001*this.pixelSize)) / (xyz[2] * xyz[2]);
dpXci_pYci_imu_lin[0][0] = -wdisparity / k; // dpx/ dworld_X
dpXci_pYci_imu_lin[1][1] = wdisparity / k; // dpy/ dworld_Y
dpXci_pYci_imu_lin[0][2] = (xyz[0] / k) * dwdisp_dz; // dpx/ dworld_Z
dpXci_pYci_imu_lin[1][2] = (xyz[1] / k) * dwdisp_dz; // dpy/ dworld_Z
ers_Xci += delta_t* (dpXci_pYci_imu_lin[0][0] * imu[3] + dpXci_pYci_imu_lin[0][2] * imu[5]);
ers_Yci += delta_t* (dpXci_pYci_imu_lin[1][1] * imu[4] + dpXci_pYci_imu_lin[1][2] * imu[5]);
}
pXY[i][0] += ers_Xci * rD2rND; // added correction to pixel X
pXY[i][1] += ers_Yci * rD2rND; // added correction to pixel Y
} else {
imu = null;
}
// TODO: calculate derivatives of pX, pY by 3 imu omegas
}
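
In formula form, the ERS correction assembled above uses the per-sensor readout delay delta_t (positive for the top sensors, negative for the bottom ones) and combines the angular rates imu[0..2] with the linear velocities imu[3..5] projected through the world coordinates; this only restates the code above in its own variable names:

    \Delta t = \frac{\partial p_{Y_i}}{\partial d}\, d\, t_{line}, \qquad
    \Delta X_{ci} = \Delta t \left( \frac{\partial p_{X_{ci}}}{\partial \mathrm{tilt}}\,\omega_0
                                  + \frac{\partial p_{X_{ci}}}{\partial \mathrm{az}}\,\omega_1
                                  + \frac{\partial p_{X_{ci}}}{\partial \mathrm{roll}}\,\omega_2
                                  + \frac{\partial p_{X_{ci}}}{\partial X_w}\,v_3
                                  + \frac{\partial p_{X_{ci}}}{\partial Z_w}\,v_5 \right)

and similarly for \Delta Y_{ci} with v_4 and v_5; both corrections are scaled by rD2rND before being added to pXY[i].
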
......@@ -2825,20 +2870,8 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
if (pXYderiv != null) {
pXYderiv[2 * i] = new double [CorrVector.LENGTH];
pXYderiv[2 * i+1] = new double [CorrVector.LENGTH];
/// Matrix drvi_daz = deriv_rots[i][0].times(vi);
/// Matrix drvi_dtl = deriv_rots[i][1].times(vi);
/// Matrix drvi_drl = deriv_rots[i][2].times(vi);
Matrix drvi_dzm = deriv_rots[i][3].times(vi);
/// double dpXci_dazimuth = drvi_daz.get(0, 0) * norm_z - pXci * drvi_daz.get(2, 0) / rvi.get(2, 0);
/// double dpYci_dazimuth = drvi_daz.get(1, 0) * norm_z - pYci * drvi_daz.get(2, 0) / rvi.get(2, 0);
/// double dpXci_dtilt = drvi_dtl.get(0, 0) * norm_z - pXci * drvi_dtl.get(2, 0) / rvi.get(2, 0);
/// double dpYci_dtilt = drvi_dtl.get(1, 0) * norm_z - pYci * drvi_dtl.get(2, 0) / rvi.get(2, 0);
/// double dpXci_droll = drvi_drl.get(0, 0) * norm_z - pXci * drvi_drl.get(2, 0) / rvi.get(2, 0);
/// double dpYci_droll = drvi_drl.get(1, 0) * norm_z - pYci * drvi_drl.get(2, 0) / rvi.get(2, 0);
double dpXci_dzoom = drvi_dzm.get(0, 0) * norm_z - pXci * drvi_dzm.get(2, 0) / rvi.get(2, 0);
double dpYci_dzoom = drvi_dzm.get(1, 0) * norm_z - pYci * drvi_dzm.get(2, 0) / rvi.get(2, 0);
......@@ -2847,17 +2880,6 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
double dri_dzoom = ri_scale / rNDi* (pXci * dpXci_dzoom + pYci * dpYci_dzoom);
/*
double dri_droll = ri_scale / rNDi* (pXci * dpXci_droll + pYci * dpYci_droll); // Not used anywhere ?
// TODO: verify dri_droll == 0 and remove
*/
// double drD2rND_dri = 0.0;
// rri = 1.0;
// for (int j = 0; j < rad_coeff.length; j++){
// drD2rND_dri += rad_coeff[j] * (j+1) * rri;
// rri *= ri;
// }
double drD2rND_dazimuth = drD2rND_dri * dri_dazimuth;
double drD2rND_dtilt = drD2rND_dri * dri_dtilt;
......@@ -2876,15 +2898,21 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
double dpYid_dzoom = dpYci_dzoom * rD2rND + pYci * drD2rND_dzoom; // new second term
// assuming drD2rND_imu* is zero (rD2rND does not depend on imu_*)
// hope it will not be needed, as derivatives are used only for field calibration, handled differently
if (imu != null) {
// dpX_d = delta_t * rD2rND * (dpXci_dtilt * imu[0] + dpXci_dazimuth * imu[1] + dpXci_droll * imu[2]);
// dpX_d = delta_t * rD2rND * (dpYci_dtilt * imu[0] + dpYci_dazimuth * imu[1] + dpYci_droll * imu[2]);
pXYderiv[2 * i + 0][CorrVector.IMU_INDEX+0] = delta_t * rD2rND * dpXci_dtilt * imu[0];
pXYderiv[2 * i + 1][CorrVector.IMU_INDEX+0] = delta_t * rD2rND * dpYci_dtilt * imu[0];
pXYderiv[2 * i + 0][CorrVector.IMU_INDEX+1] = delta_t * rD2rND * dpXci_dazimuth * imu[0];
pXYderiv[2 * i + 1][CorrVector.IMU_INDEX+1] = delta_t * rD2rND * dpYci_dazimuth * imu[0];
pXYderiv[2 * i + 0][CorrVector.IMU_INDEX+2] = delta_t * rD2rND * dpYci_droll * imu[0];
pXYderiv[2 * i + 1][CorrVector.IMU_INDEX+2] = delta_t * rD2rND * dpYci_droll * imu[0];
pXYderiv[2 * i + 0][CorrVector.IMU_INDEX+0] = delta_t * rD2rND * dpXci_dtilt; // * imu[0];
pXYderiv[2 * i + 1][CorrVector.IMU_INDEX+0] = delta_t * rD2rND * dpYci_dtilt; // * imu[0];
pXYderiv[2 * i + 0][CorrVector.IMU_INDEX+1] = delta_t * rD2rND * dpXci_dazimuth; // * imu[1];
pXYderiv[2 * i + 1][CorrVector.IMU_INDEX+1] = delta_t * rD2rND * dpYci_dazimuth; // * imu[1];
pXYderiv[2 * i + 0][CorrVector.IMU_INDEX+2] = delta_t * rD2rND * dpXci_droll; // * imu[2];
pXYderiv[2 * i + 1][CorrVector.IMU_INDEX+2] = delta_t * rD2rND * dpYci_droll; // * imu[2];
pXYderiv[2 * i + 0][CorrVector.IMU_INDEX+3] = delta_t * rD2rND * dpXci_pYci_imu_lin[0][0]; // * imu[3];
pXYderiv[2 * i + 1][CorrVector.IMU_INDEX+4] = delta_t * rD2rND * dpXci_pYci_imu_lin[1][1]; // * imu[4];
pXYderiv[2 * i + 0][CorrVector.IMU_INDEX+5] = delta_t * rD2rND * dpXci_pYci_imu_lin[0][2]; // * imu[5];
pXYderiv[2 * i + 1][CorrVector.IMU_INDEX+5] = delta_t * rD2rND * dpXci_pYci_imu_lin[1][2]; // * imu[5];
// TODO: Add linear egomotion
......@@ -2932,6 +2960,7 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
double [] dy_ddisparity, // double [][] disp_dist, //disp_dist[i][2] or null
double [] imu,
double [] pXYND0, // per-port non-distorted coordinates corresponding to the correlation measurements
double [] xyz, // world XYZ for ERS correction
double px,
double py,
double disparity)
......@@ -2947,6 +2976,7 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
pXYNDderiv, // if not null, should be double[8][]
dy_ddisparity, // double [][] disp_dist, //disp_dist[i][2] or null
imu,
xyz,
px,
py,
disparity);
......@@ -2972,6 +3002,7 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
Matrix [][] deriv_rots,
double [] dy_ddisparity, // double [][] disp_dist, //disp_dist[i][2] or null
double [] imu, // may be null
double [] xyz, // world XYZ for ERS correction
double px,
double py,
double disparity)
......@@ -2987,6 +3018,7 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
pXYNDderiv, // if not null, should be double[8][]
dy_ddisparity, // double [][] disp_dist, //disp_dist[i][2] or null
imu,
xyz,
px,
py,
disparity);
......@@ -3007,6 +3039,8 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
* @param deriv_rots derivatives by d_az, f_elev, d_rot, d_zoom
* @param pXYNDderiv null or double[2 * number_of_cameras][] array to accommodate derivatives of px, py by each of the parameters
* @param dy_ddisparity - array of per-port derivatives of sensor pY by disparity (to correct ERS) or null (if no ERS correction needed)
* @param imu - 6 components of the egomotion - 3 rotations and 3 linear velocities
* @param xyz - world coordinates for linear motion ERS correction
* @param px pixel X coordinate
* @param py pixel Y coordinate
* @param disparity disparity (for non-distorted image space)
......@@ -3020,6 +3054,7 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
double [][] pXYNDderiv, // if not null, should be double[8][]
double [] dy_ddisparity, // double [][] disp_dist, //disp_dist[i][2] or null
double [] imu,
double [] xyz, // world XYZ for ERS correction
double px,
double py,
double disparity)
......@@ -3079,11 +3114,28 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
dpXci_droll = drvi_drl.get(0, 0) * norm_z - pXci * drvi_drl.get(2, 0) / rvi.get(2, 0);
dpYci_droll = drvi_drl.get(1, 0) * norm_z - pYci * drvi_drl.get(2, 0) / rvi.get(2, 0);
}
double [][] dpXci_pYci_imu_lin = new double[2][3]; // null
if (xyz != null) {
// restore disparity back from the world coordinates to make it a constant
double k = SCENE_UNITS_SCALE * this.disparityRadius;
double wdisparity = -(k * this.focalLength / (0.001*this.pixelSize)) / xyz[2];
double dwdisp_dz = (k * this.focalLength / (0.001*this.pixelSize)) / (xyz[2] * xyz[2]);
// double wpXc = xyz[0] * wdisparity / k; // pixels
// double wpYc =-xyz[1] * wdisparity / k; // pixels
dpXci_pYci_imu_lin[0][0] = -wdisparity / k; // dpx/ dworld_X // TODO: Change sign - here and in the other similar place!
dpXci_pYci_imu_lin[1][1] = wdisparity / k; // dpy/ dworld_Y
dpXci_pYci_imu_lin[0][2] = (xyz[0] / k) * dwdisp_dz; // dpx/ dworld_Z
dpXci_pYci_imu_lin[1][2] = (xyz[1] / k) * dwdisp_dz; // dpy/ dworld_Z
}
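
The linear-motion sensitivities above follow from the pinhole relation between world Z and disparity; with k = SCENE_UNITS_SCALE * disparityRadius and f_pix = focalLength / (0.001 * pixelSize) the code computes (signs as in the code and its comments):

    d_w = -\frac{k\, f_{pix}}{Z}, \qquad \frac{\partial d_w}{\partial Z} = \frac{k\, f_{pix}}{Z^{2}}, \qquad
    \frac{\partial p_X}{\partial X_w} = -\frac{d_w}{k}, \qquad \frac{\partial p_Y}{\partial Y_w} = \frac{d_w}{k}, \qquad
    \frac{\partial p_X}{\partial Z_w} = \frac{X_w}{k}\,\frac{\partial d_w}{\partial Z}, \qquad
    \frac{\partial p_Y}{\partial Z_w} = \frac{Y_w}{k}\,\frac{\partial d_w}{\partial Z}.
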
double delta_t = 0.0;
// TODO: ignoring rotations - add it?
if ((dy_ddisparity != null) && (imu != null)) {
delta_t = dy_ddisparity[i] * disparity * line_time; // positive for top cameras, negative - for bottom
double ers_Xci = delta_t* (dpXci_dtilt * imu[0] + dpXci_dazimuth * imu[1] + dpXci_droll * imu[2]);
double ers_Yci = delta_t* (dpYci_dtilt * imu[0] + dpYci_dazimuth * imu[1] + dpYci_droll * imu[2]);
double ers_Xci = delta_t* (dpXci_dtilt * imu[0] + dpXci_dazimuth * imu[1] + dpXci_droll * imu[2] +
dpXci_pYci_imu_lin[0][0] * imu[3] + dpXci_pYci_imu_lin[0][2] * imu[5]);
double ers_Yci = delta_t* (dpYci_dtilt * imu[0] + dpYci_dazimuth * imu[1] + dpYci_droll * imu[2]+
dpXci_pYci_imu_lin[1][1] * imu[4] + dpXci_pYci_imu_lin[1][2] * imu[5]);
pXYND[2 * i + 0] += ers_Xci; // added correction to pixel X
pXYND[2 * i + 1] += ers_Yci; // added correction to pixel Y
}
......@@ -3101,6 +3153,15 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
pXYNDderiv[2 * i + 1][CorrVector.IMU_INDEX+1] = delta_t * dpYci_dazimuth; // * imu[1];
pXYNDderiv[2 * i + 0][CorrVector.IMU_INDEX+2] = delta_t * dpXci_droll; // * imu[2];
pXYNDderiv[2 * i + 1][CorrVector.IMU_INDEX+2] = delta_t * dpYci_droll; // * imu[2];
pXYNDderiv[2 * i + 0][CorrVector.IMU_INDEX+3] = delta_t * dpXci_pYci_imu_lin[0][0]; // * imu[3];
// pXYNDderiv[2 * i + 1][CorrVector.IMU_INDEX+3] = delta_t * dpXci_pYci_imu_lin[1][0]; // * imu[3]; // 0
// pXYNDderiv[2 * i + 0][CorrVector.IMU_INDEX+4] = delta_t * dpXci_pYci_imu_lin[0][1]; // * imu[4]; // 0
pXYNDderiv[2 * i + 1][CorrVector.IMU_INDEX+4] = delta_t * dpXci_pYci_imu_lin[1][1]; // * imu[4];
pXYNDderiv[2 * i + 0][CorrVector.IMU_INDEX+5] = delta_t * dpXci_pYci_imu_lin[0][2]; // * imu[5];
pXYNDderiv[2 * i + 1][CorrVector.IMU_INDEX+5] = delta_t * dpXci_pYci_imu_lin[1][2]; // * imu[5];
}
// verify that d/dsym are well, symmetrical
......
......@@ -1513,8 +1513,6 @@ public class ImageDtt {
}
// removing macro and FPGA modes
// public double [][][][][][] clt_aberrations_quad_corr_min( // USED in LWIR
// public double [][] clt_aberrations_quad_corr_min( // returns d,s lazy eye parameters
public double [][] cltMeasureLazyEye ( // returns d,s lazy eye parameters
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
// final int macro_scale, // to correlate tile data instead of the pixel data: 1 - pixels, 8 - tiles
......@@ -1813,8 +1811,9 @@ public class ImageDtt {
boolean debugCluster = (clustX == debug_clustX) && (clustY == debug_clustY);
boolean debugCluster1 = (Math.abs(clustX - debug_clustX) < 10) && (Math.abs(clustY - debug_clustY) < 10);
if (debugCluster) {
System.out.println("debugCluster");
}
int clust_lma_debug_level = debugCluster? imgdtt_params.lma_debug_level : -5;
// filter only tiles with similar disparity to enable lazy eye for the ERS.
int num_good_tiles = 0;
......@@ -1872,9 +1871,11 @@ public class ImageDtt {
cTile = cTileY * tileStep + cTileX;
tIndex = tileY * tilesX + tileX;
// int nTile = tileY * tilesX + tileX; // how is it different from tIndex?
for (int cam = 0; cam < quad; cam++) {
clt_mismatch[3*cam + 0][tIndex] = Double.NaN;
clt_mismatch[3*cam + 1][tIndex] = Double.NaN;
if (clt_mismatch != null) {
for (int cam = 0; cam < quad; cam++) {
clt_mismatch[3*cam + 0][tIndex] = Double.NaN;
clt_mismatch[3*cam + 1][tIndex] = Double.NaN;
}
}
}
}
......@@ -1956,7 +1957,7 @@ public class ImageDtt {
for (int i = 0; i < quad; i++) {
System.out.println("clt_aberrations_quad_corr(): tileX="+tileX+", tileY="+tileY+
" centerX="+centerX+" centerY="+centerY+" disparity="+disparity_array[tileY][tileX]+
" centersXY["+cTile+"]["+i+"][0]="+centersXY[0][i][0]+" centersXY["+cTile+"]["+i+"][1]="+centersXY[cTile][i][1]);
" centersXY["+cTile+"]["+i+"][0]="+centersXY[cTile][i][0]+" centersXY["+cTile+"]["+i+"][1]="+centersXY[cTile][i][1]);
}
}
if (debug_offsets != null) {
......@@ -2087,140 +2088,152 @@ public class ImageDtt {
// all color channels are done here
if (disparity_map != null){ // not null - calculate correlations
// if (disparity_map != null){ // not null - calculate correlations
if (disparity_map != null){
for (int i = 0; i < disparity_map.length; i++) {
if (disparity_map[i] != null) disparity_map[i][nTile] = (
(i == DISPARITY_STRENGTH_INDEX) ||
(i == DISPARITY_INDEX_HOR_STRENGTH) ||
(i == DISPARITY_INDEX_VERT_STRENGTH)) ? 0.0 : Double.NaN; // once and for all
}
// calculate overexposed fraction
// calculate overexposed fraction
if (saturation_imp != null){
disparity_map[OVEREXPOSED][nTile] = (1.0 * overexp_all[0]) / overexp_all[1];
}
}
// calculate all selected pairs correlations
int all_pairs = imgdtt_params.dbg_pair_mask; //TODO: use tile tasks
corrs[cTile] = corr2d.correlateCompositeFD( // now works with nulls for some clt_data colors
clt_data, // double [][][][][][] clt_data,
tileX, // int tileX,
tileY, // int tileY,
all_pairs, // int pairs_mask,
filter, // double [] lpf,
scale_strengths, // double scale_value, // scale correlation value
col_weights, // double [] col_weights,
corr_fat_zero); // double fat_zero)
// calculate interpolated "strips" to match different scales and orientations (ortho/diagonal) on the
// fine (0.5 pix) grid. ortho for scale == 1 provide even/even samples (1/4 of all), diagonal ones -
// checkerboard pattern
double [][] strips = corr2d.scaleRotateInterpoateCorrelations(
corrs[cTile], // double [][] correlations,
all_pairs, // int pairs_mask,
imgdtt_params.corr_strip_hight, //); // int hwidth);
(tile_lma_debug_level > 0) ? all_pairs:0); // debugMax);
// Combine strips for selected pairs. Now using only for all available pairs.
// Other combinations are used only if requested (clt_corr_partial != null)
double [] strip_combo = corr2d.combineInterpolatedCorrelations(
strips, // double [][] strips,
all_pairs, // int pairs_mask,
imgdtt_params.corr_offset, // double offset);
imgdtt_params.twice_diagonal); // boolean twice_diagonal)
// calculate CM maximums for all mixed channels
// First get integer correlation center, relative to the center
int [] ixy = corr2d.getMaxXYInt( // find integer pair or null if below threshold
strip_combo, // double [] data,
true, // boolean axis_only,
imgdtt_params.min_corr, // double minMax, // minimal value to consider (at integer location, not interpolated)
tile_lma_debug_level > 0); // boolean debug);
// double [] corr_stat = null;
// if integer argmax was strong enough, calculate CM argmax
// will not fill out DISPARITY_INDEX_INT+1, DISPARITY_INDEX_CM+1, DISPARITY_INDEX_POLY+1
// use clt_mismatch for that
// double strength = 0.0;
// double disparity = 0.0;
if (ixy != null) {
strength[cTile] = strip_combo[ixy[0]+transform_size-1]; // strength at integer max on axis
// calculate all selected pairs correlations
int all_pairs = imgdtt_params.dbg_pair_mask; //TODO: use tile tasks
corrs[cTile] = corr2d.correlateCompositeFD( // now works with nulls for some clt_data colors
clt_data, // double [][][][][][] clt_data,
tileX, // int tileX,
tileY, // int tileY,
all_pairs, // int pairs_mask,
filter, // double [] lpf,
scale_strengths, // double scale_value, // scale correlation value
col_weights, // double [] col_weights,
corr_fat_zero); // double fat_zero)
// calculate interpolated "strips" to match different scales and orientations (ortho/diagonal) on the
// fine (0.5 pix) grid. ortho for scale == 1 provide even/even samples (1/4 of all), diagonal ones -
// checkerboard pattern
double [][] strips = corr2d.scaleRotateInterpoateCorrelations(
corrs[cTile], // double [][] correlations,
all_pairs, // int pairs_mask,
imgdtt_params.corr_strip_hight, //); // int hwidth);
(tile_lma_debug_level > 0) ? all_pairs:0); // debugMax);
// Combine strips for selected pairs. Now using only for all available pairs.
// Other combinations are used only if requested (clt_corr_partial != null)
double [] strip_combo = corr2d.combineInterpolatedCorrelations(
strips, // double [][] strips,
all_pairs, // int pairs_mask,
imgdtt_params.corr_offset, // double offset);
imgdtt_params.twice_diagonal); // boolean twice_diagonal)
// calculate CM maximums for all mixed channels
// First get integer correlation center, relative to the center
int [] ixy = corr2d.getMaxXYInt( // find integer pair or null if below threshold
strip_combo, // double [] data,
true, // boolean axis_only,
imgdtt_params.min_corr, // double minMax, // minimal value to consider (at integer location, not interpolated)
tile_lma_debug_level > 0); // boolean debug);
// double [] corr_stat = null;
// if integer argmax was strong enough, calculate CM argmax
// will not fill out DISPARITY_INDEX_INT+1, DISPARITY_INDEX_CM+1, DISPARITY_INDEX_POLY+1
// use clt_mismatch for that
// double strength = 0.0;
// double disparity = 0.0;
if (ixy != null) {
strength[cTile] = strip_combo[ixy[0]+transform_size-1]; // strength at integer max on axis
if (disparity_map != null){
disparity_map[DISPARITY_INDEX_INT][tIndex] = -ixy[0];
disparity_map[DISPARITY_STRENGTH_INDEX][tIndex] = strength[cTile];
if (Double.isNaN(disparity_map[DISPARITY_STRENGTH_INDEX][tIndex])) {
System.out.println("BUG: 1. disparity_map[DISPARITY_STRENGTH_INDEX]["+tIndex+"] should not be NaN");
}
corr_stat[cTile] = corr2d.getMaxXCm( // get fractional center as a "center of mass" inside circle/square from the integer max
strip_combo, // double [] data, // [data_size * data_size]
ixy[0], // int ixcenter, // integer center x
// corr_wndy, // double [] window_y, // (half) window function in y-direction(perpendicular to disparity: for row0 ==1
// corr_wndx, // double [] window_x, // half of a window function in x (disparity) direction
(tile_lma_debug_level > 0)); // boolean debug);
}
corr_stat[cTile] = corr2d.getMaxXCm( // get fractional center as a "center of mass" inside circle/square from the integer max
strip_combo, // double [] data, // [data_size * data_size]
ixy[0], // int ixcenter, // integer center x
// corr_wndy, // double [] window_y, // (half) window function in y-direction(perpendicular to disparity: for row0 ==1
// corr_wndx, // double [] window_x, // half of a window function in x (disparity) direction
(tile_lma_debug_level > 0)); // boolean debug);
}
// proceed only if CM correlation result is non-null // for compatibility with old code we need it to run regardless of the strength of the normal correlation
// proceed only if CM correlation result is non-null // for compatibility with old code we need it to run regardless of the strength of the normal correlation
if (disparity_map != null){
if (corr_stat[cTile] != null) {
disparity_map[DISPARITY_INDEX_CM][tIndex] = -corr_stat[cTile][0]; // disp_str[cTile][0]; // disparity is negative X
disparity_map[DISPARITY_INDEX_INT+1][tIndex] = -corr_stat[cTile][0]/.85 + disparity_array[tileY][tileX] + disparity_corr; // disp_str[cTile][0]; // disparity is negative X
}
disp_str[cTile] = new double[2];
// disp_str[cTile][0] = -corr_stat[cTile][0];
// disp_str[cTile][1] = corr_stat[cTile][1]; // strength
if (tile_lma_debug_level > 0) {
System.out.println("Will run getMaxXSOrtho( ) for tileX="+tileX+", tileY="+tileY);
}
// hack: reuse/overwrite for target disparity
disparity_map[DISPARITY_INDEX_INT][tIndex] = disparity_array[tileY][tileX] + disparity_corr;
}
disp_str[cTile] = new double[2];
if (tile_lma_debug_level > 0) {
System.out.println("Will run getMaxXSOrtho( ) for tileX="+tileX+", tileY="+tileY);
}
// debug new LMA correlations
int tdl = debugCluster ? tile_lma_debug_level : -3;
if (true) { // debugCluster1) {
if (debugCluster && (globalDebugLevel > -1)) { // -2)) {
System.out.println("Will run new LMA for tileX="+tileX+", tileY="+tileY);
}
double [] poly_disp = {Double.NaN, 0.0};
Corr2dLMA lma2 = corr2d.corrLMA2(
imgdtt_params, // ImageDttParameters imgdtt_params,
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs[cTile], // double [][] corrs,
disp_dist[cTile],
rXY, // double [][] rXY, // non-distorted X,Y offset per nominal pixel of disparity
imgdtt_params.dbg_pair_mask, // int pair_mask, // which pairs to process
null, // disp_str[cTile], //corr_stat[0], // double xcenter, // preliminary center x in pixels for largest baseline
poly_disp, // double[] poly_ds, // null or pair of disparity/strength
imgdtt_params.ortho_vasw_pwr, // double vasw_pwr, // value as weight to this power,
tdl, // tile_lma_debug_level, //+2, // int debug_level,
tileX, // int tileX, // just for debug output
tileY); // int tileY
disp_str[cTile] = null;
// debug new LMA correlations
int tdl = debugCluster ? tile_lma_debug_level : -3;
if (true) { // debugCluster1) {
if (debugCluster && (globalDebugLevel > -1)) { // -2)) {
System.out.println("Will run new LMA for tileX="+tileX+", tileY="+tileY);
}
double [] poly_disp = {Double.NaN, 0.0};
Corr2dLMA lma2 = corr2d.corrLMA2(
imgdtt_params, // ImageDttParameters imgdtt_params,
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs[cTile], // double [][] corrs,
disp_dist[cTile],
rXY, // double [][] rXY, // non-distorted X,Y offset per nominal pixel of disparity
imgdtt_params.dbg_pair_mask, // int pair_mask, // which pairs to process
null, // disp_str[cTile], //corr_stat[0], // double xcenter, // preliminary center x in pixels for largest baseline
poly_disp, // double[] poly_ds, // null or pair of disparity/strength
imgdtt_params.ortho_vasw_pwr, // double vasw_pwr, // value as weight to this power,
tdl, // tile_lma_debug_level, //+2, // int debug_level,
tileX, // int tileX, // just for debug output
tileY); // int tileY
disp_str[cTile] = null;
if (disparity_map != null){
disparity_map[DISPARITY_INDEX_HOR][tIndex] = poly_disp[0];
disparity_map[DISPARITY_INDEX_HOR_STRENGTH][tIndex] = poly_disp[1];
if (lma2 != null) {
disp_str[cTile] = lma2.lmaDisparityStrength(
imgdtt_params.lmas_max_rel_rms, // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
imgdtt_params.lmas_min_strength, // minimal composite strength (sqrt(average amp squared over absolute RMS)
imgdtt_params.lmas_min_ac, // minimal of A and C coefficients maximum (measures sharpest point/line)
imgdtt_params.lmas_max_area, //double lma_max_area, // maximal half-area (if > 0.0)
imgdtt_params.lma_str_scale, // convert lma-generated strength to match previous ones - scale
imgdtt_params.lma_str_offset // convert lma-generated strength to match previous ones - add to result
)[0];
if (tile_lma_debug_level > 0) {
double [][] ds_dbg = {disp_str[cTile]};
lma2.printStats(ds_dbg,1);
}
}
if (lma2 != null) {
disp_str[cTile] = lma2.lmaDisparityStrength(
imgdtt_params.lmas_max_rel_rms, // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
imgdtt_params.lmas_min_strength, // minimal composite strength (sqrt(average amp squared over absolute RMS)
imgdtt_params.lmas_min_ac, // minimal of A and C coefficients maximum (measures sharpest point/line)
imgdtt_params.lmas_max_area, //double lma_max_area, // maximal half-area (if > 0.0)
imgdtt_params.lma_str_scale, // convert lma-generated strength to match previous ones - scale
imgdtt_params.lma_str_offset // convert lma-generated strength to match previous ones - add to result
)[0];
if (tile_lma_debug_level > 0) {
double [][] ds_dbg = {disp_str[cTile]};
lma2.printStats(ds_dbg,1);
}
if (disparity_map != null){
if (disp_str[cTile] != null) {
disparity_map[DISPARITY_INDEX_POLY][tIndex] = disp_str[cTile][0];
disparity_map[DISPARITY_INDEX_POLY+1][tIndex] = disp_str[cTile][1];
}
}
}
//} // end of if (corr_stat != null)
} // if (disparity_map != null){ // not null - calculate correlations
}
//} // end of if (corr_stat != null)
// } // if (disparity_map != null){ // not null - calculate correlations
// only debug is left
}
}
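For reference, the composite disparity written to DISPARITY_INDEX_INT+1 above combines the fractional center-of-mass offset (disparity is the negated X offset), the programmed target disparity and the infinity correction, with the empirical ~0.85 scale visible in the code. A minimal sketch of that composition, assuming those conventions (class and method names here are illustrative only):

public class DisparityComposeSketch {
    // Compose full disparity from a center-of-mass correlation offset, as in the tile code above:
    // disparity = -x_cm / 0.85 + target_disparity + disparity_corr
    static double composeDisparity(double cmOffsetX, double targetDisparity, double disparityCorr) {
        return -cmOffsetX / 0.85 + targetDisparity + disparityCorr;
    }
    public static void main(String[] args) {
        // Example: CM offset of -0.425 pix at target disparity 5.0 and zero infinity correction -> 5.5
        System.out.println(composeDisparity(-0.425, 5.0, 0.0));
    }
}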
......@@ -2315,72 +2328,80 @@ public class ImageDtt {
lazy_eye_data[nCluster] = null;
}
// just for debugging, can be removed
/*
double [][] lma2_ds = lma2.lmaDisparityStrength(
imgdtt_params.lma_max_rel_rms, // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
imgdtt_params.lma_min_strength, // minimal composite strength (sqrt(average amp squared over absolute RMS)
imgdtt_params.lma_min_ac, // minimal of A and C coefficients maximum (measures sharpest point/line)
imgdtt_params.lma_max_area, //double lma_max_area, // maximal half-area (if > 0.0)
imgdtt_params.lma_str_scale, // convert lma-generated strength to match previous ones - scale
imgdtt_params.lma_str_offset); // convert lma-generated strength to match previous ones - add to result
for (int cTileY = 0; cTileY < tileStep; cTileY++) {
tileY = clustY * tileStep + cTileY ;
if (tileY < tilesY) {
for (int cTileX = 0; cTileX < tileStep; cTileX++) {
tileX = clustX * tileStep + cTileX ;
if (tileX < tilesX) {
cTile = cTileY * tileStep + cTileX;
tIndex = tileY * tilesX + tileX;
// int nTile = tileY * tilesX + tileX; // how is it different from tIndex?
for (int cam = 0; cam < ddnd.length; cam++) {
if (ddnd[cam] != null) {
if (imgdtt_params.lma_diff_xy) {
clt_mismatch[3*cam + 0][tIndex] =
ddnd[cam][0] * rXY[cam][0] - ddnd[cam][1] * rXY[cam][1];
clt_mismatch[3*cam + 1][tIndex] =
ddnd[cam][0] * rXY[cam][1] + ddnd[cam][1] * rXY[cam][0];
} else {
clt_mismatch[3*cam + 0][tIndex] = ddnd[cam][0];
clt_mismatch[3*cam + 1][tIndex] = ddnd[cam][1];
if (disparity_map != null){
double [][] lma2_ds = lma2.lmaDisparityStrength(
imgdtt_params.lma_max_rel_rms, // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
imgdtt_params.lma_min_strength, // minimal composite strength (sqrt(average amp squared over absolute RMS)
imgdtt_params.lma_min_ac, // minimal of A and C coefficients maximum (measures sharpest point/line)
imgdtt_params.lma_max_area, //double lma_max_area, // maximal half-area (if > 0.0)
imgdtt_params.lma_str_scale, // convert lma-generated strength to match previous ones - scale
imgdtt_params.lma_str_offset); // convert lma-generated strength to match previous ones - add to result
for (int cTileY = 0; cTileY < tileStep; cTileY++) {
tileY = clustY * tileStep + cTileY ;
if (tileY < tilesY) {
for (int cTileX = 0; cTileX < tileStep; cTileX++) {
tileX = clustX * tileStep + cTileX ;
if (tileX < tilesX) {
cTile = cTileY * tileStep + cTileX;
tIndex = tileY * tilesX + tileX;
// int nTile = tileY * tilesX + tileX; // how is it different from tIndex?
for (int cam = 0; cam < ddnd.length; cam++) {
if ((clt_mismatch != null) && (ddnd[cam] != null)) {
if (imgdtt_params.lma_diff_xy) {
clt_mismatch[3*cam + 0][tIndex] =
ddnd[cam][0] * rXY[cam][0] - ddnd[cam][1] * rXY[cam][1];
clt_mismatch[3*cam + 1][tIndex] =
ddnd[cam][0] * rXY[cam][1] + ddnd[cam][1] * rXY[cam][0];
} else {
clt_mismatch[3*cam + 0][tIndex] = ddnd[cam][0];
clt_mismatch[3*cam + 1][tIndex] = ddnd[cam][1];
}
}
if (stats != null) {
disparity_map[IMG_DIFF0_INDEX+0][tIndex] = stats[0];
disparity_map[IMG_DIFF0_INDEX+1][tIndex] = stats[1];
disparity_map[IMG_DIFF0_INDEX+2][tIndex] = stats[2];
// disparity_map[IMG_DIFF0_INDEX+3][tIndex] = stats[3];
}
if ((lma2_ds != null) && ((lma2_ds[cTile] != null))) {
// composite new disparity
disparity_map[DISPARITY_INDEX_VERT][tIndex] = lma2_ds[cTile][0]+ disparity_array[tileY][tileX] + disparity_corr;
disparity_map[DISPARITY_INDEX_VERT_STRENGTH][tIndex] = lma2_ds[cTile][1];
if (clt_mismatch != null) {
clt_mismatch[3*0 + 2][tIndex] =
(lma2_ds[cTile][1] - imgdtt_params.lma_str_offset)/imgdtt_params.lma_str_scale - imgdtt_params.lma_min_strength;
}
}
}
if (stats != null) {
disparity_map[IMG_DIFF0_INDEX+0][tIndex] = stats[0];
disparity_map[IMG_DIFF0_INDEX+1][tIndex] = stats[1];
disparity_map[IMG_DIFF0_INDEX+2][tIndex] = stats[2];
// disparity_map[IMG_DIFF0_INDEX+3][tIndex] = stats[3];
}
if ((lma2_ds != null) && ((lma2_ds[cTile] != null))) {
disparity_map[DISPARITY_INDEX_VERT][tIndex] = lma2_ds[cTile][0];
disparity_map[DISPARITY_INDEX_VERT_STRENGTH][tIndex] = lma2_ds[cTile][1];
clt_mismatch[3*0 + 2][tIndex] =
(lma2_ds[cTile][1] - imgdtt_params.lma_str_offset)/imgdtt_params.lma_str_scale - imgdtt_params.lma_min_strength;
}
}
if (extra_stats != null) {
if (extra_stats[cTile] != null) {
disparity_map[DISPARITY_INDEX_CM+1][tIndex] = extra_stats[cTile][0];
disparity_map[DISPARITY_VARIATIONS_INDEX][tIndex] = extra_stats[cTile][2];
disparity_map[OVEREXPOSED][tIndex] = extra_stats[cTile][3];
clt_mismatch[3*1 + 2][tIndex] = extra_stats[cTile][0];
clt_mismatch[3*2 + 2][tIndex] = extra_stats[cTile][2];
clt_mismatch[3*3 + 2][tIndex] = extra_stats[cTile][3];
} else {
disparity_map[DISPARITY_INDEX_CM+1][tIndex] = Double.NaN;
disparity_map[DISPARITY_VARIATIONS_INDEX][tIndex] = Double.NaN;
disparity_map[OVEREXPOSED][tIndex] = Double.NaN;
clt_mismatch[3*1 + 2][tIndex] = Double.NaN;
clt_mismatch[3*2 + 2][tIndex] = Double.NaN;
clt_mismatch[3*3 + 2][tIndex] = Double.NaN;
if (extra_stats != null) {
if (extra_stats[cTile] != null) {
disparity_map[DISPARITY_INDEX_CM+1][tIndex] = extra_stats[cTile][0];
disparity_map[DISPARITY_VARIATIONS_INDEX][tIndex] = extra_stats[cTile][2];
disparity_map[OVEREXPOSED][tIndex] = extra_stats[cTile][3];
if (clt_mismatch != null) {
clt_mismatch[3*1 + 2][tIndex] = extra_stats[cTile][0];
clt_mismatch[3*2 + 2][tIndex] = extra_stats[cTile][2];
clt_mismatch[3*3 + 2][tIndex] = extra_stats[cTile][3];
}
} else {
disparity_map[DISPARITY_INDEX_CM+1][tIndex] = Double.NaN;
disparity_map[DISPARITY_VARIATIONS_INDEX][tIndex] = Double.NaN;
disparity_map[OVEREXPOSED][tIndex] = Double.NaN;
if (clt_mismatch != null) {
clt_mismatch[3*1 + 2][tIndex] = Double.NaN;
clt_mismatch[3*2 + 2][tIndex] = Double.NaN;
clt_mismatch[3*3 + 2][tIndex] = Double.NaN;
}
}
}
}
}
}
}
}
*/
/**/
}
}
}
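The lma_diff_xy branch above rotates each camera's (dd, nd) mismatch, measured along and across the disparity direction, into image x,y using the per-camera unit vector rXY. A minimal sketch of that rotation, matching the clt_mismatch[3*cam + 0/1] assignments above (helper name is hypothetical):

public class MismatchRotateSketch {
    // x = dd*rX - nd*rY,  y = dd*rY + nd*rX  -- a plain 2x2 rotation by the baseline direction
    static double[] ddndToXY(double dd, double nd, double rX, double rY) {
        return new double[] { dd * rX - nd * rY, dd * rY + nd * rX };
    }
    public static void main(String[] args) {
        double r = Math.sqrt(0.5); // 45-degree baseline direction, just for the example
        double[] xy = ddndToXY(0.2, -0.1, r, r);
        System.out.println(xy[0] + ", " + xy[1]);
    }
}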
......@@ -2464,13 +2485,14 @@ public class ImageDtt {
final int threadsMax, // maximal number of threads to launch
final int globalDebugLevel)
{
final boolean debug_distort= true;
// final double [][] debug_offsets = null;
// final double [][] debug_offsets = {{0.5, 0.5},{0.0,0.0},{0.0,0.0},{-0.5,-0.5}}; // add to calculated CenterXY for evaluating new LMA
// final double [][] debug_offsets = {{ 0.5, 0.5},{ 0.5,-0.5},{-0.5, 0.5},{-0.5,-0.5}}; // add to calculated CenterXY for evaluating new LMA
// final double [][] debug_offsets = {{ 0.5, 0.0},{ -0.5, 0.0},{-0.5, 0.0},{ 0.5, 0.0}}; // add to calculated CenterXY for evaluating new LMA
// final double [][] debug_offsets = {{ 1.0, 0.0},{ -1.0, 0.0},{-1.0, 0.0},{ 1.0, 0.0}}; // add to calculated CenterXY for evaluating new LMA
final double [][] debug_offsets = {{ 0.0, 1.0},{ 0.0, -1.0},{ 0.0, -1.0},{ 0.0, 1.0}}; // add to calculated CenterXY for evaluating new LMA
final boolean debug_distort= globalDebugLevel > 0; ///false; // true;
final double [][] debug_offsets = new double[imgdtt_params.lma_dbg_offset.length][2];
for (int i = 0; i < debug_offsets.length; i++) for (int j = 0; j < debug_offsets[i].length; j++) {
debug_offsets[i][j] = imgdtt_params.lma_dbg_offset[i][j]*imgdtt_params.lma_dbg_scale;
}
final boolean macro_mode = macro_scale != 1; // correlate tile data instead of the pixel data
final int quad = 4; // number of subcameras
final int numcol = 3; // number of colors // keep the same, just do not use [0] and [1], [2] - green
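The hard-coded debug_offsets table above is replaced by imgdtt_params.lma_dbg_offset scaled by lma_dbg_scale, so the artificial CenterXY shifts used to evaluate the new LMA can be set (or disabled with a zero scale) from the parameters instead of by editing code. A minimal sketch of that scaling, assuming a 4-camera layout (names are illustrative):

public class DebugOffsetsSketch {
    // debug_offsets[cam][xy] = lma_dbg_offset[cam][xy] * lma_dbg_scale; a zero scale disables the shifts
    static double[][] scaleOffsets(double[][] dbgOffset, double dbgScale) {
        double[][] out = new double[dbgOffset.length][2];
        for (int i = 0; i < out.length; i++)
            for (int j = 0; j < 2; j++)
                out[i][j] = dbgOffset[i][j] * dbgScale;
        return out;
    }
    public static void main(String[] args) {
        double[][] base = {{0.0, 1.0}, {0.0, -1.0}, {0.0, -1.0}, {0.0, 1.0}}; // pattern from the old hard-coded table
        System.out.println(java.util.Arrays.deepToString(scaleOffsets(base, 0.0))); // all zeros
    }
}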
......@@ -2729,7 +2751,7 @@ public class ImageDtt {
corr_mask &= ~ (1 << i);
}
}
boolean debugTile =(tileX == debug_tileX) && (tileY == debug_tileY);
boolean debugTile =(tileX == debug_tileX) && (tileY == debug_tileY) && (globalDebugLevel > -1);
final int [] overexp_all = (saturation_imp != null) ? ( new int [2]): null;
......@@ -2823,13 +2845,14 @@ public class ImageDtt {
centersXY[ip][1] -= shiftXY[ip][1];
}
// save disparity distortions for visualization:
for (int cam = 0; cam <quad; cam++) {
dbg_distort[cam * 4 + 0 ][nTile] = disp_dist[cam][0];
dbg_distort[cam * 4 + 1 ][nTile] = disp_dist[cam][1];
dbg_distort[cam * 4 + 2 ][nTile] = disp_dist[cam][2];
dbg_distort[cam * 4 + 3 ][nTile] = disp_dist[cam][3];
if (dbg_distort != null) {
for (int cam = 0; cam <quad; cam++) {
dbg_distort[cam * 4 + 0 ][nTile] = disp_dist[cam][0];
dbg_distort[cam * 4 + 1 ][nTile] = disp_dist[cam][1];
dbg_distort[cam * 4 + 2 ][nTile] = disp_dist[cam][2];
dbg_distort[cam * 4 + 3 ][nTile] = disp_dist[cam][3];
}
}
// TODO: use correction after disparity applied (to work for large disparity values)
if (fine_corr != null){
......@@ -3049,8 +3072,8 @@ public class ImageDtt {
}
}// end of for (int chn = 0; chn <numcol; chn++)
// used in lwir
int tile_lma_debug_level = ((tileX == debug_tileX) && (tileY == debug_tileY))? imgdtt_params.lma_debug_level : -1;
// int tile_lma_debug_level = ((tileX == debug_tileX) && (tileY == debug_tileY))? imgdtt_params.lma_debug_level : -1;
int tile_lma_debug_level = ((tileX == debug_tileX) && (tileY == debug_tileY))? (imgdtt_params.lma_debug_level-1) : -2;
// all color channels are done here
double extra_disparity = 0.0; // used for textures: if allowed, shift images extra before trying to combine
......
......@@ -99,7 +99,7 @@ public class ImageDttParameters {
public double corr_wndx_blur = 5.0; // 100% to 0 % vertical transition range
// LMA parameters
public double lma_disp_range = 2.0; // disparity range to combine in one cluster (to mitigate ERS)
public double lma_disp_range = 5.0; // disparity range to combine in one cluster (to mitigate ERS)
// LMA single parameters
public boolean lmas_gaussian = false; // model correlation maximum as a Gaussian (false - as a parabola)
public boolean lmas_adjust_wm = true; // used in new for width
......@@ -110,16 +110,16 @@ public class ImageDttParameters {
public double lmas_poly_str_min = 0.05; // ignore tiles with poly strength (scaled) below
public double lmas_lambda_initial = 0.03; //
public double lmas_rms_diff = 0.001; //
public double lmas_rms_diff = 0.0003; //
public int lmas_num_iter = 20; //
// Filtering and strength calculation
public double lmas_max_rel_rms = 0.2; // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
public double lmas_min_strength = 1.0; // minimal composite strength (sqrt(average amp squared over absolute RMS)
public double lmas_min_ac = 0.03; // minimal of a and C coefficients maximum (measures sharpest point/line)
public double lmas_max_rel_rms = 0.3; // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
public double lmas_min_strength = 0.7; // minimal composite strength (sqrt(average amp squared over absolute RMS)
public double lmas_min_ac = 0.02; // minimal of a and C coefficients maximum (measures sharpest point/line)
public double lmas_max_area = 0.0; // maximal half-area (if > 0.0)
public boolean lma_gaussian = false; // model correlation maximum as a Gaussian (false - as a parabola)
public boolean lma_second = false; // re-run LMA after removing weak/failed tiles
public boolean lma_second = true; // re-run LMA after removing weak/failed tiles
public boolean lma_second_gaussian = false; // re-run after removing weak/failed in Gaussian mode
public boolean lma_adjust_wm = true; // used in new for width
public boolean lma_adjust_wy = true; // false; // used in new for ellipse
......@@ -128,7 +128,7 @@ public class ImageDttParameters {
public boolean lma_adjust_ag = true; // used in new for gains
// new LMA parameters
public double lma_wnd = 1.5; // raise cosine window to this power (1.0 - just 2D cosine)
public double lma_wnd = 1.0; //1.5; // raise cosine window to this power (1.0 - just 2D cosine)
public double lma_min_wnd = 0.4; // divide values by the 2D correlation window if it is >= this value for finding maximums and convex areas
public double lma_wnd_pwr = 0.8; // Raise window for finding a maximum and a convex region to this power
public int lma_hard_marg = 1; // Zero out this width margins before blurring
......@@ -148,13 +148,13 @@ public class ImageDttParameters {
public double lma_lambda_scale_good = 0.5; //
public double lma_lambda_scale_bad = 8.0; //
public double lma_lambda_max = 100.0; //
public double lma_rms_diff = 0.001; //
public int lma_num_iter = 20; //
public double lma_rms_diff = 0.003; //
public int lma_num_iter = 10; //
// Filtering and strength calculation
public double lma_max_rel_rms = 0.12; // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
public double lma_min_strength = 1.25; // minimal composite strength (sqrt(average amp squared over absolute RMS)
public double lma_min_ac = 0.15; // minimal of a and C coefficients maximum (measures sharpest point/line)
public double lma_max_area = 30.0; // maximal half-area (if > 0.0)
public double lma_max_rel_rms = 0.2; // maximal relative (to average max/min amplitude LMA RMS) // May be up to 0.3)
public double lma_min_strength = 1.0; // minimal composite strength (sqrt(average amp squared over absolute RMS)
public double lma_min_ac = 0.05; // minimal of a and C coefficients maximum (measures sharpest point/line)
public double lma_max_area = 45.0; // maximal half-area (if > 0.0)
public double lma_str_scale = 0.2; // convert lma-generated strength to match previous ones - scale
public double lma_str_offset = 0.05; // convert lma-generated strength to match previous ones - add to result
......@@ -162,8 +162,8 @@ public class ImageDttParameters {
// Lazy eye results interpretation
public boolean lma_diff_xy = true; // convert dd/nd to x,y
public double lma_diff_minw = 0.5; // minimal weight to keep
public double lma_diff_sigma = 1.0; // blur differential data (relative to the cluster linear size)
public double lma_diff_minw = 0.07; // minimal weight to keep
public double lma_diff_sigma = 2.0; // blur differential data (relative to the cluster linear size)
public int lma_debug_level = 0; //
public int lma_debug_level1 = 0; //
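The lma_str_scale / lma_str_offset pair converts raw LMA strengths to the range of the older correlation strengths, and the lazy-eye code fills clt_mismatch with the inverse of that conversion minus lma_min_strength. A minimal sketch of both directions, under that reading of the comments (the numbers below are the defaults listed above):

public class LmaStrengthSketch {
    // forward: reported = raw * scale + offset (match the legacy strength range)
    static double toLegacy(double raw, double scale, double offset) {
        return raw * scale + offset;
    }
    // inverse, as stored in clt_mismatch: (reported - offset) / scale - min_strength
    static double excessOverMin(double reported, double scale, double offset, double minStrength) {
        return (reported - offset) / scale - minStrength;
    }
    public static void main(String[] args) {
        double reported = toLegacy(1.3, 0.2, 0.05);                  // 0.31 in the legacy range
        System.out.println(excessOverMin(reported, 0.2, 0.05, 1.0)); // ~0.3 above the minimum strength
    }
}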
......
......@@ -4674,7 +4674,7 @@ public class QuadCLT {
final boolean updateStatus,
int debugLevel){
if (debugLevel > -2) { // -1) {
if (debugLevel > -2) { // -1
debugLevel = clt_parameters.ly_debug_level;
}
......@@ -4746,34 +4746,37 @@ public class QuadCLT {
boolean apply_extrinsic = (clt_parameters.ly_corr_scale != 0.0);
GeometryCorrection.CorrVector corr_vector = ea.solveCorr (
clt_parameters.ly_inf_en, // boolean use_disparity, // adjust disparity-related extrinsics
clt_parameters.ly_aztilt_en, // boolean use_aztilts, // Adjust azimuths and tilts excluding disparity
clt_parameters.ly_marg_fract, // double marg_fract, // part of half-width, and half-height to reduce weights
clt_parameters.ly_inf_en, // boolean use_disparity, // adjust disparity-related extrinsics
clt_parameters.ly_aztilt_en, // boolean use_aztilts, // Adjust azimuths and tilts excluding disparity
clt_parameters.ly_diff_roll_en,//boolean use_diff_rolls, // Adjust differential rolls (3 of 4 angles)
clt_parameters.ly_inf_force, // boolean force_convergence, // if true try to adjust convergence (disparity, symmetrical parameter 0) even with no disparity
// clt_parameters.ly_inf_force, // boolean force_convergence, // if true try to adjust convergence (disparity, symmetrical parameter 0) even with no disparity
clt_parameters.ly_min_forced, // int min_num_forced, // minimal number of clusters with forced disparity to use it
// data, using just radial distortions
clt_parameters.ly_com_roll, //boolean common_roll, // Enable common roll (valid for high disparity range only)
clt_parameters.ly_focalLength, //boolean corr_focalLength, // Correct scales (focal length temperature? variations)
clt_parameters.ly_ers_rot, // boolean ers_rot, // Enable ERS correction of the camera rotation
clt_parameters.ly_ers_lin, // boolean ly_ers_lin, // Enable ERS correction of the camera linear movement
clt_parameters.ly_ers_rot, // boolean ers_rot, // Enable ERS correction of the camera rotation
clt_parameters.ly_ers_forw, // boolean ers_forw, // Enable ERS correction of the camera linear movement in z direction
clt_parameters.ly_ers_side, // boolean ers_side, // Enable ERS correction of the camera linear movement in x direction
clt_parameters.ly_ers_vert, // boolean ers_vert, // Enable ERS correction of the camera linear movement in y direction
// add balancing-related here?
clt_parameters.ly_par_sel, // int manual_par_sel, // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use boolean flags, != 0 - ignore boolean flags)
1.0, // double weight_disparity,
1.0, // double weight_lazyeye,
dsxy, // double [][] measured_dsxy,
null, // boolean [] force_disparity, // boolean [] force_disparity,
geometryCorrection, // GeometryCorrection geometryCorrection,
false, // boolean use_main, // corr_rots_aux != null;
geometryCorrection.getCorrVector(), // GeometryCorrection.CorrVector corr_vector,
old_new_rms, // double [] old_new_rms, // should be double[2]
debugLevel + 5);// int debugLevel)
debugLevel); // + 5);// int debugLevel)
if (debugLevel > -1){
if (debugLevel > -2){
System.out.println("Old extrinsic corrections:");
System.out.println(geometryCorrection.getCorrVector().toString());
}
if (corr_vector != null) {
GeometryCorrection.CorrVector diff_corr = corr_vector.diffFromVector(geometryCorrection.getCorrVector());
if (debugLevel > -1){
if (debugLevel > -2){
System.out.println("New extrinsic corrections:");
System.out.println(corr_vector.toString());
......@@ -4796,7 +4799,7 @@ public class QuadCLT {
System.out.println("Correction is not applied according clt_parameters.ly_corr_scale == 0.0) ");
}
} else {
if (debugLevel > -2){
if (debugLevel > -3){
System.out.println("LMA failed");
}
}
......@@ -5042,9 +5045,40 @@ public class QuadCLT {
true,
name+sAux()+"-CLT_MISMATCH-D"+clt_parameters.disparity+"_"+clt_parameters.tileStep+"x"+clt_parameters.tileStep,
ExtrinsicAdjustment.DATA_TITLES);
if (disparity_map != null){
int target_index = ImageDtt.DISPARITY_INDEX_INT;
int cm_index = ImageDtt.DISPARITY_INDEX_INT+1;
int lma_index = ImageDtt.DISPARITY_INDEX_VERT;
int strength_index = ImageDtt.DISPARITY_STRENGTH_INDEX;
double [][] scan_maps = new double[3][tilesX*tilesY];
for (int i = 0; i < scan_maps[0].length; i++) {
scan_maps[1][i] = disparity_map[lma_index][i];
scan_maps[2][i] = disparity_map[strength_index][i];
if (Double.isNaN(disparity_map[lma_index][i])) {
if (Double.isNaN(disparity_map[cm_index][i])) {
scan_maps[0][i] = disparity_map[target_index][i]; // TODO: add offset calculated from neighbours
} else {
scan_maps[0][i] = disparity_map[cm_index][i];
}
} else {
scan_maps[0][i] = disparity_map[lma_index][i];
}
}
String [] titles3 = {"combo", "lma", "strength"};
sdfa_instance.showArrays(
scan_maps,
tilesX,
tilesY,
true,
name+sAux()+"-DISP_MAP-D"+clt_parameters.disparity+"-CLT",
titles3);
}
}
/*
/**/
if (disparity_map != null){
if (!batch_mode && clt_parameters.show_map && (debugLevel > -2)){
sdfa_instance.showArrays(
......@@ -5055,6 +5089,7 @@ public class QuadCLT {
name+sAux()+"-DISP_MAP-D"+clt_parameters.disparity,
ImageDtt.DISPARITY_TITLES);
}
/*
if (clt_mismatch != null) {
sdfa_instance.showArrays(
clt_mismatch,
......@@ -5135,9 +5170,9 @@ public class QuadCLT {
tilesY,
true,
name+sAux()+"-CLT_MISMATCH-BLUR-D"+clt_parameters.disparity);
}
} */
}
*/
/**/
return results;
}
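The scan_maps "combo" slice built above picks, per tile, the best available disparity: the LMA result when it is finite, else the center-of-mass result, else the programmed target (with a TODO to add a neighbor-based offset). A minimal sketch of that priority (hypothetical helper, not part of the repository):

public class DisparityFallbackSketch {
    // Prefer the LMA disparity, then the CM disparity, then the programmed target.
    static double pickDisparity(double lma, double cm, double target) {
        if (!Double.isNaN(lma)) return lma;
        if (!Double.isNaN(cm))  return cm;
        return target;
    }
    public static void main(String[] args) {
        System.out.println(pickDisparity(Double.NaN, 4.8, 5.0)); // 4.8
    }
}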
......@@ -7621,152 +7656,125 @@ public class QuadCLT {
dbg_combo_use};
(new ShowDoubleFloatArrays()).showArrays(dbg_img, tp.getTilesX(), tp.getTilesY(), true, "extrinsics_bgnd_combo",titles);
}
AlignmentCorrection ac = new AlignmentCorrection(this);
AlignmentCorrection ac = null;
if (!clt_parameters.ly_lma_ers ) {
ac = new AlignmentCorrection(this);
}
// iteration steps
// if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel >-1)) {
if (clt_parameters.show_extrinsic && (debugLevel >-3)) { // temporary
tp.showScan(
tp.clt_3d_passes.get(bg_scan), // CLTPass3d scan,
"bg_scan_post"); //String title)
tp.showScan(
tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
"combo_scan-"+combo_scan+"_post"); //String title)
}
double comp_diff = min_sym_update + 1; // (> min_sym_update)
for (int num_iter = 0; num_iter < max_tries; num_iter++){
if (update_disp_from_latest) {
tp.clt_3d_passes.get(combo_scan).updateDisparity();
}
double [][] bg_mismatch = new double[12][];
double [][] combo_mismatch = new double[12][];
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
bg_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
bg_mismatch, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
combo_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
combo_mismatch, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
double [][] scans14 = new double [28][];
scans14[14 * 0 + 0] = tp.clt_3d_passes.get(bg_scan).disparity_map[ImageDtt.DISPARITY_INDEX_CM]; // .getDisparity(0);
scans14[14 * 0 + 1] = tp.clt_3d_passes.get(bg_scan).getStrength();
scans14[14 * 1 + 0] = tp.clt_3d_passes.get(combo_scan).disparity_map[ImageDtt.DISPARITY_INDEX_CM];
scans14[14 * 1 + 1] = tp.clt_3d_passes.get(combo_scan).getStrength();
for (int i = 0; i < bg_mismatch.length; i++) {
scans14[14 * 0 + 2 + i] = bg_mismatch[i];
scans14[14 * 1 + 2 + i] = combo_mismatch[i];
}
if (debugLevelInner > 0) {
(new ShowDoubleFloatArrays()).showArrays(scans14, tp.getTilesX(), tp.getTilesY(), true, "scans_14"); // , titles);
}
if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel > 1)) {
tp.showScan(
tp.clt_3d_passes.get(bg_scan), // CLTPass3d scan,
"bg_scan_iter"); //String title)
if (clt_parameters.ly_lma_ers) {
CLTMeasureLY( // perform single pass according to prepared tiles operations and disparity // USED in lwir
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
combo_scan, // final int scanIndex,
// only combine and calculate once, next passes keep
(num_iter >0)? -1: bg_scan, // final int bgIndex, // combine, if >=0
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner + 7); // - 1); // -5-1
tp.showScan(
tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
"combo_scan-"+combo_scan+"_iter"); //String title)
}
double [][] target_disparity = {tp.clt_3d_passes.get(bg_scan).getDisparity(0), tp.clt_3d_passes.get(combo_scan).getDisparity(0)};
int num_tiles = tp.clt_3d_passes.get(combo_scan).getStrength().length;
// TODO: fix above for using GT
// use lazyEyeCorrectionFromGT(..) when ground truth data is available
double [][][] new_corr = ac.lazyEyeCorrection(
adjust_poly, // final boolean use_poly,
true, // final boolean restore_disp_inf, // Restore subtracted disparity for scan #0 (infinity)
clt_parameters.fcorr_radius, // final double fcorr_radius,
clt_parameters.fcorr_inf_strength, // final double min_strenth,
clt_parameters.fcorr_inf_diff, // final double max_diff,
// 1.3, // final double comp_strength_var,
clt_parameters.inf_iters, // 20, // 0, // final int max_iterations,
clt_parameters.inf_final_diff, // 0.0001, // final double max_coeff_diff,
clt_parameters.inf_far_pull, // 0.0, // 0.25, // final double far_pull, // = 0.2; // 1; // 0.5;
clt_parameters.inf_str_pow, // 1.0, // final double strength_pow,
0.8*clt_parameters.disp_scan_step, // 1.5, // final double lazyEyeCompDiff, // clt_parameters.fcorr_disp_diff
clt_parameters.ly_smpl_side, // 3, // final int lazyEyeSmplSide, // = 2; // Sample size (side of a square)
clt_parameters.ly_smpl_num, // 5, // final int lazyEyeSmplNum, // = 3; // Number after removing worst (should be >1)
clt_parameters.ly_smpl_rms, // 0.1, // final double lazyEyeSmplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
clt_parameters.ly_disp_var, // 0.2, // final double lazyEyeDispVariation, // 0.2, maximal full disparity difference between the tile and 8 neighbors
clt_parameters.ly_disp_rvar, // 0.2, // final double lazyEyeDispRelVariation, // 0.02 Maximal relative full disparity difference to 8 neighbors
clt_parameters.ly_norm_disp, // final double ly_norm_disp, // = 5.0; // Reduce weight of higher disparity tiles
clt_parameters.inf_smpl_side, // 3, // final int smplSide, // = 2; // Sample size (side of a square)
clt_parameters.inf_smpl_num, // 5, // final int smplNum, // = 3; // Number after removing worst (should be >1)
clt_parameters.inf_smpl_rms, // 0.1, // 0.05, // final double smplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
// histogram parameters
clt_parameters.ih_smpl_step, // 8, // final int hist_smpl_side, // 8 x8 masked, 16x16 sampled
clt_parameters.ih_disp_min, // -1.0, // final double hist_disp_min,
clt_parameters.ih_disp_step, // 0.05, // final double hist_disp_step,
clt_parameters.ih_num_bins, // 40, // final int hist_num_bins,
clt_parameters.ih_sigma, // 0.1, // final double hist_sigma,
clt_parameters.ih_max_diff, // 0.1, // final double hist_max_diff,
clt_parameters.ih_min_samples, // 10, // final int hist_min_samples,
clt_parameters.ih_norm_center, // true, // final boolean hist_norm_center, // if there are more tiles that fit than min_samples, replace with
clt_parameters.ly_inf_frac, // 0.5, // final double inf_fraction, // fraction of the weight for the infinity tiles
"LY_combo_scan-"+combo_scan+"_post"); //String title)
int tilesX = tp.getTilesX();
int tilesY = tp.getTilesY();
int cluster_size =clt_parameters.tileStep;
int clustersX= (tilesX + cluster_size - 1) / cluster_size;
int clustersY= (tilesY + cluster_size - 1) / cluster_size;
ExtrinsicAdjustment ea = new ExtrinsicAdjustment(
geometryCorrection, // GeometryCorrection gc,
clt_parameters.tileStep, // int clusterSize,
clustersX, // int clustersX,
clustersY); // int clustersY);
double [] old_new_rms = new double[2];
boolean apply_extrinsic = (clt_parameters.ly_corr_scale != 0.0);
CLTPass3d scan = tp.clt_3d_passes.get(combo_scan);
GeometryCorrection.CorrVector corr_vector = ea.solveCorr (
clt_parameters.ly_marg_fract, // double marg_fract, // part of half-width, and half-height to reduce weights
clt_parameters.ly_inf_en, // boolean use_disparity, // adjust disparity-related extrinsics
clt_parameters.ly_aztilt_en, // boolean use_aztilts, // Adjust azimuths and tilts excluding disparity
clt_parameters.ly_diff_roll_en, // boolean use_diff_rolls, // Adjust differential rolls (3 of 4 angles)
// clt_parameters.ly_inf_force, // boolean force_convergence, // if true try to adjust convergence (disparity, symmetrical parameter 0) even with no disparity
clt_parameters.ly_min_forced, // int min_num_forced, // minimal number of clusters with forced disparity to use it
// data, using just radial distortions
clt_parameters.ly_com_roll, // boolean common_roll, // Enable common roll (valid for high disparity range only)
clt_parameters.ly_focalLength , // boolean corr_focalLength, // Correct scales (focal length temperature? variations)
clt_parameters.ly_ers_rot, // boolean ers_rot, // Enable ERS correction of the camera rotation
clt_parameters.ly_ers_forw, // boolean ers_forw, // Enable ERS correction of the camera linear movement in z direction
clt_parameters.ly_ers_side, // boolean ers_side, // Enable ERS correction of the camera linear movement in x direction
clt_parameters.ly_ers_vert, // boolean ers_vert, // Enable ERS correction of the camera linear movement in y direction
// add balancing-related here?
clt_parameters.ly_par_sel, // int manual_par_sel, // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use boolean flags, != 0 - ignore boolean flags)
1.0, // double weight_disparity,
1.0, // double weight_lazyeye,
scan.getLazyEyeData(), // dsxy, // double [][] measured_dsxy,
scan.getLazyEyeForceDisparity(), // null, // boolean [] force_disparity, // boolean [] force_disparity,
false, // boolean use_main, // corr_rots_aux != null;
geometryCorrection.getCorrVector(), // GeometryCorrection.CorrVector corr_vector,
old_new_rms, // double [] old_new_rms, // should be double[2]
debugLevel); // + 5);// int debugLevel)
if (debugLevel > -2){
System.out.println("Old extrinsic corrections:");
System.out.println(geometryCorrection.getCorrVector().toString());
}
if (corr_vector != null) {
GeometryCorrection.CorrVector diff_corr = corr_vector.diffFromVector(geometryCorrection.getCorrVector());
comp_diff = diff_corr.getNorm();
if (debugLevel > -2){
System.out.println("New extrinsic corrections:");
System.out.println(corr_vector.toString());
System.out.println("Increment extrinsic corrections:");
System.out.println(diff_corr.toString());
// System.out.println("Correction scale = "+clt_parameters.ly_corr_scale);
clt_parameters.getLyPerQuad(num_tiles), // final int min_per_quadrant, // minimal tiles per quadrant (not counting the worst) to proceed
clt_parameters.getLyInf(num_tiles), // final int min_inf, // minimal number of tiles at infinity to proceed
clt_parameters.getLyInfScale(num_tiles),// final int min_inf_to_scale, // minimal number of tiles at infinity to apply weight scaling
}
clt_parameters.ly_right_left, // false // equalize weights of right/left FoV (use with horizon in both halves and gross infinity correction)
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
scans14, // disp_strength, // scans, // double [][] disp_strength,
target_disparity, // double [][] target_disparity, // null or programmed disparity (1 per each 14 entries of scans_14)
tp.getTilesX(), // int tilesX,
clt_parameters.corr_magic_scale, // double magic_coeff, // still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
debugLevelInner - 1); // + (clt_parameters.fine_dbg ? 1:0)); // int debugLevel)
if (new_corr == null) {
return false;
}
comp_diff = 0.0;
int num_pars = 0;
if (adjust_poly) {
apply_fine_corr(
new_corr,
debugLevelInner + 2);
for (int n = 0; n < new_corr.length; n++){
for (int d = 0; d < new_corr[n].length; d++){
for (int i = 0; i < new_corr[n][d].length; i++){
comp_diff += new_corr[n][d][i] * new_corr[n][d][i];
num_pars++;
}
if (apply_extrinsic){
geometryCorrection.setCorrVector(corr_vector) ;
System.out.println("Extrinsic correction updated (can be disabled by setting clt_parameters.ly_corr_scale = 0.0) ");
} else {
System.out.println("Correction is not applied according clt_parameters.ly_corr_scale == 0.0) ");
}
}
comp_diff = Math.sqrt(comp_diff/num_pars);
if (debugLevel > -2) {
if ((debugLevel > -1) || (comp_diff < min_poly_update)) {
System.out.println("#### fine correction iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
comp_diff + " ("+min_poly_update+")");
} else {
if (debugLevel > -3){
System.out.println("LMA failed");
}
}
if (comp_diff < min_poly_update) { // add other parameter to exit from poly
break;
}
} else {
for (int i = 0; i < new_corr[0][0].length; i++){
comp_diff += new_corr[0][0][i] * new_corr[0][0][i];
}
comp_diff = Math.sqrt(comp_diff);
boolean done = (comp_diff < min_sym_update) || (num_iter == (max_tries - 1));
// System.out.println("done="+done);
// System.out.println("done="+done);
if (debugLevel > -10) { // should work even in batch mode
System.out.println("#### extrinsicsCLT(): iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
comp_diff + " ("+min_sym_update+"), previous RMS = " + new_corr[0][1][0]+ " (debugLevel = "+debugLevel+")");
comp_diff + " ("+min_sym_update+"), previous RMS = " + old_new_rms[0]+
" final RMS = " + old_new_rms[1]+ " (debugLevel = "+debugLevel+")");
}
if (debugLevel > -10) {
if ((debugLevel > -1) || done) {
// System.out.println("#### extrinsicsCLT(): iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
// comp_diff + " ("+min_sym_update+"), previous RMS = " + new_corr[0][1][0]);
// System.out.println("#### extrinsicsCLT(): iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
// comp_diff + " ("+min_sym_update+"), previous RMS = " + new_corr[0][1][0]);
System.out.println("New extrinsic corrections:");
System.out.println(geometryCorrection.getCorrVector().toString());
}
......@@ -7775,6 +7783,169 @@ public class QuadCLT {
if (comp_diff < min_sym_update) {
break;
}
if (update_disp_from_latest) {
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
combo_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
null, // combo_mismatch, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
}
} else {
double [][] bg_mismatch = new double[12][];
double [][] combo_mismatch = new double[12][];
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
bg_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
bg_mismatch, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
combo_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
combo_mismatch, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
double [][] scans14 = new double [28][];
scans14[14 * 0 + 0] = tp.clt_3d_passes.get(bg_scan).disparity_map[ImageDtt.DISPARITY_INDEX_CM]; // .getDisparity(0);
scans14[14 * 0 + 1] = tp.clt_3d_passes.get(bg_scan).getStrength();
scans14[14 * 1 + 0] = tp.clt_3d_passes.get(combo_scan).disparity_map[ImageDtt.DISPARITY_INDEX_CM];
scans14[14 * 1 + 1] = tp.clt_3d_passes.get(combo_scan).getStrength();
for (int i = 0; i < bg_mismatch.length; i++) {
scans14[14 * 0 + 2 + i] = bg_mismatch[i];
scans14[14 * 1 + 2 + i] = combo_mismatch[i];
}
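// Layout note: scans14 holds two scans of 14 slices each: [14*s + 0] = CM disparity, [14*s + 1] = strength,
// [14*s + 2 .. 14*s + 13] = the 12 mismatch channels; s = 0 is bg_scan, s = 1 is combo_scan.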
if (debugLevelInner > 0) {
(new ShowDoubleFloatArrays()).showArrays(scans14, tp.getTilesX(), tp.getTilesY(), true, "scans_14"); // , titles);
}
if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel > 1)) {
tp.showScan(
tp.clt_3d_passes.get(bg_scan), // CLTPass3d scan,
"bg_scan_iter"); //String title)
tp.showScan(
tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
"combo_scan-"+combo_scan+"_iter"); //String title)
}
double [][] target_disparity = {tp.clt_3d_passes.get(bg_scan).getDisparity(0), tp.clt_3d_passes.get(combo_scan).getDisparity(0)};
int num_tiles = tp.clt_3d_passes.get(combo_scan).getStrength().length;
// TODO: fix above for using GT
// use lazyEyeCorrectionFromGT(..) when ground truth data is available
double [][][] new_corr = ac.lazyEyeCorrection(
adjust_poly, // final boolean use_poly,
true, // final boolean restore_disp_inf, // Restore subtracted disparity for scan #0 (infinity)
clt_parameters.fcorr_radius, // final double fcorr_radius,
clt_parameters.fcorr_inf_strength, // final double min_strenth,
clt_parameters.fcorr_inf_diff, // final double max_diff,
// 1.3, // final double comp_strength_var,
clt_parameters.inf_iters, // 20, // 0, // final int max_iterations,
clt_parameters.inf_final_diff, // 0.0001, // final double max_coeff_diff,
clt_parameters.inf_far_pull, // 0.0, // 0.25, // final double far_pull, // = 0.2; // 1; // 0.5;
clt_parameters.inf_str_pow, // 1.0, // final double strength_pow,
0.8*clt_parameters.disp_scan_step, // 1.5, // final double lazyEyeCompDiff, // clt_parameters.fcorr_disp_diff
clt_parameters.ly_smpl_side, // 3, // final int lazyEyeSmplSide, // = 2; // Sample size (side of a square)
clt_parameters.ly_smpl_num, // 5, // final int lazyEyeSmplNum, // = 3; // Number after removing worst (should be >1)
clt_parameters.ly_smpl_rms, // 0.1, // final double lazyEyeSmplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
clt_parameters.ly_disp_var, // 0.2, // final double lazyEyeDispVariation, // 0.2, maximal full disparity difference between the tile and 8 neighbors
clt_parameters.ly_disp_rvar, // 0.2, // final double lazyEyeDispRelVariation, // 0.02 Maximal relative full disparity difference to 8 neighbors
clt_parameters.ly_norm_disp, // final double ly_norm_disp, // = 5.0; // Reduce weight of higher disparity tiles
clt_parameters.inf_smpl_side, // 3, // final int smplSide, // = 2; // Sample size (side of a square)
clt_parameters.inf_smpl_num, // 5, // final int smplNum, // = 3; // Number after removing worst (should be >1)
clt_parameters.inf_smpl_rms, // 0.1, // 0.05, // final double smplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
// histogram parameters
clt_parameters.ih_smpl_step, // 8, // final int hist_smpl_side, // 8 x8 masked, 16x16 sampled
clt_parameters.ih_disp_min, // -1.0, // final double hist_disp_min,
clt_parameters.ih_disp_step, // 0.05, // final double hist_disp_step,
clt_parameters.ih_num_bins, // 40, // final int hist_num_bins,
clt_parameters.ih_sigma, // 0.1, // final double hist_sigma,
clt_parameters.ih_max_diff, // 0.1, // final double hist_max_diff,
clt_parameters.ih_min_samples, // 10, // final int hist_min_samples,
clt_parameters.ih_norm_center, // true, // final boolean hist_norm_center, // if there are more tiles that fit than min_samples, replace with
clt_parameters.ly_inf_frac, // 0.5, // final double inf_fraction, // fraction of the weight for the infinity tiles
clt_parameters.getLyPerQuad(num_tiles), // final int min_per_quadrant, // minimal tiles per quadrant (not counting the worst) to proceed
clt_parameters.getLyInf(num_tiles), // final int min_inf, // minimal number of tiles at infinity to proceed
clt_parameters.getLyInfScale(num_tiles),// final int min_inf_to_scale, // minimal number of tiles at infinity to apply weight scaling
clt_parameters.ly_right_left, // false // equalize weights of right/left FoV (use with horizon in both halves and gross infinity correction)
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
scans14, // disp_strength, // scans, // double [][] disp_strength,
target_disparity, // double [][] target_disparity, // null or programmed disparity (1 per each 14 entries of scans_14)
tp.getTilesX(), // int tilesX,
clt_parameters.corr_magic_scale, // double magic_coeff, // still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
debugLevelInner - 1); // + (clt_parameters.fine_dbg ? 1:0)); // int debugLevel)
if (new_corr == null) {
return false;
}
comp_diff = 0.0;
int num_pars = 0;
if (adjust_poly) {
apply_fine_corr(
new_corr,
debugLevelInner + 2);
for (int n = 0; n < new_corr.length; n++){
for (int d = 0; d < new_corr[n].length; d++){
for (int i = 0; i < new_corr[n][d].length; i++){
comp_diff += new_corr[n][d][i] * new_corr[n][d][i];
num_pars++;
}
}
}
comp_diff = Math.sqrt(comp_diff/num_pars);
if (debugLevel > -2) {
if ((debugLevel > -1) || (comp_diff < min_poly_update)) {
System.out.println("#### fine correction iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
comp_diff + " ("+min_poly_update+")");
}
}
if (comp_diff < min_poly_update) { // add other parameter to exit from poly
break;
}
} else {
for (int i = 0; i < new_corr[0][0].length; i++){
comp_diff += new_corr[0][0][i] * new_corr[0][0][i];
}
comp_diff = Math.sqrt(comp_diff);
boolean done = (comp_diff < min_sym_update) || (num_iter == (max_tries - 1));
// System.out.println("done="+done);
if (debugLevel > -10) { // should work even in batch mode
System.out.println("#### extrinsicsCLT(): iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
comp_diff + " ("+min_sym_update+"), previous RMS = " + new_corr[0][1][0]+ " (debugLevel = "+debugLevel+")");
}
if (debugLevel > -10) {
if ((debugLevel > -1) || done) {
// System.out.println("#### extrinsicsCLT(): iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
// comp_diff + " ("+min_sym_update+"), previous RMS = " + new_corr[0][1][0]);
System.out.println("New extrinsic corrections:");
System.out.println(geometryCorrection.getCorrVector().toString());
}
}
if (comp_diff < min_sym_update) {
break;
}
}
}
}
return true; // (comp_diff < (adjust_poly ? min_poly_update : min_sym_update));
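Both branches above stop iterating when the RMS of the newly computed correction terms falls below the corresponding threshold (min_poly_update for the polynomial branch, min_sym_update for the extrinsic branch). A minimal sketch of the poly-branch metric, following the same convention as the loop above (names are illustrative):

public class FineCorrConvergenceSketch {
    // RMS over all fine-correction coefficients, compared against min_poly_update.
    static double rms(double[][][] newCorr) {
        double sum = 0.0;
        int numPars = 0;
        for (double[][] perCam : newCorr)
            for (double[] perDir : perCam)
                for (double c : perDir) { sum += c * c; numPars++; }
        return Math.sqrt(sum / numPars);
    }
    public static void main(String[] args) {
        double[][][] corr = {{{0.01, -0.02}, {0.005, 0.0}}}; // made-up coefficients
        System.out.println(rms(corr) < 0.02 ? "converged" : "iterate"); // converged
    }
}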
......@@ -9651,7 +9822,7 @@ public class QuadCLT {
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
scanIndex, // final int scanIndex,
save_textures, // final boolean save_textures,
true, // final boolean save_corr,
true, // final boolean save_corr,
null, // final double [][] mismatch, // null or double [12][]
threadsMax, // final int threadsMax, // maximal number of threads to launch
updateStatus, // final boolean updateStatus,
......@@ -9725,15 +9896,17 @@ public class QuadCLT {
debugLevel); // final int debugLevel);
}
public CLTPass3d CLTMeasure( // perform single pass according to prepared tiles operations and disparity // USED in lwir
// final String image_name,
final double [][][] image_data, // first index - number of image in a quad
final boolean [][] saturation_imp, // (near) saturated pixels or null
final CLTParameters clt_parameters,
final CLTParameters clt_parameters,
final int scanIndex,
final boolean save_textures,
final boolean save_corr,
final double [][] mismatch, // null or double [12][]
final double [][] mismatch, // null or double [12][] or [numClusters][] for new LMA
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel)
......@@ -9802,7 +9975,6 @@ public class QuadCLT {
z_correction +=clt_parameters.z_corr_map.get(image_name);
}
final double disparity_corr = (z_correction == 0) ? 0.0 : geometryCorrection.getDisparityFromZ(1.0/z_correction);
image_dtt.clt_aberrations_quad_corr(
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
1, // final int macro_scale, // to correlate tile data instead of the pixel data: 1 - pixels, 8 - tiles
......@@ -9828,8 +10000,8 @@ public class QuadCLT {
min_corr_selected, // 0.0001; // minimal correlation value to consider valid
clt_parameters.max_corr_sigma,// 1.5; // weights of points around global max to find fractional
clt_parameters.max_corr_radius,
// clt_parameters.enhortho_width, // 2; // reduce weight of center correlation pixels from center (0 - none, 1 - center, 2 +/-1 from center)
// clt_parameters.enhortho_scale, // 0.2; // multiply center correlation pixels (inside enhortho_width)
// clt_parameters.enhortho_width, // 2; // reduce weight of center correlation pixels from center (0 - none, 1 - center, 2 +/-1 from center)
// clt_parameters.enhortho_scale, // 0.2; // multiply center correlation pixels (inside enhortho_width)
clt_parameters.max_corr_double, // Double pass when masking center of mass to reduce preference for integer values
clt_parameters.corr_mode, // Correlation mode: 0 - integer max, 1 - center of mass, 2 - polynomial
clt_parameters.min_shot, // 10.0; // Do not adjust for shot noise if lower than
......@@ -9862,7 +10034,7 @@ public class QuadCLT {
scan.disparity_map = disparity_map;
scan.texture_tiles = texture_tiles;
scan.is_measured = true;
scan.is_measured = true; // but no disparity map/textures
scan.is_combo = false;
scan.resetProcessed();
return scan;
......@@ -10000,6 +10172,160 @@ public class QuadCLT {
}
public CLTPass3d CLTMeasureLY( // perform single pass according to prepared tiles operations and disparity // USED in lwir
final double [][][] image_data, // first index - number of image in a quad
final boolean [][] saturation_imp, // (near) saturated pixels or null
final CLTParameters clt_parameters,
final int scanIndex,
final int bgIndex, // combine, if >=0
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
int debugLevel)
{
final int dbg_x = -295-debugLevel;
final int dbg_y = -160-debugLevel;
final int tilesX = tp.getTilesX();
final int tilesY = tp.getTilesY();
final int cluster_size =clt_parameters.tileStep;
final int clustersX= (tilesX + cluster_size - 1) / cluster_size;
final int clustersY= (tilesY + cluster_size - 1) / cluster_size;
CLTPass3d scan = tp.clt_3d_passes.get(scanIndex);
scan.setLazyEyeClusterSize(cluster_size);
boolean [] force_disparity= new boolean[clustersX * clustersY];
// scan.setLazyEyeForceDisparity(force_disparity);
if (bgIndex >= 0) {
CLTPass3d bg_scan = tp.clt_3d_passes.get(bgIndex);
// if at least one tile in a cluster is BG, use BG for the whole cluster and set lazy_eye_force_disparity
for (int cY = 0; cY < clustersY; cY ++) {
for (int cX = 0; cX < clustersX; cX ++) {
boolean has_bg = false;
for (int cty = 0; (cty < cluster_size) && !has_bg; cty++) {
int ty = cY * cluster_size + cty;
if (ty < tilesY) for (int ctx = 0; ctx < cluster_size; ctx++) {
int tx = cX * cluster_size + ctx;
if ((tx < tilesX ) && (bg_scan.tile_op[ty][tx] > 0)) {
has_bg = true;
break;
}
}
}
if (has_bg) {
for (int cty = 0; cty < cluster_size; cty++) {
int ty = cY * cluster_size + cty;
if (ty < tilesY) for (int ctx = 0; ctx < cluster_size; ctx++) {
int tx = cX * cluster_size + ctx;
if (tx < tilesX ) {
scan.tile_op[ty][tx] = bg_scan.tile_op[ty][tx];
scan.disparity[ty][tx] = bg_scan.disparity[ty][tx];
}
}
}
force_disparity[cY * clustersX + cX] = true;
}
}
}
scan.setLazyEyeForceDisparity(force_disparity);
}
int [][] tile_op = scan.tile_op;
double [][] disparity_array = scan.disparity;
// Should not happen !
if (scan.disparity == null) { // not used in lwir
System.out.println ("** BUG: should not happen - scan.disparity == null ! **");
System.out.println ("Trying to recover");
double [] backup_disparity = scan.getDisparity(0);
if (backup_disparity == null) {
System.out.println ("** BUG: no disparity at all !");
backup_disparity = new double[tilesX*tilesY];
}
scan.disparity = new double[tilesY][tilesX];
for (int ty = 0; ty < tilesY; ty++) {
for (int tx = 0; tx < tilesX; tx++) {
scan.disparity[ty][tx] = backup_disparity[ty*tilesX + tx];
if (Double.isNaN(scan.disparity[ty][tx])) {
scan.disparity[ty][tx] = 0;
tile_op[ty][tx] = 0;
}
}
}
disparity_array = scan.disparity;
}
if (debugLevel > -1){
int numTiles = 0;
for (int ty = 0; ty < tile_op.length; ty ++) for (int tx = 0; tx < tile_op[ty].length; tx ++){
if (tile_op[ty][tx] != 0) numTiles ++;
}
System.out.println("CLTMeasure("+scanIndex+"): numTiles = "+numTiles);
if ((dbg_y >= 0) && (dbg_x >= 0) && (tile_op[dbg_y][dbg_x] != 0)){
System.out.println("CLTMeasure("+scanIndex+"): tile_op["+dbg_y+"]["+dbg_x+"] = "+tile_op[dbg_y][dbg_x]);
}
}
double min_corr_selected = clt_parameters.min_corr;
double [][] shiftXY = new double [4][2];
if (!clt_parameters.fine_corr_ignore) {
double [][] shiftXY0 = {
{clt_parameters.fine_corr_x_0,clt_parameters.fine_corr_y_0},
{clt_parameters.fine_corr_x_1,clt_parameters.fine_corr_y_1},
{clt_parameters.fine_corr_x_2,clt_parameters.fine_corr_y_2},
{clt_parameters.fine_corr_x_3,clt_parameters.fine_corr_y_3}};
shiftXY = shiftXY0;
}
ImageDtt image_dtt = new ImageDtt(isMonochrome(),clt_parameters.getScaleStrength(isAux()));
double z_correction = clt_parameters.z_correction;
if (clt_parameters.z_corr_map.containsKey(image_name)){ // not used in lwir
z_correction +=clt_parameters.z_corr_map.get(image_name);
}
final double disparity_corr = (z_correction == 0) ? 0.0 : geometryCorrection.getDisparityFromZ(1.0/z_correction);
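// Note: a zero z_correction keeps the infinity offset at zero; otherwise the offset is the disparity
// of a point at distance 1.0/z_correction, as computed by geometryCorrection.getDisparityFromZ() above.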
tp.showScan(
scan, // CLTPass3d scan,
"LY-combo_scan-"+scan+"_post"); //String title)
// use new, LMA-based mismatch calculation
double [][] lazy_eye_data = image_dtt.cltMeasureLazyEye ( // returns d,s lazy eye parameters
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tile_op, // per-tile operation bit codes
disparity_array, // clt_parameters.disparity, // final double disparity,
image_data, // final double [][][] image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
null, // final double [][] clt_mismatch, // [12][tilesY * tilesX] // ***** transpose unapplied ***** ?. null - do not calculate
// values in the "main" directions have disparity (*_CM) subtracted, in the perpendicular - as is
null, // disparity_map, // [12][tp.tilesY * tp.tilesX]
tilesX * clt_parameters.transform_size, // imp_quad[0].getWidth(), // final int width,
clt_parameters.getFatZero(isMonochrome()), // add to denominator to modify phase correlation (same units as data1, data2). <0 - pure sum
clt_parameters.corr_red,
clt_parameters.corr_blue,
clt_parameters.getCorrSigma(image_dtt.isMonochrome()),
min_corr_selected, // 0.0001; // minimal correlation value to consider valid
geometryCorrection, // final GeometryCorrection geometryCorrection,
null, // final GeometryCorrection geometryCorrection_main, // if not null correct this camera (aux) to the coordinates of the main
clt_kernels, // final double [][][][][][] clt_kernels, // [channel_in_quad][color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
clt_parameters.kernel_step,
clt_parameters.transform_size,
clt_parameters.clt_window,
shiftXY, //
disparity_corr, // final double disparity_corr, // disparity at infinity
clt_parameters.shift_x, // final int shiftX, // shift image horizontally (positive - right) - just for testing
clt_parameters.shift_y, // final int shiftY, // shift image vertically (positive - down)
clt_parameters.tileStep, // final int tileStep, // process tileStep x tileStep cluster of tiles when adjusting lazy eye parameters
clt_parameters.tileX, // final int debug_tileX,
clt_parameters.tileY, // final int debug_tileY,
threadsMax,
debugLevel - 2); // -0);
scan.setLazyEyeData(lazy_eye_data);
scan.is_measured = true; // but no disparity map/textures
scan.is_combo = false;
scan.resetProcessed();
return scan;
}
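CLTMeasureLY works on tileStep x tileStep clusters: the cluster grid is the tile grid rounded up, and a flat cluster index is cY * clustersX + cX, as in the loops above. A minimal sketch of that arithmetic (the sizes below are made-up examples):

public class ClusterGridSketch {
    // ceil(tiles / clusterSize) without floating point
    static int clustersAlong(int tiles, int clusterSize) {
        return (tiles + clusterSize - 1) / clusterSize;
    }
    public static void main(String[] args) {
        int tilesX = 324, tilesY = 242, tileStep = 4;    // example sizes only
        int clustersX = clustersAlong(tilesX, tileStep); // 81
        int clustersY = clustersAlong(tilesY, tileStep); // 61
        int cX = 10, cY = 5;
        int nCluster = cY * clustersX + cX;              // flat cluster index, as used for lazy_eye_data / force_disparity
        System.out.println(clustersX + " x " + clustersY + " clusters, index " + nCluster);
    }
}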
public ImagePlus [] conditionImageSetBatch( // used in batchCLT3d // not used in lwir
......
......@@ -8255,7 +8255,7 @@ if (debugLevel > -100) return true; // temporarily !
final boolean updateStatus,
final int debugLevel) throws Exception
{
// final boolean batch_mode = clt_parameters.batch_run;
// final boolean batch_mode = clt_parameters.batch_run;
// Reset dsi data (only 2 slices will be used)
this.dsi = new double [DSI_SLICES.length][];
this.dsi_aux_from_main = null; // full data, including rms, fg and bg data
......