Commit 71aa98de authored by Andrey Filippov's avatar Andrey Filippov

correction scale

parent 6bc620fb
......@@ -1876,53 +1876,61 @@ B = |+dy0 -dy1 -2*dy3 |
}
// create list for infinity data
ArrayList<Sample> inf_samples_list = selectInfinityTiles(
clt_parameters.fcorr_radius, // final double fcorr_radius,
clt_parameters.fcorr_inf_vert,// final boolean use_vertical,
0.0, // any > 0.0
max_diff, // max_diff, //clt_parameters.fcorr_inf_diff
max_iterations, // max_iterations, // clt_parameters.inf_iters
max_coeff_diff, // max_coeff_diff, // clt_parameters.inf_final_diff
far_pull, // far_pull, // clt_parameters.inf_far_pull, = 0.2; // 1; // 0.5;
clt_parameters,
inf_scan,
tilesX,
magic_coeff, // magic_coeff, // still not understood coefficient that reduces reported disparity value. Seems to be around 8.5
debugLevel);
// /clt_parameters.ly_inf_en,
ArrayList<Sample> inf_samples_list;
if (clt_parameters.ly_inf_en) {
inf_samples_list = selectInfinityTiles(
clt_parameters.fcorr_radius, // final double fcorr_radius,
clt_parameters.fcorr_inf_vert,// final boolean use_vertical,
0.0, // any > 0.0
max_diff, // max_diff, //clt_parameters.fcorr_inf_diff
max_iterations, // max_iterations, // clt_parameters.inf_iters
max_coeff_diff, // max_coeff_diff, // clt_parameters.inf_final_diff
far_pull, // far_pull, // clt_parameters.inf_far_pull, = 0.2; // 1; // 0.5;
clt_parameters,
inf_scan,
tilesX,
magic_coeff, // magic_coeff, // still not understood coefficient that reduces reported disparity value. Seems to be around 8.5
debugLevel);
if (debugLevel > -1) {
double inf_weight = 0.0;
if (debugLevel > -1) {
double inf_weight = 0.0;
for (Sample s: inf_samples_list) {
inf_weight += s.weight;
}
System.out.println("lazyEyeCorrection(): number of infinity samples="+inf_samples_list.size()+", total weight = "+inf_weight);
}
// adjust weight to balance infinity data and lazy eye data. As some tiles were discarded by selectInfinityTiles(),
// the original array (not the filtered list) has to be used to find the total weight of the infinity tiles. Other ones will be used with no extra filtering
double [] total_weights = new double[2];
for (Sample s: inf_samples_list) {
inf_weight += s.weight;
total_weights[0] += s.weight;
}
System.out.println("lazyEyeCorrection(): number of infinity samples="+inf_samples_list.size()+", total weight = "+inf_weight);
}
// adjust weight to balance infinity data and lazy eye data. As some tiles were discarded by selectInfinityTiles(),
// the original array (not the filtered list) has to be used to find the total weight of the infinity tiles. Other ones will be used with no extra filtering
double [] total_weights = new double[2];
for (Sample s: inf_samples_list) {
total_weights[0] += s.weight;
}
for (int nTile = 0; nTile < num_tiles; nTile++) if (center_mask[nTile]){
total_weights[1]+= inf_and_ly[1 * NUM_SLICES + 1][nTile];
}
for (int nTile = 0; nTile < num_tiles; nTile++) if (center_mask[nTile]){
total_weights[1]+= inf_and_ly[1 * NUM_SLICES + 1][nTile];
}
double [] weights = {
inf_fraction * (total_weights[0] + total_weights[1]) / total_weights[0],
(1.0 - inf_fraction) * (total_weights[0] + total_weights[1]) / total_weights[1],
};
double [] weights = {
inf_fraction * (total_weights[0] + total_weights[1]) / total_weights[0],
(1.0 - inf_fraction) * (total_weights[0] + total_weights[1]) / total_weights[1],
};
for (int ns = 0; ns <2; ns++) {
for (int nTile = 0; nTile < num_tiles; nTile++) {
inf_and_ly[ns * NUM_SLICES + 1][nTile] *= weights[ns];
for (int ns = 0; ns <2; ns++) {
for (int nTile = 0; nTile < num_tiles; nTile++) {
inf_and_ly[ns * NUM_SLICES + 1][nTile] *= weights[ns];
}
}
for (Sample s: inf_samples_list) {
s.weight *= weights[0];
}
} else {
inf_samples_list = new ArrayList<Sample>(); // do not use infinity at all
}
for (Sample s: inf_samples_list) {
s.weight *= weights[0];
}
///-----
// Supplement list with the lazy eye scans data - use all tiles
for (int nTile = 0; nTile < num_tiles; nTile++) if (center_mask[nTile]) {
double w = inf_and_ly[1 * NUM_SLICES + 1][nTile];
......@@ -1995,7 +2003,7 @@ B = |+dy0 -dy1 -2*dy3 |
System.out.println(corr_vector.toString());
}
if (apply_extrinsic){
qc.geometryCorrection.getCorrVector().incrementVector(corr_vector);
qc.geometryCorrection.getCorrVector().incrementVector(corr_vector, clt_parameters.ly_corr_scale);
if (debugLevel > -1){
System.out.println("New extrinsic corrections:");
System.out.println(qc.geometryCorrection.getCorrVector().toString());
......
......@@ -2050,7 +2050,7 @@ public class EyesisCorrectionParameters {
public double ly_disp_var = 0.5; // 2; // Maximal full disparity difference to 8 neighbors
public double ly_inf_frac = 0.5; // Relative weight of infinity calibration data
public boolean ly_on_scan = true; // Calculate and apply lazy eye correction after disparity scan (poly or extrinsic)
public boolean ly_inf_en = true; // Simultaneously correct disparity at infinity (both poly and extrinsic)
public boolean ly_inf_en = false; // true; // Simultaneously correct disparity at infinity (both poly and extrinsic)
public boolean ly_inf_force= false; // Force convergence correction during extrinsic, even with no infinity data
public boolean ly_poly = false; // Use polynomial correction, false - correct tilt/azimuth/roll of each sensor
......@@ -2060,6 +2060,7 @@ public class EyesisCorrectionParameters {
public double lyf_frac_keep = 0.5; // Keep best fit samples, discard worst
public int lyf_min_samples = 5; // Minimal number of tiles remaining in the sample
public boolean lyf_norm_center = true; // Replace samples with a single average with equal weight
public double ly_corr_scale = 1.0; // Scale calculated correction vector
// old fcorr parameters, reuse?
// public int fcorr_sample_size = 32; // Use square this size side to detect outliers
......@@ -2695,6 +2696,7 @@ public class EyesisCorrectionParameters {
properties.setProperty(prefix+"lyf_frac_keep", this.lyf_frac_keep +"");
properties.setProperty(prefix+"lyf_min_samples", this.lyf_min_samples+"");
properties.setProperty(prefix+"lyf_norm_center", this.lyf_norm_center+"");
properties.setProperty(prefix+"ly_corr_scale", this.ly_corr_scale +"");
properties.setProperty(prefix+"corr_magic_scale", this.corr_magic_scale +"");
......@@ -3282,6 +3284,7 @@ public class EyesisCorrectionParameters {
if (properties.getProperty(prefix+"lyf_frac_keep")!=null) this.lyf_frac_keep=Double.parseDouble(properties.getProperty(prefix+"lyf_frac_keep"));
if (properties.getProperty(prefix+"lyf_min_samples")!=null) this.lyf_min_samples=Integer.parseInt(properties.getProperty(prefix+"lyf_min_samples"));
if (properties.getProperty(prefix+"lyf_norm_center")!=null) this.lyf_norm_center=Boolean.parseBoolean(properties.getProperty(prefix+"lyf_norm_center"));
if (properties.getProperty(prefix+"ly_corr_scale")!=null) this.ly_corr_scale=Double.parseDouble(properties.getProperty(prefix+"ly_corr_scale"));
if (properties.getProperty(prefix+"corr_magic_scale")!=null) this.corr_magic_scale=Double.parseDouble(properties.getProperty(prefix+"corr_magic_scale"));
......@@ -3886,6 +3889,7 @@ public class EyesisCorrectionParameters {
gd.addNumericField("Keep best fit samples, discard worst", this.lyf_frac_keep, 3);
gd.addNumericField("Minimal number of tiles remaining in the sample", this.lyf_min_samples, 0);
gd.addCheckbox ("Replace samples with a single average with equal weight", this.lyf_norm_center);
gd.addNumericField("Scale calculated correction vector", this.ly_corr_scale, 3);
gd.addMessage ("---");
// gd.addNumericField("Use square this size side to detect outliers", this.fcorr_sample_size, 0);
// gd.addNumericField("Keep tiles only if there are more in each square", this.fcorr_mintiles, 0);
......@@ -4510,6 +4514,7 @@ public class EyesisCorrectionParameters {
this.lyf_frac_keep= gd.getNextNumber();
this.lyf_min_samples= (int) gd.getNextNumber();
this.lyf_norm_center= gd.getNextBoolean();
this.ly_corr_scale= gd.getNextNumber();
// this.fcorr_sample_size= (int)gd.getNextNumber();
// this.fcorr_mintiles= (int) gd.getNextNumber();
// this.fcorr_reloutliers= gd.getNextNumber();
......
......@@ -215,16 +215,17 @@ public class GeometryCorrection {
return s;
}
public void incrementVector(double [] incr)
public void incrementVector(double [] incr,
double scale)
{
for (int i = 0; i < incr.length; i++){
vector[i]+=incr[i];
vector[i]+= incr[i] * scale;
}
}
public void incrementVector(CorrVector incr)
public void incrementVector(CorrVector incr, double scale)
{
incrementVector(incr.toArray());
incrementVector(incr.toArray(), scale);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment