Commit 046d8eb3 authored by Andrey Filippov

working on expansion + bug fixes

parent 89dfef44
/**
**
** AlignmentCorrection - try to apply minor adjustments to the misaligned camera
...@@ -29,6 +21,13 @@ import ij.WindowManager;
** -----------------------------------------------------------------------------**
**
*/
import java.util.ArrayList;
import Jama.Matrix;
import ij.IJ;
import ij.ImagePlus;
import ij.ImageStack;
import ij.WindowManager;
public class AlignmentCorrection {
static int NUM_SLICES = 10; // disp, strength, dx0, dy0, dx1, dy1, dx2, dy2, dx3, dy3)
...
import java.util.concurrent.atomic.AtomicInteger;
/**
**
** ExtendSurfaces - predict disparity in the unknown areas
**
** Copyright (C) 2017 Elphel, Inc.
**
** -----------------------------------------------------------------------------**
**
** ExtendSurfaces.java is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program. If not, see <http://www.gnu.org/licenses/>.
** -----------------------------------------------------------------------------**
**
*/
public class ExtendSurfaces {
TileProcessor tp;
public ExtendSurfaces(TileProcessor tp){
this.tp = tp;
}
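// smoothNew() iteratively predicts values for the cells listed in new_cells: each pass
// (smoothNewStep) fits a damped plane to the already known / previously predicted values in a
// smpl_size x smpl_size neighborhood of every new cell, until either max_tries passes are done
// or the largest per-cell correction falls to final_diff. It returns the number of new cells
// that ended up with a non-NaN value.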
public int smoothNew( // return maximal correction value
// should first be initialized with all not known = Double.NaN
final double [] values, // will be modified, Double.NaN is not yet assigned
// final double [] strengths, // cell weights
final boolean [] known, // cells with known values (will not be modified)
final boolean [] new_cells, // cells that need to be calculated
final int smpl_size, // == 5
final boolean use_wnd, // use window function for the neighbors
final double tilt_cost,
final double split_threshold, // if full range of the values around the cell higher, need separate fg, bg
final double same_range, // disparity range to be considered the same surface (see en_far/en_near)
final boolean en_normal, // expand single-range cells (that have all neighbors within split_threshold)
final boolean en_far, // expand background - use neighbors cells within same_range from the lowest
// en_near is only valid if !en_far
final boolean en_near, // expand foreground - use neighbors cells within same_range from the highest
final int max_tries, // maximal number of smoothing steps
final double final_diff, // maximal change to finish iterations
final int dbg_x,
final int dbg_y,
final int debugLevel)
{
for (int nTile = 0; nTile < values.length; nTile++){
if (!known[nTile]) values [nTile] = Double.NaN;
}
for (int num_try = 0; num_try < max_tries; num_try ++){
double max_diff = smoothNewStep( // return maximal correction value
// should first be initialized with all not known = Double.NaN
values, // final double [] values, // will be modified, Double.NaN is not yet assigned
new_cells, // final boolean [] new_cells, // cells that need to be calculated
smpl_size, // final int smpl_size, // == 5
use_wnd, // final boolean use_wnd, // use window function for the neighbors
tilt_cost, // final double tilt_cost,
split_threshold, // final double split_threshold, // if full range of the values around the cell higher, need separate fg, bg
same_range, // final double same_range, // disparity range to be considered the same surface (see en_far/en_near)
en_normal, // final boolean en_normal, // expand single-range cells (that have all neighbors within split_threshold)
en_far, // final boolean en_far, // expand background - use neighbors cells within same_range from the lowest
// en_near is only valid if !en_far
en_near, // final boolean en_near, // expand foreground - use neighbors cells within same_range from the highest
dbg_x, // final int dbg_x,
dbg_y, // final int dbg_y,
debugLevel); // final int debugLevel);
if (max_diff <= final_diff) break;
}
int num_new = 0;
for (int nTile = 0; nTile < values.length; nTile++){
if (new_cells[nTile] && !Double.isNaN(values[nTile])) num_new++;
}
return num_new;
}
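// Illustrative call (hypothetical names, parameter values are only examples):
// ExtendSurfaces es = new ExtendSurfaces(tp); // tp - an existing TileProcessor
// int num_filled = es.smoothNew(
// disparity, // double [] values, tilesX*tilesY, NaN where not yet assigned
// known, // boolean [] known
// candidates, // boolean [] new_cells to be predicted
// 5, // smpl_size
// true, // use_wnd
// 0.001, // tilt_cost
// 1.0, // split_threshold
// 0.5, // same_range
// true, false, false, // en_normal, en_far, en_near
// 20, // max_tries
// 0.01, // final_diff
// -1, -1, 0); // dbg_x, dbg_y, debugLevel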
public double smoothNewStep( // return maximal correction value
// should first be initialized with all not known = Double.NaN
final double [] values, // will be modified, Double.NaN is not yet assigned
final boolean [] new_cells, // cells that need to be calculated
final int smpl_size, // == 5
final boolean use_wnd, // use window function for the neighbors
final double tilt_cost,
final double split_threshold, // if full range of the values around the cell higher, need separate fg, bg
final double same_range, // disparity range to be considered the same surface (see en_far/en_near)
final boolean en_normal, // expand single-range cells (that have all neighbors within split_threshold)
final boolean en_far, // expand background - use neighbors cells within same_range from the lowest
// en_near is only valid if !en_far
final boolean en_near, // expand foreground - use neighbors cells within same_range from the highest
final int dbg_x,
final int dbg_y,
final int debugLevel)
{
final int tilesX = tp.getTilesX();
final int tilesY = tp.getTilesY();
final int smpl_half = smpl_size / 2; // 2
// probably should not be needed
final double thresholdLin = 1.0E-20; // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail)
final double [] damping = {tilt_cost,tilt_cost,0.0}; // 0.0 will be applied to average value, tilt_cost - to both tilts
final int center_index = smpl_half * (smpl_size + 1);
// final int smpl_high = smpl_size - smpl_low; // 3
final int smpl_len = smpl_size * smpl_size;
final int dbg_tile = dbg_x + tilesX * dbg_y;
final double [] smpl_weights = MeasuredLayers.getSampleWindow(smpl_size, !use_wnd);
final Thread[] threads = ImageDtt.newThreadArray(tp.threadsMax);
final double [] max_corrs = new double [threads.length];
final AtomicInteger ai_thread = new AtomicInteger(0);
final AtomicInteger ai = new AtomicInteger(0);
final double [] new_vals = values.clone();
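// neib_indices will hold the 8 positions in the smpl_size x smpl_size sample window that are
// directly adjacent to the central cell; they are used below to require at least one immediate
// neighbor before attempting a fit.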
final int [] neib_indices = new int [8]; // indices of the sample array immediately around the center;
int ni = 0;
for (int sy = -1; sy <= 1; sy++) {
for (int sx = -1; sx <= 1; sx++) {
if ((sy != 0) || (sx != 0)){
neib_indices[ni++] = (smpl_half + sx) + (smpl_half + sy) * smpl_size;
}
}
}
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
PolynomialApproximation pa = new PolynomialApproximation();
int this_thread = ai_thread.getAndIncrement();
for (int nTile = ai.getAndIncrement(); nTile < new_vals.length; nTile = ai.getAndIncrement()) {
if (new_cells[nTile]) {
int dl = (nTile == dbg_tile) ? debugLevel : -1;
int tileX = nTile % tilesX;
int tileY = nTile / tilesX;
// boolean [] smpl_new = new boolean [smpl_len];
boolean [] smpl_sel = new boolean [smpl_len];
double [] smpl_d = new double [smpl_len];
// double [] smpl_p = new double [smpl_len];
double [] smpl_w = new double [smpl_len];
int num_in_sample = 0;
for (int sy = 0; sy < smpl_size; sy++) {
int y = tileY + sy - smpl_half;
if ((y >= 0) && (y < tilesY)) {
for (int sx = 0; sx < smpl_size; sx++) {
int x = tileX + sx - smpl_half;
if ((x >= 0) && (x < tilesX)) {
int indx = x + y * tilesX;
int indxs = sx + sy * smpl_size;
if (!Double.isNaN(values[indx]) && (indxs != center_index)){ // skip the center cell itself
smpl_sel[indxs] = true;
smpl_d[indxs] = values[indx];
smpl_w[indxs] = smpl_weights[indxs]; // sample weight from the window function
num_in_sample++;
}
}
}
}
}
if (num_in_sample > 0) {
// add starting from the known cells. When using 5x5 samples, having immediate neighbor
// to the center usually means that likely there is enough data to determine the tilt.
boolean has_neib = false;
for (int i = 0; i < neib_indices.length; i++) if (smpl_sel[neib_indices[i]]) {
has_neib = true;
break;
}
boolean process_sample = false;
if (has_neib) { // all conditions met, find linear 2-d damped approximation for the center
// if center was NaN - diff is the absolute value.
int imin = -1, imax = -1;
for (int indxs = 0; indxs < smpl_len; indxs ++ ) if (smpl_sel[indxs]){
if ((imin < 0) || (smpl_d[indxs] < smpl_d[imin])) imin = indxs ;
if ((imax < 0) || (smpl_d[indxs] > smpl_d[imax])) imax = indxs ;
}
// does it all fit
if ((smpl_d[imax] - smpl_d[imin]) <= split_threshold){
process_sample = en_normal;
} else {
if (en_far) {
// expand background: keep only cells within same_range of the lowest disparity
double threshold = smpl_d[imin] + same_range;
for (int indxs = 0; indxs < smpl_len; indxs ++ ) if (smpl_sel[indxs]){
if (smpl_d[indxs] > threshold) {
smpl_sel[indxs] = false;
num_in_sample--;
}
}
process_sample = true;
} else if (en_near){
// expand foreground: keep only cells within same_range of the highest disparity
double threshold = smpl_d[imax] - same_range;
for (int indxs = 0; indxs < smpl_len; indxs ++ ) if (smpl_sel[indxs]){
if (smpl_d[indxs] < threshold) {
smpl_sel[indxs] = false;
num_in_sample--;
}
}
process_sample = true;
}
// removed some points - recheck neighbors
if (process_sample){
has_neib = false;
for (int i = 0; i < neib_indices.length; i++) if (smpl_sel[neib_indices[i]]) {
has_neib = true;
break;
}
if (!has_neib) process_sample = false; // neighbor was removed, will process
// with other direction
}
}
}
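// At this point process_sample is true only if the (possibly reduced) selection still spans a
// single disparity range and still contains at least one of the 8 cells adjacent to the center.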
if (process_sample) {
// build linear 2d approximation, damped as there could be co-linear cells
// or even a single cell
double sw = 0.0;
double [][][] mdata = new double [num_in_sample][3][];
int mindx = 0;
for (int sy = 0; sy < smpl_size; sy++){
for (int sx = 0; sx < smpl_size; sx++){
int indxs = sy * smpl_size + sx;
if (smpl_sel[indxs]) {
mdata[mindx][0] = new double [2];
mdata[mindx][0][0] = sx - smpl_half; // should match integer center
mdata[mindx][0][1] = sy - smpl_half;
mdata[mindx][1] = new double [1];
mdata[mindx][1][0] = smpl_d[indxs];
mdata[mindx][2] = new double [1];
mdata[mindx][2][0] = smpl_w[indxs];
mindx ++;
}
}
}
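// Each mdata row follows the PolynomialApproximation convention: [ {x,y}, {z}, {weight} ].
// With forceLinear and damping = {tilt_cost, tilt_cost, 0.0} the call below fits
// z = D*x + E*y + F, penalizing the two tilt coefficients (D,E) but not the offset F, so even a
// single selected cell or a co-linear set still yields a usable (near-constant) plane.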
double[][] approx2d = pa.quadraticApproximation(
mdata,
true, // boolean forceLinear, // use linear approximation
damping, // double [] damping,
thresholdLin, // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail)
0.0, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
dl); // int debugLevel;
if (approx2d == null) {
System.out.println("A BUG in smoothNewStep() - failed quadraticApproximation()");
continue;
}
double new_center_val = approx2d[0][0]; // Last coefficient, F (of 6)
double this_corr;
if (Double.isNaN(values[nTile])) this_corr = Math.abs(new_center_val);
else this_corr = Math.abs(new_center_val - values[nTile]);
new_vals[nTile] = new_center_val;
max_corrs[this_thread] = Math.max(max_corrs[this_thread], this_corr);
}
}
}
}
}
};
}
ImageDtt.startAndJoin(threads); // start the worker threads and wait for them to finish
System.arraycopy(new_vals, 0, values, 0, new_vals.length);
double max_corr = 0;
for (int i = 0; i < max_corrs.length; i++) max_corr = Math.max(max_corr,max_corrs[i]);
return max_corr;
}
}
/*
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) {
// int dl = (nTile == dbg_tile) ? debugLevel : -1;
int tileX = nTile % tilesX;
int tileY = nTile / tilesX;
measured[nTile] = measured_scan.tile_op[tileY][tileX] != 0;
if (!measured[nTile]) disp_strength[1][nTile] = -1.0;
}
}
};
}
ImageDtt.startAndJoin(threads);
*/
\ No newline at end of file
...@@ -2070,6 +2070,9 @@ public class EyesisCorrectionParameters {
public double ex_strength = 0.18; // minimal 4-corr strength to trust tile
public double ex_nstrength = 0.4; // minimal 4-corr strength divided by channel diff for new (border) tiles
public boolean ex_over_bgnd = false; // Allow expansion over previously identified background (infinity)
public double ex_min_over = 1.0; // When expanding over background, disregard lower disparity
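// ex_over_bgnd / ex_min_over control the extra expansion phase in QuadCLT: when normal expansion
// stops, tiles previously classified as background (infinity) may be re-tried, ignoring
// disparities below ex_min_over.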
public double pt_super_trust = 1.6; // If strength exceeds ex_strength * super_trust, do not apply ex_nstrength and plate_ds
public boolean pt_keep_raw_fg = true; // Do not replace raw tiles by the plates, if raw is closer (like poles)
public double pt_scale_pre = 1.5; // Scale plates strength before comparing to raw strength
...@@ -2417,7 +2420,7 @@ public class EyesisCorrectionParameters {
public boolean tsLoopMulti = true; // Repeat multi-choice assignment while succeeding
public boolean tsReset = false; // Reset tiles to surfaces assignment
public boolean tsShow = false; // Show results of tiles to surfaces assignment
public int tsNumClust = 500; // Number of clusters to keep
public int tsConsensMode = 7; // Which assignments to match +1 - combo, +2 grown single, +4 plane seeds
public int tsConsensAgree = 1; // Minimal number of assignments to agree
...@@ -2624,6 +2627,9 @@ public class EyesisCorrectionParameters {
properties.setProperty(prefix+"ex_strength", this.ex_strength +"");
properties.setProperty(prefix+"ex_nstrength", this.ex_nstrength +"");
properties.setProperty(prefix+"ex_over_bgnd", this.ex_over_bgnd+"");
properties.setProperty(prefix+"ex_min_over", this.ex_min_over +"");
properties.setProperty(prefix+"pt_super_trust", this.pt_super_trust +""); properties.setProperty(prefix+"pt_super_trust", this.pt_super_trust +"");
properties.setProperty(prefix+"pt_keep_raw_fg", this.pt_keep_raw_fg+""); properties.setProperty(prefix+"pt_keep_raw_fg", this.pt_keep_raw_fg+"");
properties.setProperty(prefix+"pt_scale_pre", this.pt_scale_pre +""); properties.setProperty(prefix+"pt_scale_pre", this.pt_scale_pre +"");
...@@ -3141,6 +3147,9 @@ public class EyesisCorrectionParameters { ...@@ -3141,6 +3147,9 @@ public class EyesisCorrectionParameters {
if (properties.getProperty(prefix+"ex_strength")!=null) this.ex_strength=Double.parseDouble(properties.getProperty(prefix+"ex_strength")); if (properties.getProperty(prefix+"ex_strength")!=null) this.ex_strength=Double.parseDouble(properties.getProperty(prefix+"ex_strength"));
if (properties.getProperty(prefix+"ex_nstrength")!=null) this.ex_nstrength=Double.parseDouble(properties.getProperty(prefix+"ex_nstrength")); if (properties.getProperty(prefix+"ex_nstrength")!=null) this.ex_nstrength=Double.parseDouble(properties.getProperty(prefix+"ex_nstrength"));
if (properties.getProperty(prefix+"ex_over_bgnd")!=null) this.ex_over_bgnd=Boolean.parseBoolean(properties.getProperty(prefix+"ex_over_bgnd"));
if (properties.getProperty(prefix+"ex_min_over")!=null) this.ex_min_over=Double.parseDouble(properties.getProperty(prefix+"ex_min_over"));
if (properties.getProperty(prefix+"pt_super_trust")!=null) this.pt_super_trust=Double.parseDouble(properties.getProperty(prefix+"pt_super_trust")); if (properties.getProperty(prefix+"pt_super_trust")!=null) this.pt_super_trust=Double.parseDouble(properties.getProperty(prefix+"pt_super_trust"));
if (properties.getProperty(prefix+"pt_keep_raw_fg")!=null) this.pt_keep_raw_fg=Boolean.parseBoolean(properties.getProperty(prefix+"pt_keep_raw_fg")); if (properties.getProperty(prefix+"pt_keep_raw_fg")!=null) this.pt_keep_raw_fg=Boolean.parseBoolean(properties.getProperty(prefix+"pt_keep_raw_fg"));
if (properties.getProperty(prefix+"pt_scale_pre")!=null) this.pt_scale_pre=Double.parseDouble(properties.getProperty(prefix+"pt_scale_pre")); if (properties.getProperty(prefix+"pt_scale_pre")!=null) this.pt_scale_pre=Double.parseDouble(properties.getProperty(prefix+"pt_scale_pre"));
...@@ -3680,6 +3689,9 @@ public class EyesisCorrectionParameters { ...@@ -3680,6 +3689,9 @@ public class EyesisCorrectionParameters {
gd.addNumericField("Minimal 4-corr strength to trust tile", this.ex_strength, 3); gd.addNumericField("Minimal 4-corr strength to trust tile", this.ex_strength, 3);
gd.addNumericField("Minimal 4-corr strength divided by channel diff for new (border) tiles", this.ex_nstrength, 3); gd.addNumericField("Minimal 4-corr strength divided by channel diff for new (border) tiles", this.ex_nstrength, 3);
gd.addCheckbox ("Allow expansion over previously identified background (infinity)", this.ex_over_bgnd);
gd.addNumericField("When expanding over background, disregard lower disparity ", this.ex_min_over, 3);
gd.addMessage ("********* Plates filetering when building initial z-map *********"); gd.addMessage ("********* Plates filetering when building initial z-map *********");
gd.addNumericField("If strength exceeds ex_strength * super_trust, do not apply ex_nstrength and plate_ds", this.pt_super_trust, 3); gd.addNumericField("If strength exceeds ex_strength * super_trust, do not apply ex_nstrength and plate_ds", this.pt_super_trust, 3);
gd.addCheckbox ("Do not replace raw tiles by the plates, if raw is closer (like poles)", this.pt_keep_raw_fg); gd.addCheckbox ("Do not replace raw tiles by the plates, if raw is closer (like poles)", this.pt_keep_raw_fg);
...@@ -3803,10 +3815,10 @@ public class EyesisCorrectionParameters { ...@@ -3803,10 +3815,10 @@ public class EyesisCorrectionParameters {
gd.addNumericField("Consider merging initial planes if disparity difference below", this.stSmallDiff, 6); gd.addNumericField("Consider merging initial planes if disparity difference below", this.stSmallDiff, 6);
gd.addNumericField("Consider merging initial planes if jumps between ratio above", this.stHighMix, 6); gd.addNumericField("Consider merging initial planes if jumps between ratio above", this.stHighMix, 6);
gd.addNumericField("Outlayer tiles weaker than this may be replaced from neighbors", this.outlayerStrength, 6); gd.addNumericField("Outlier tiles weaker than this may be replaced from neighbors", this.outlayerStrength, 6);
gd.addNumericField("Replace weak outlayer tiles that do not have neighbors within this disparity difference", this.outlayerDiff, 6); gd.addNumericField("Replace weak outlier tiles that do not have neighbors within this disparity difference", this.outlayerDiff, 6);
gd.addNumericField("Replace weak outlayer tiles that have higher disparity than weighted average", this.outlayerDiffPos, 6); gd.addNumericField("Replace weak outlier tiles that have higher disparity than weighted average", this.outlayerDiffPos, 6);
gd.addNumericField("Replace weak outlayer tiles that have lower disparity than weighted average", this.outlayerDiffNeg, 6); gd.addNumericField("Replace weak outlier tiles that have lower disparity than weighted average", this.outlayerDiffNeg, 6);
gd.addCheckbox ("Combine with all previous after refine pass", this.combine_refine); gd.addCheckbox ("Combine with all previous after refine pass", this.combine_refine);
gd.addNumericField("Disregard weaker tiles when combining scans", this.combine_min_strength, 6); gd.addNumericField("Disregard weaker tiles when combining scans", this.combine_min_strength, 6);
...@@ -4222,6 +4234,9 @@ public class EyesisCorrectionParameters { ...@@ -4222,6 +4234,9 @@ public class EyesisCorrectionParameters {
this.ex_strength= gd.getNextNumber(); this.ex_strength= gd.getNextNumber();
this.ex_nstrength= gd.getNextNumber(); this.ex_nstrength= gd.getNextNumber();
this.ex_over_bgnd= gd.getNextBoolean();
this.ex_min_over= gd.getNextNumber();
this.pt_super_trust= gd.getNextNumber();
this.pt_keep_raw_fg= gd.getNextBoolean();
this.pt_scale_pre= gd.getNextNumber();
...
...@@ -3879,7 +3879,7 @@ public class LinkPlanes {
weak_pairs, // final boolean [][] weak_fg_pairs,
1.0, // double relax,
plWeakFgRelax, // final double relax_weak_fg,
1+ debugLevel, // final int debugLevel)
dbg_tileX,
dbg_tileY);
// Calculate costs of merging planes of the same supertile and remove those that exceed the threshold
...
...@@ -337,7 +337,7 @@ public class MeasuredLayers {
* @return [smplSide * smplSide] array of weights, 1.0 in the center
*/
static double [] getSampleWindow(int smplSide, boolean all1){
double [] weights = new double [smplSide * smplSide];
for (int sy = 0; sy < smplSide; sy++){
for (int sx = 0; sx < smplSide; sx++){
...@@ -898,6 +898,9 @@ public class MeasuredLayers {
boolean null_if_none,
int debugLevel)
{
boolean use_new = true; // false;
if (use_new) {
return getDisparityStrength (
num_layer, // int num_layer,
stX, // int stX,
...@@ -914,6 +917,25 @@ public class MeasuredLayers {
0.2, // double max_rel_tilt, // = 0.2; // (pix / disparity) per tile
null_if_none, // boolean null_if_none,
debugLevel); // int debugLevel)
} else {
return getDisparityStrength_old (
num_layer, // int num_layer,
stX, // int stX,
stY, // int stY,
sel_in, // boolean [] sel_in,
// null, // double [] tiltXY, // null - free with limit on both absolute (2.0?) and relative (0.2) values
strength_floor, // double strength_floor,
strength_pow, // double strength_pow,
smplSide, // int smplSide, // = 2; // Sample size (side of a square)
smplNum, // int smplNum, // = 3; // Number after removing worst (should be >1)
smplRms, // double smplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
smplWnd, // boolean smplWnd, //
// 2.0, // double max_abs_tilt, // = 2.0; // pix per tile
// 0.2, // double max_rel_tilt, // = 0.2; // (pix / disparity) per tile
null_if_none, // boolean null_if_none,
debugLevel); // int debugLevel)
}
}
public double[][] getDisparityStrength (
...@@ -921,7 +943,9 @@ public class MeasuredLayers {
int stX,
int stY,
boolean [] sel_in,
double [][] atiltXY, // null - free with limit on both absolute (2.0?) and relative (0.2) values
// if it has just one element - apply to all tiles, otherwise it is supposed
// to be per-tile of a supertile array
double strength_floor,
double strength_pow,
int smplSide, // = 2; // Sample size (side of a square)
...@@ -1018,7 +1042,7 @@ public class MeasuredLayers {
if (num_in_sample >= smplNum){ // try, remove worst
sample_loop:
{
boolean en_tilt = (atiltXY == null);
if (en_tilt) { // make sure there are enough samples and not all of them are on the same line
if (!notColinear(smpl_sel,smplSide)){
en_tilt = false;
...@@ -1128,7 +1152,10 @@ public class MeasuredLayers {
}
}
if (iworst < 0){
if (debugLevel > 0) {
System.out.println("**** this may be a BUG in getDisparityStrength() - can not find the worst sample - all tiles fit perfectly ****");
}
// this can happen if some samples are the same and all the pixels fit exactly - use all of them
break;
}
// remove worst sample
...@@ -1158,6 +1185,15 @@ public class MeasuredLayers {
} else { // fixed tilt
// tilt around center
double [] tiltXY= {0.0,0.0};
if (atiltXY != null){ // if null ( free but failed co-linear test - keep both tilts == 0.0
if (atiltXY.length == 1){
tiltXY = atiltXY[0]; // common for all tiles (such as constant disparity)
} else if (atiltXY[indx] != null){
tiltXY = atiltXY[indx];
}
}
if ((tiltXY != null) && (tiltXY[0] != 0.0) && (tiltXY[1] != 0.0)){
for (int sy = 0; sy < smplSide; sy++){
for (int sx = 0; sx < smplSide; sx++){
...
...@@ -195,50 +195,83 @@ public class PolynomialApproximation {
double [] xy=M.solve(new Matrix(aB)).getColumnPackedCopy();
return xy;
}
/* ======================================================================== */
/**
* See below, this version is for backward compatibility with no damping
* @param data
* @param forceLinear
* @return
*/
public double [][] quadraticApproximation(
double [][][] data,
boolean forceLinear) // use linear approximation
{
return quadraticApproximation(
data,
forceLinear,
null);
}
/**
* Approximate function z(x,y) as a second degree polynomial (or just linear)
* f(x,y)=A*x^2+B*y^2+C*x*y+D*x+E*y+F or f(x,y)=D*x+E*y+F
* @param data array consists of lines of either 2 or 3 vectors:
* 2-element vector x,y
* variable length vector z (should be the same for all samples)
* optional 1-element vector w (weight of the sample)
* @param forceLinear force linear approximation
* @param damping optional (may be null) array of 3 (for linear) or 6 (quadratic) values that adds a cost
* for the corresponding coefficient being non-zero. This can be used to find reasonable solutions when
* data is insufficient. Just one data point would result in a plane of constant z, co-linear
* points will produce a plane with a gradient along this line
* @return array of vectors or null
* each vector (one per each z component) is either 6-element - (A,B,C,D,E,F) if quadratic is possible and enabled
* or 3-element - (D,E,F) if linear is possible and quadratic is not possible or disabled
* returns null if not enough data even for the linear approximation
*/
public double [][] quadraticApproximation(
double [][][] data,
boolean forceLinear, // use linear approximation
double [] damping)
{
return quadraticApproximation(
data,
forceLinear, // use linear approximation
damping,
this.debugLevel);
}
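// Example of the damping convention (as used by ExtendSurfaces.smoothNewStep()): with
// forceLinear == true, damping = {tilt_cost, tilt_cost, 0.0} adds the cost tilt_cost to the two
// tilt coefficients D and E and leaves the offset F unconstrained.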
public double [][] quadraticApproximation(
double [][][] data,
boolean forceLinear, // use linear approximation
int debugLevel
){
return quadraticApproximation(
data,
forceLinear, // use linear approximation
null,
debugLevel);
}
public double [][] quadraticApproximation(
double [][][] data,
boolean forceLinear, // use linear approximation
double [] damping,
int debugLevel
){
return quadraticApproximation(
data,
forceLinear, // use linear approximation
damping,
1.0E-10, // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail) 11.0E-10 failed where it shouldn't?
1.0E-15, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
debugLevel);
/*
1.0E-12, // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail) 11.0E-10 failed where it shouldn't?
1.0E-20); // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
*/
}
public double [][] quadraticApproximation(
double [][][] data,
boolean forceLinear, // use linear approximation
...@@ -248,14 +281,50 @@ public class PolynomialApproximation {
return quadraticApproximation(
data,
forceLinear, // use linear approximation
null,
1.0E-10, // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail) 11.0E-10 failed where it shouldn't?
1.0E-15, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
this.debugLevel);
}
public double [][] quadraticApproximation(
double [][][] data,
boolean forceLinear, // use linear approximation
double thresholdLin, // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail)
double thresholdQuad, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
double [] damping)
{
return quadraticApproximation(
data,
forceLinear, // use linear approximation
damping,
1.0E-10, // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail) 11.0E-10 failed where it shouldn't?
1.0E-15, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
this.debugLevel);
}
public double [][] quadraticApproximation(
double [][][] data,
boolean forceLinear, // use linear approximation
double thresholdLin, // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail)
double thresholdQuad, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
int debugLevel)
{
return quadraticApproximation(
data,
forceLinear, // use linear approximation
null,
thresholdLin, // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail) 11.0E-10 failed where it shouldn't?
thresholdQuad, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
debugLevel);
}
public double [][] quadraticApproximation(
double [][][] data,
boolean forceLinear, // use linear approximation
double [] damping,
double thresholdLin, // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail)
double thresholdQuad, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
int debugLevel
...@@ -327,6 +396,23 @@ public class PolynomialApproximation {
(5) 0= A*S21 + B*S03 + C*S12 + D*S11 + E*S02 + F*S01 - SZ01
(6) 0= A*S20 + B*S02 + C*S11 + D*S10 + E*S01 + F*S00 - SZ00
*/
Matrix mDampingLin = null;
Matrix mDampingQuad = null;
if (damping != null){
mDampingLin = new Matrix(3,3);
for (int i = 0; i < 3; i++){
int j = damping.length + i - 3;
if (j >= 0) mDampingLin.set(i,i,damping[j]);
}
if (!forceLinear) {
mDampingQuad = new Matrix(6,6);
for (int i = 0; i < 6; i++){
int j = damping.length + i - 6;
if (j >= 0) mDampingQuad.set(i,i,damping[j]);
}
}
}
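// Adding diag(damping) to the normal matrix below is a Tikhonov-style regularization: it solves
// min sum(w_k*(f(x_k,y_k)-z_k)^2) + sum(damping_i*c_i^2) over the polynomial coefficients c_i,
// which keeps the system well-conditioned when the samples alone would make it singular.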
int zDim=data[0][1].length;
double w,z,x,x2,x3,x4,y,y2,y3,y4,wz;
...@@ -399,11 +485,16 @@ public class PolynomialApproximation {
{S20,S11,S10},
{S11,S02,S01},
{S10,S01,S00}};
Matrix mLin=new Matrix (mAarrayL);
if (mDampingLin != null){
mLin.plusEquals(mDampingLin);
}
// TODO Maybe bypass determinant checks for damped ?
Matrix Z;
if (debugLevel>3) System.out.println(">>> n="+n+" det_lin="+mLin.det()+" norm_lin="+normMatix(mAarrayL));
double nmL=normMatix(mAarrayL);
if ((nmL==0.0) || (Math.abs(mLin.det())/nmL<thresholdLin)){
// return average value for each channel
if (S00==0.0) return null; // not even average
double [][] ABCDEF=new double[zDim][3];
...@@ -422,7 +513,7 @@ public class PolynomialApproximation {
zAarrayL[1]=SZ01[i];
zAarrayL[2]=SZ00[i];
Z=new Matrix (zAarrayL,3);
ABCDEF[i]= mLin.solve(Z).getRowPackedCopy();
}
if (forceLinear) return ABCDEF;
// now try quadratic approximation
...@@ -433,18 +524,22 @@ public class PolynomialApproximation {
{S30,S12,S21,S20,S11,S10},
{S21,S03,S12,S11,S02,S01},
{S20,S02,S11,S10,S01,S00}};
Matrix mQuad=new Matrix (mAarrayQ);
if (mDampingQuad != null){
mQuad.plusEquals(mDampingQuad);
}
if (debugLevel>3) {
System.out.println(" n="+n+" det_quad="+mQuad.det()+" norm_quad="+normMatix(mAarrayQ)+" data.length="+data.length);
mQuad.print(10,5);
}
double nmQ=normMatix(mAarrayQ);
if ((nmQ==0.0) || (Math.abs(mQuad.det())/normMatix(mAarrayQ)<thresholdQuad)) {
if (debugLevel>0) System.out.println("Using linear approximation, mQuad.det()="+mQuad.det()+
" normMatix(mAarrayQ)="+normMatix(mAarrayQ)+
", thresholdQuad="+thresholdQuad+
", nmQ="+nmQ+
", Math.abs(mQuad.det())/normMatix(mAarrayQ)="+(Math.abs(mQuad.det())/normMatix(mAarrayQ))); //did not happen
return ABCDEF; // not enough data for the quadratic approximation, return linear
}
// double [] zAarrayQ={SZ20,SZ02,SZ11,SZ10,SZ01,SZ00};
...@@ -457,7 +552,7 @@ public class PolynomialApproximation {
zAarrayQ[4]=SZ01[i];
zAarrayQ[5]=SZ00[i];
Z=new Matrix (zAarrayQ,6);
ABCDEF[i]= mQuad.solve(Z).getRowPackedCopy();
}
return ABCDEF;
}
...
...@@ -5702,11 +5702,21 @@ public class QuadCLT {
final boolean updateStatus,
final int debugLevel)
{
final int max_expand = 500; // 150; // 30;
// Temporary assign here
final int disp_index = ImageDtt.DISPARITY_INDEX_CM;
final int str_index = ImageDtt.DISPARITY_STRENGTH_INDEX;
final double strength_floor = 0.8* clt_parameters.combine_min_strength;
// TODO: define, make parameters
final double comboMinStrength = 0.3; // 0.3;
final double comboMinStrengthHor = 0.3; // 0.3;
final double comboMinStrengthVert = 0.3; // 0.3;
final double filterMinStrength = 0.3; // 0.3;
// TODO: make parameters
final double strength_pow = 1.0;
final int smplSide = 5; // 3; // Sample size (side of a square)
final int smplNum = 13; // 5; // Number after removing worst (should be >1)
...@@ -5720,7 +5730,7 @@ public class QuadCLT {
// final double reliable_raw_strength=0.25;
final boolean show_init_refine = clt_parameters.show_init_refine;
boolean show_expand = clt_parameters.show_expand && (max_expand <= 10);
final boolean show_retry_far = clt_parameters.show_retry_far && (max_expand <= 10);
//max_expand
String name = (String) imp_quad[0].getProperty("name");
...@@ -5767,6 +5777,9 @@ public class QuadCLT {
double [][] filtered_bgnd_disp_strength = tp.getFilteredDisparityStrength(
tp.clt_3d_passes.size() - 1, // final int measured_scan_index, // will not look at higher scans
0, // final int start_scan_index,
null , // final boolean [] bg_tiles, // get from selected in clt_3d_passes.get(0);
0.0, // whatever as null above // clt_parameters.ex_min_over,// final double ex_min_over, // when expanding over previously detected (by error) background, disregard far tiles
disp_index, // final int disp_index,
str_index, // final int str_index,
null, // final double [] tiltXY, // null - free with limit on both absolute (2.0?) and relative (0.2) values
...@@ -5917,6 +5930,8 @@ public class QuadCLT {
tp.clt_3d_passes, // final ArrayList <CLTPass3d> passes,
bg_pass, // final int firstPass,
tp.clt_3d_passes.size(), // final int lastPassPlus1,
// tp.clt_3d_passes.get(bg_pass).getSelected(), // selected , // final boolean [] bg_tiles, // get from selected in clt_3d_passes.get(0);
// clt_parameters.ex_min_over,// final double ex_min_over, // when expanding over previously detected (by error) background, disregard far tiles
tp.getTrustedCorrelation(), // final double trustedCorrelation,
0.0, // clt_parameters.bgnd_range, // final double disp_far, // limit results to the disparity range
clt_parameters.grow_disp_max, // final double disp_near,
...@@ -5948,7 +5963,13 @@ public class QuadCLT {
boolean last_pass = false;
// for (int num_expand = 0; (num_expand < 4) && (num_extended != 0); num_expand++) {
boolean over_infinity = false;
int dbg_start_pass = -20;
int dbg_end_pass = -29;
for (int num_expand = 0; num_expand < max_expand; num_expand++) { for (int num_expand = 0; num_expand < max_expand; num_expand++) {
boolean dbg_pass = (num_expand >= dbg_start_pass) && (num_expand <= dbg_end_pass);
show_expand = (clt_parameters.show_expand && (max_expand <= 10)) || dbg_pass;
Runtime runtime = Runtime.getRuntime();
runtime.gc();
...@@ -5958,6 +5979,8 @@ public class QuadCLT {
double [][] filtered_disp_strength = tp.getFilteredDisparityStrength(
tp.clt_3d_passes.size() - 1, // final int measured_scan_index, // will not look at higher scans
0, // final int start_scan_index,
tp.clt_3d_passes.get(bg_pass).getSelected(), // selected , // final boolean [] bg_tiles, // get from selected in clt_3d_passes.get(0);
clt_parameters.ex_min_over,// final double ex_min_over, // when expanding over previously detected (by error) background, disregard far tiles
disp_index, // final int disp_index,
str_index, // final int str_index,
null, // final double [] tiltXY, // null - free with limit on both absolute (2.0?) and relative (0.2) values
...@@ -5973,7 +5996,14 @@ public class QuadCLT {
dbg_x, // final int dbg_x,
dbg_y, // final int dbg_y,
debugLevel); // final int debugLevel)
if (over_infinity) {
tp.filterOverBackground(
filtered_disp_strength, // final double [][] ds,
filtered_bgnd_disp_strength[1], // final double [] bg_strength,
tp.clt_3d_passes.get(bg_pass).getSelected(), // final boolean [] bg_tiles, // get from selected in clt_3d_passes.get(0);
filterMinStrength, // final double minStrength,
clt_parameters.ex_min_over); // final double ex_min_over // when expanding over previously detected (by error) background, disregard far tiles
}
refine_pass = tp.clt_3d_passes.size(); // 1
// refine pass uses hor/vert thresholds inside
tp.refinePassSetup( // prepare tile tasks for the refine pass (re-measure disparities)
...@@ -6030,6 +6060,8 @@ public class QuadCLT {
tp.clt_3d_passes, // final ArrayList <CLTPass3d> passes,
bg_pass, // final int firstPass,
tp.clt_3d_passes.size(), // final int lastPassPlus1,
// tp.clt_3d_passes.get(bg_pass).getSelected(), // selected , // final boolean [] bg_tiles, // get from selected in clt_3d_passes.get(0);
// clt_parameters.ex_min_over,// final double ex_min_over, // when expanding over previously detected (by error) background, disregard far tiles
tp.getTrustedCorrelation(), // final double trustedCorrelation,
0.0, // clt_parameters.bgnd_range, // final double disp_far, // limit results to the disparity range
clt_parameters.grow_disp_max, // final double disp_near,
...@@ -6041,6 +6073,16 @@ public class QuadCLT {
false, // final boolean usePoly) // use polynomial method to find max), valid if useCombo == false
true, // final boolean copyDebug)
debugLevel);
if (over_infinity) {
tp.filterOverBackground(
extended_pass, // final CLTPass3d pass,
comboMinStrength, // final double minStrength,
comboMinStrengthHor, // final double minStrengthHor,
comboMinStrengthVert, //final double minStrengthVert,
tp.clt_3d_passes.get(bg_pass).getSelected(), // selected , // final boolean [] bg_tiles, // get from selected in clt_3d_passes.get(0);
clt_parameters.ex_min_over,// final double ex_min_over, // when expanding over previously detected (by error) background, disregard far tiles
filtered_disp_strength);
}
if (show_expand || (clt_parameters.show_expand && last_pass)) tp.showScan(
tp.clt_3d_passes.get(refine_pass), // CLTPass3d scan,
"after_refine-combine-"+(tp.clt_3d_passes.size() - 1));
...@@ -6076,7 +6118,7 @@ public class QuadCLT {
}
boolean show_ex_debug = show_retry_far || (clt_parameters.show_retry_far && last_pass) || dbg_pass;
num_extended = tp.setupExtendDisparity(
extended_pass, // final CLTPass3d scan, // combined scan with max_tried_disparity, will be modified to re-scan
tp.clt_3d_passes.get(refine_pass), // final CLTPass3d last_scan, // last prepared tile - can use last_scan.disparity, .border_tiles and .selected
...@@ -6091,7 +6133,7 @@ public class QuadCLT {
clt_parameters.grow_retry_inf, // final boolean grow_retry_inf, // Retry border tiles that were identified as infinity earlier
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
geometryCorrection, // GeometryCorrection geometryCorrection,
show_ex_debug, // true, // final boolean show_debug,
threadsMax, // maximal number of threads to launch
updateStatus,
...@@ -6169,8 +6211,16 @@ public class QuadCLT {
false, // final boolean usePoly) // use polynomial method to find max), valid if useCombo == false
true, // final boolean copyDebug)
// (num_expand == 9)? 2: debugLevel);
debugLevel);
/*
tp.filterOverBackground(
combo_pass, // final CLTPass3d pass,
comboMinStrength, // final double minStrength,
comboMinStrengthHor, // final double minStrengthHor,
comboMinStrengthVert, //final double minStrengthVert,
tp.clt_3d_passes.get(bg_pass).getSelected(), // selected , // final boolean [] bg_tiles, // get from selected in clt_3d_passes.get(0);
clt_parameters.ex_min_over);// final double ex_min_over, // when expanding over previously detected (by error) background, disregard far tiles
*/
tp.clt_3d_passes.add(combo_pass);
// refine_pass = tp.clt_3d_passes.size();
// }
...@@ -6180,12 +6230,19 @@ public class QuadCLT {
if (last_pass) {
break;
} else if (numLeftRemoved[0] == 0){
System.out.println("**** processCLTQuad3d(): nothing to expand ***");
System.out.println("!clt_parameters.ex_over_bgnd="+clt_parameters.ex_over_bgnd+" over_infinity="+over_infinity);
if (!clt_parameters.ex_over_bgnd || over_infinity) last_pass = true;
else {
over_infinity = true;
if (debugLevel > -1){
System.out.println("===== processCLTQuad3d(): trying to expand over previously identified background (may be by error)====");
}
}
}
}
show_expand = (clt_parameters.show_expand && (max_expand <= 10));
tp.showScan(
tp.clt_3d_passes.get(tp.clt_3d_passes.size()-2), // CLTPass3d scan,
"after_pre_last_combo_pass-"+(tp.clt_3d_passes.size()-2)); //String title)
...
...@@ -1436,13 +1436,17 @@ public class SuperTiles{
// TODO: Remove when promoting PlaneData
final TilePlanes tpl = new TilePlanes(tileSize,superTileSize, geometryCorrection);
final double [][][][] plane_disp_strength = new double [nStiles][][][];
// DEBUG feature:
final double [][] zero_tilts = {{0.0,0.0}}; // set to null for float
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
double [][][] plane_tilts = null; // used only for world_plane_norm != null
// to get tile disparities needed to calculate tilts
for (int nsTile = ai.getAndIncrement(); nsTile < nStiles; nsTile = ai.getAndIncrement()) {
// int dl = (nsTile == debug_stile) ? 3 : 0;
int dl = ((debugLevel > 1) && (nsTile == debug_stile)) ? 3: debugLevel;
if (dl > 2){
System.out.println("getPlaneDispStrengths(): nsTile="+nsTile);
}
int stileY = nsTile / stilesX;
...@@ -1457,7 +1461,48 @@ public class SuperTiles{
measuredLayers, // MeasuredLayers measuredLayers,
plPreferDisparity); // boolean preferDisparity)
if (smplMode && (world_plane_norm != null)) {
plane_tilts = new double [measuredLayers.getNumLayers()][][];
for (int ml = 0; ml < plane_tilts.length; ml++) if ((stMeasSel & ( 1 << ml)) != 0){
double [][] tile_disp_strengths = measuredLayers.getDisparityStrength(
ml, // int num_layer,
stileX, // int stX,
stileY, // int stY,
null, // boolean [] sel_in,
strength_floor, // double strength_floor,
strength_pow, // double strength_pow,
true); // boolean null_if_none);
if (tile_disp_strengths == null){
plane_tilts[ml] = zero_tilts;
} else {
plane_tilts[ml] = pd0.getDisparityTilts(
world_plane_norm, // double [] world_normal_xyz,
tile_disp_strengths, // double [][] tile_disp_strengths,
debugLevel); // int debugLevel);
if(dl > 2){
if (plane_tilts[ml] != null){
for (int k = 0; k < 2; k++) {
System.out.println((k > 0) ? "tilt Y, pix/tile" : "tilt X, pix/tile");
for (int i = 0; i < 2 * superTileSize; i++){
System.out.print(i+",");
for (int j = 0; j < 2 * superTileSize; j++){
int it = j + 2 * superTileSize * i;
if (plane_tilts[ml][it] !=null){
System.out.print(String.format("%f7.4", plane_tilts[ml][it][k]));
}
if (j < (2 * superTileSize - 1)){
System.out.print(", ");
}
}
System.out.println();
}
System.out.println();
}
}
}
}
}
}
plane_disp_strength[nsTile] = new double[measuredLayers.getNumLayers()][][];
...@@ -1469,13 +1514,18 @@ public class SuperTiles{
stileX, // int stX,
stileY, // int stY,
null, // boolean [] sel_in,
// ((world_plane_norm == null)? zero_tilts: null), // double [] tiltXY, // null - free with limit on both absolute (2.0?) and relative (0.2) values
((world_plane_norm == null)? zero_tilts: plane_tilts[ml]), // double [] tiltXY, // null - free with limit on both absolute (2.0?) and relative (0.2) values
strength_floor, // double strength_floor,
strength_pow, // double strength_pow,
smplSide, // int smplSide, // = 2; // Sample size (side of a square)
smplNum, //int smplNum, // = 3; // Number after removing worst (should be >1)
smplRms, //double smplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
// true, // boolean null_if_none);
smplWnd, // boolean smplWnd, //
2.0, // double max_abs_tilt, // = 2.0; // pix per tile
0.2, // double max_rel_tilt, // = 0.2; // (pix / disparity) per tile
true, // boolean null_if_none);
dl); dl);
} else { } else {
plane_disp_strength[nsTile][ml] = measuredLayers.getDisparityStrength( plane_disp_strength[nsTile][ml] = measuredLayers.getDisparityStrength(
...@@ -1505,7 +1555,7 @@ public class SuperTiles{ ...@@ -1505,7 +1555,7 @@ public class SuperTiles{
if (dl > 0) { if (dl > 0) {
System.out.println("Plane tilted disparity for stileX = "+stileX+" stileY="+stileY+", average disparity "+(sd/sw)); System.out.println("Plane tilted disparity for stileX = "+stileX+" stileY="+stileY+", average disparity "+(sd/sw));
} }
if (dl>1) { if (dl>2) {
String [] dbg_titles = showSupertileSeparationTitles( plane_disp_strength[nsTile], null); String [] dbg_titles = showSupertileSeparationTitles( plane_disp_strength[nsTile], null);
double [][] dbg_img = showSupertileSeparation(false, plane_disp_strength[nsTile], null); // plane_sels); double [][] dbg_img = showSupertileSeparation(false, plane_disp_strength[nsTile], null); // plane_sels);
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays();
...@@ -1519,7 +1569,7 @@ public class SuperTiles{ ...@@ -1519,7 +1569,7 @@ public class SuperTiles{
null, // boolean [][] tile_sel, // null - do not use, {} use all (will be modified) null, // boolean [][] tile_sel, // null - do not use, {} use all (will be modified)
plane_disp_strength[nsTile], // double [][][] disp_str, // calculate just once if null plane_disp_strength[nsTile], // double [][][] disp_str, // calculate just once if null
dl); // 1); // int debugLevel); dl); // 1); // int debugLevel);
if (dl>1) { if (dl>2) {
String [] dbg_titles = showSupertileSeparationTitles( plane_disp_strength[nsTile], null); String [] dbg_titles = showSupertileSeparationTitles( plane_disp_strength[nsTile], null);
double [][] dbg_img = showSupertileSeparation(false, plane_disp_strength[nsTile], null); // plane_sels); double [][] dbg_img = showSupertileSeparation(false, plane_disp_strength[nsTile], null); // plane_sels);
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays();
...@@ -2623,7 +2673,7 @@ public class SuperTiles{ ...@@ -2623,7 +2673,7 @@ public class SuperTiles{
highMix, //final double highMix, // stHighMix = 0.4; // Consider merging initial planes if jumps between ratio above highMix, //final double highMix, // stHighMix = 0.4; // Consider merging initial planes if jumps between ratio above
world_hor, // final double [] world_hor, // horizontal plane normal (default [0.0, 1.0, 0.0]) world_hor, // final double [] world_hor, // horizontal plane normal (default [0.0, 1.0, 0.0])
show_histograms, // final boolean show_histograms, show_histograms, // final boolean show_histograms,
debugLevel+0, // final int debugLevel, debugLevel+1, // final int debugLevel,
dbg_X, // final int dbg_X, dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y) dbg_Y); // final int dbg_Y)
......
...@@ -972,6 +972,82 @@ public class TilePlanes { ...@@ -972,6 +972,82 @@ public class TilePlanes {
return true;
}
/**
 * Used when filtering sample plates by averaging a subset of the square sample and removing
 * outliers. Currently only constant-disparity and horizontal surfaces are used; for the
 * horizontal ones this method finds the tilts d_disp/d_tx, d_disp/d_ty, measured in pixels per tile.
 * @param world_normal_xyz world space normal vector, currently only "up" - (0,1,0) is used
 * @param tile_disp_strengths [0] - unfiltered disparities for the supertile,
 * [1] - unfiltered correlation strengths for the supertile (just 0/non-0)
 * @param debugLevel debug level
 * @return per-tile array of either nulls or {tilt-x, tilt-y} pairs
 */
public double [][] getDisparityTilts(
double [] world_normal_xyz,
double [][] tile_disp_strengths,
int debugLevel)
{
final int stSize2 = 2* stSize;
final int stCenter = (stSize2 + 1) * superTileSize / 2;
final double [][] tile_tilts = new double [stSize2*stSize2][];
final double [] apy_vector = {0.0, 0.0, 1.0};
final Matrix py_vector = new Matrix(apy_vector,3);
invalidateCalculated();
double [] px_py = getCenterPxPy();
Matrix normal_row = new Matrix(world_normal_xyz,1); // 1x3
Matrix normal_col = new Matrix(world_normal_xyz,3); // 3x1
// find world coordinates of the center of tile intersection with the plane
for (int lTile = 0; lTile < tile_disp_strengths[1].length; lTile++) if (tile_disp_strengths[1][lTile] > 0.0){
int tY = lTile / stSize2;
int tX = lTile % stSize2;
double px = px_py[0] + tX - stCenter;
double py = px_py[1] + tY - stCenter;
double disp = tile_disp_strengths[0][lTile];
// get world coordinates for each tile as determined individually
Matrix t_xyz = new Matrix(geometryCorrection.getWorldCoordinates(
px,
py,
disp,
this.correctDistortions),3);
// double n_by_w = normal_row.times(t_xyz).get(0, 0);
// take pixel vector parallel to py and convert it to the world space
Matrix jacobian = new Matrix(geometryCorrection.getWorldJacobian(
px,
py,
disp,
this.correctDistortions,
(debugLevel > 2)
));
Matrix inv_jacobian = jacobian.inverse();
Matrix wpy_vector = jacobian.times(py_vector); // 3 rows, 1 column py vector in the real world
// get 2 in-plane vectors in the real world
Matrix wv1 = cross3d(normal_col, wpy_vector);
Matrix wv2 = cross3d(normal_col, wv1);
// convert then to pixel space
Matrix pv1 = inv_jacobian.times(wv1);
Matrix pv2 = inv_jacobian.times(wv2);
// Get plane normal in the pixel space
Matrix pn = cross3d(pv1, pv2);
// convert to the two tilts (d/dpx, d/dpy). We will need in pix/tile, not pix/pix
double [] txty = {
-pn.get(1, 0)/pn.get(0, 0)*stSize,
-pn.get(2, 0)/pn.get(0, 0)*stSize};
tile_tilts[lTile] = txty;
}
// if (debugLevel > 1) {
// System.out.println("st_xyz = {"+st_xyz.get(0, 0)+","+st_xyz.get(1, 0)+","+st_xyz.get(2, 0)+"}"+" ="+n_by_w);
// }
return tile_tilts;
}
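The normal-to-tilt conversion inside getDisparityTilts() can be illustrated in isolation. The sketch below is not code from this repository: it assumes, as the method above does, a 3x3 Jacobian that maps pixel-space increments (here ordered px, py, disp) to world-space increments, builds two world vectors lying in the plane by cross products with the plane normal, maps them back to pixel space with the inverse Jacobian, and takes their cross product to get the plane normal in pixel space, from which d_disp/d_px and d_disp/d_py follow. The Jacobian values in main() are arbitrary placeholders, not values from GeometryCorrection.

import Jama.Matrix;

public class DisparityTiltSketch {
    // cross product of two 3x1 column vectors
    static Matrix cross3d(Matrix a, Matrix b) {
        double [] av = a.getColumnPackedCopy();
        double [] bv = b.getColumnPackedCopy();
        return new Matrix(new double [] {
                av[1] * bv[2] - av[2] * bv[1],
                av[2] * bv[0] - av[0] * bv[2],
                av[0] * bv[1] - av[1] * bv[0]}, 3);
    }

    // Returns {d_disp/d_px, d_disp/d_py} in pixels per tile for a plane with the given world normal.
    static double [] disparityTilt(Matrix jacobian, double [] world_normal_xyz, int tileSize) {
        Matrix normal_col = new Matrix(world_normal_xyz, 3);               // 3x1 world plane normal
        Matrix py_pix     = new Matrix(new double [] {0.0, 1.0, 0.0}, 3);  // a pixel-space step along py
        Matrix wpy        = jacobian.times(py_pix);                        // that step in world space
        Matrix wv1        = cross3d(normal_col, wpy);                      // two independent vectors
        Matrix wv2        = cross3d(normal_col, wv1);                      // lying in the world plane
        Matrix inv_jacobian = jacobian.inverse();
        Matrix pv1 = inv_jacobian.times(wv1);                              // same vectors in pixel space
        Matrix pv2 = inv_jacobian.times(wv2);
        Matrix pn  = cross3d(pv1, pv2);                                    // plane normal in (px,py,disp)
        return new double [] {
                -pn.get(0, 0) / pn.get(2, 0) * tileSize,                   // d_disp/d_px, pix per tile
                -pn.get(1, 0) / pn.get(2, 0) * tileSize};                  // d_disp/d_py, pix per tile
    }

    public static void main(String [] args) {
        // placeholder Jacobian: columns are d(world)/d(px), d(world)/d(py), d(world)/d(disp)
        double [][] j = {{0.1, 0.0, 0.5}, {0.0, 0.1, -1.2}, {0.02, -0.01, 2.5}};
        double [] tilt = disparityTilt(new Matrix(j), new double [] {0.0, 1.0, 0.0}, 8);
        System.out.println("tiltX = " + tilt[0] + " pix/tile, tiltY = " + tilt[1] + " pix/tile");
    }
}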
/**
 * Tilt disparity values around the supertile center (this.zxy) so constant disparity in the output
 * corresponds to the real world plane parallel to the provided one. Used to discriminate tiles by
...@@ -995,7 +1071,7 @@ public class TilePlanes {
{
    final int stSize2 = 2* stSize;
    invalidateCalculated();
    double [] px_py = getCenterPxPy(); // tile center
    // find world coordinates of the center of tile intersection with the plane
    Matrix st_xyz = new Matrix(geometryCorrection.getWorldCoordinates(
        px_py[0],
...@@ -2251,7 +2327,8 @@ public class TilePlanes {
double val = values[0];
if ((dispNorm > 0.0) && (zxy[0] > dispNorm)){
    double k = dispNorm / zxy[0];
    // val *= k*k;
    val *= k; // reducing correction for the large disparities (making it sqrt())
}
return val;
}
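A small numeric illustration of the change above (not code from this repository): for a tile whose disparity zxy[0] exceeds dispNorm, the returned value used to be scaled by (dispNorm/disparity)^2 and is now scaled by (dispNorm/disparity), so the down-weighting of near (large-disparity) tiles is softened. The numbers below are arbitrary.

public class DispNormScaleSketch {
    public static void main(String [] args) {
        double dispNorm = 5.0;
        double val = 0.04; // some raw value before disparity normalization
        for (double disparity : new double [] {5.0, 10.0, 20.0, 40.0}) {
            double k = (disparity > dispNorm) ? (dispNorm / disparity) : 1.0;
            System.out.println(String.format(
                    "disparity=%5.1f  old: val*k*k=%8.5f  new: val*k=%8.5f",
                    disparity, val * k * k, val * k));
        }
    }
}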
...@@ -3671,6 +3748,13 @@ public class TilePlanes {
    int debugLevel){
    return getWorldXYZ(this.correctDistortions, debugLevel);
}
/**
* Get vector from the camera perpendicular to the plane converted to the real world
* @param correct_distortions
* @param debugLevel
* @return
*/
public double [] getWorldXYZ(
    boolean correct_distortions,
    int debugLevel)
...
...@@ -280,6 +280,83 @@ public class TileProcessor {
    return combo_pass;
}
/**
 * When expanding over previously identified background (which may be in error), remove tiles from the
 * composite scan (made by compositeScan()) that have small disparity (those should be identified
 * on the first passes during the background scan) or are not extra strong
 * @param pass composite scan to filter (modified in place)
 * @param minStrength full correlation strength to consider data to be extra reliable (after conditioning)
 * @param minStrengthHor horizontal (for vertical features) correlation strength to consider data to be extra reliable (after conditioning)
 * @param minStrengthVert vertical (for horizontal features) correlation strength to consider data to be extra reliable (after conditioning)
 * @param bg_tiles background tile selection, from selected in clt_3d_passes.get(0)
 * @param ex_min_over when expanding over previously detected (by error) background, disregard far tiles
 * @param filtered_disp_strength disparity/strength determined by a floating plate (now 5x5 tiles). [1] Strength < 0 means that the last measurement
 * does not include this tile, 0.0 - too weak/too bad data, >0.0 - strength.
 */
public void filterOverBackground(
final CLTPass3d pass,
final double minStrength,
final double minStrengthHor,
final double minStrengthVert,
final boolean [] bg_tiles, // get from selected in clt_3d_passes.get(0);
final double ex_min_over, // when expanding over previously detected (by error) background, disregard far tiles
final double [][] filtered_disp_strength
){
for (int ty = 0; ty < tilesY; ty ++) {
for (int tx = 0; tx < tilesX; tx ++) if (pass.tile_op[ty][tx] != 0) {
int nt = ty * tilesX + tx;
if (bg_tiles[nt]){
boolean [] good_comp = {true,true,true};
if ((filtered_disp_strength != null) && (filtered_disp_strength[1][nt] == 0.0)){ // -1 - not measured last time, 0.0 - too weak last time
good_comp = null;
} else {
if ((pass.calc_disparity[nt] < ex_min_over)      || (pass.strength[nt] <= minStrength))          good_comp[0] = false;
if ((pass.calc_disparity_hor[nt] < ex_min_over)  || (pass.strength_hor[nt] <= minStrengthHor))   good_comp[1] = false;
if ((pass.calc_disparity_vert[nt] < ex_min_over) || (pass.strength_vert[nt] <= minStrengthVert)) good_comp[2] = false;
}
if ((good_comp == null) || (!good_comp[0] && !good_comp[1] && !good_comp[2])){
pass.tile_op[ty][tx] = 0;
pass.texture_tiles = null;
for (int i = 0; i< ImageDtt.QUAD; i++) pass.disparity_map[ImageDtt.IMG_DIFF0_INDEX + i][nt] = 0.0;
}
if ((good_comp == null) || !good_comp[0]) {
pass.calc_disparity[nt] = Double.NaN;
pass.strength[nt] = 0.0;
if (pass.disparity_map[ImageDtt.DISPARITY_INDEX_CM]!=null ) pass.disparity_map[ImageDtt.DISPARITY_INDEX_CM][nt] = Double.NaN;
if (pass.disparity_map[ImageDtt.DISPARITY_STRENGTH_INDEX]!=null ) pass.disparity_map[ImageDtt.DISPARITY_STRENGTH_INDEX][nt] = 0.0;
}
if ((good_comp == null) || !good_comp[1]) {
pass.calc_disparity_hor[nt] = Double.NaN;
pass.strength_hor[nt] = 0.0;
if (pass.disparity_map[ImageDtt.DISPARITY_INDEX_HOR]!=null ) pass.disparity_map[ImageDtt.DISPARITY_INDEX_HOR][nt] = Double.NaN;
if (pass.disparity_map[ImageDtt.DISPARITY_INDEX_HOR_STRENGTH]!=null ) pass.disparity_map[ImageDtt.DISPARITY_INDEX_HOR_STRENGTH][nt] = 0.0;
}
if ((good_comp == null) || !good_comp[2]) {
pass.calc_disparity_vert[nt] = Double.NaN;
pass.strength_vert[nt] = 0.0;
if (pass.disparity_map[ImageDtt.DISPARITY_INDEX_VERT]!=null ) pass.disparity_map[ImageDtt.DISPARITY_INDEX_VERT][nt] = Double.NaN;
if (pass.disparity_map[ImageDtt.DISPARITY_INDEX_VERT_STRENGTH]!=null ) pass.disparity_map[ImageDtt.DISPARITY_INDEX_VERT_STRENGTH][nt] = 0.0;
}
}
}
}
}
public void filterOverBackground(
final double [][] ds,
final double [] bg_strength,
final boolean [] bg_tiles, // get from selected in clt_3d_passes.get(0);
final double minStrength,
final double ex_min_over // when expanding over previously detected (by error) background, disregard far tiles
){
for (int nt = 0; nt < bg_tiles.length; nt++) if (bg_tiles[nt]){
if ((ds[1][nt] <= minStrength ) || (ds[1][nt] < bg_strength[nt]) ||(ds[0][nt] < ex_min_over )){
ds[1][nt] =0.0;
ds[0][nt] =0.0;
}
}
}
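A toy, self-contained illustration of the rule in the array-based filterOverBackground() overload above: over tiles previously marked as background, a disparity/strength pair is zeroed out when it is weak, weaker than the background strength, or too far (below ex_min_over). All numbers below are made up.

public class FilterOverBackgroundSketch {
    public static void main(String [] args) {
        double [][] ds = {
                {0.3,  2.5,  6.0,  0.8},    // [0] disparity per tile
                {0.20, 0.05, 0.35, 0.30}};  // [1] strength per tile
        double []  bg_strength = {0.15, 0.10, 0.10, 0.40};
        boolean [] bg_tiles    = {true, true, true, true};
        double minStrength = 0.08, ex_min_over = 1.0;

        // same condition as in the overload above
        for (int nt = 0; nt < bg_tiles.length; nt++) if (bg_tiles[nt]) {
            if ((ds[1][nt] <= minStrength) || (ds[1][nt] < bg_strength[nt]) || (ds[0][nt] < ex_min_over)) {
                ds[1][nt] = 0.0;
                ds[0][nt] = 0.0;
            }
        }
        for (int nt = 0; nt < bg_tiles.length; nt++) {
            System.out.println("tile " + nt + ": disparity = " + ds[0][nt] + ", strength = " + ds[1][nt]);
        }
    }
}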
/**
 * Calculates calc_disparity, calc_disparity_hor, calc_disparity_vert, strength, strength_hor, strength_vert,
 * max_tried_disparity from the subset of a list of measurement passes (skipping non-measured)
...@@ -305,7 +382,6 @@ public class TileProcessor {
    final ArrayList <CLTPass3d> passes,
    final int firstPass,
    final int lastPassPlus1,
// final boolean skip_combo, // do not process other combo scans
    final double trustedCorrelation,
    final double disp_far,  // limit results to the disparity range
    final double disp_near,
...@@ -401,19 +477,22 @@ public class TileProcessor {
if (adiff <= trustedCorrelation){
    double disp = mdisp/corr_magic_scale + pass.disparity[ty][tx];
    // do not consider tiles over background if they are far and initially identified as background
    // if ((bg_tiles == null) || !bg_tiles[nt] || (disp >= ex_min_over)) {
    if ((disp >= disp_far) && (disp <= disp_near) && !Double.isNaN(adiff)){
        if (strength >= minStrength) {
            if (!(adiff >= adiff_best)){ // adiff_best == Double.NaN works too
                adiff_best = adiff;
                best_index = ipass;
            }
        } else {
            if ((last && (strength > 0.0)) || (strength > strongest_weak)){
                strongest_weak = strength;
                best_weak_index = ipass;
            }
        }
    }
    // }
}
if (adiff_hor <= trustedCorrelation){
...@@ -597,6 +676,8 @@ public class TileProcessor {
 * scans, assuming the sample square can (almost) freely tilt for the best fit
 * @param measured_scan_index index in the clt_3d_passes list of the latest measured scan
 * @param start_scan_index lowest scan to use data from
 * @param bg_tiles background tiles selection, may be null
 * @param ex_min_over only consider tiles that are nearer than this if they were previously identified (by error?) as background
 * @param disp_index disparity index in disparity_map (normally ImageDtt.DISPARITY_INDEX_CM)
 * @param str_index strength index in disparity_map (normally ImageDtt.DISPARITY_STRENGTH_INDEX)
 * @param tiltXY null (free floating) or fixed {tilt_x, tilt_y} of the sample
...@@ -619,6 +700,8 @@ public class TileProcessor {
public double [][] getFilteredDisparityStrength(
    final int measured_scan_index, // will not look at higher scans (OK to be non-measured, last measured will be used then)
    final int start_scan_index,
    final boolean [] bg_tiles,     // get from selected in clt_3d_passes.get(0);
    final double ex_min_over,      // when expanding over previously detected (by error) background, disregard far tiles
    final int disp_index,
    final int str_index,
    final double [] tiltXY,        // null - free with limit on both absolute (2.0?) and relative (0.2) values
...@@ -701,275 +784,277 @@ public class TileProcessor {
double disp = measured_scan.disparity_map[disp_index][nTile];
if (Math.abs(disp) <= trustedCorrelation){
    disp = disp/corr_magic_scale + measured_scan.disparity[tileY][tileX];
    if ((bg_tiles == null) || !bg_tiles[nTile] || (disp >= ex_min_over)) {
        // Center tile is valid, makes sense to investigate neighbors from this or earlier tiles
        int num_in_sample = 1;
        double sum_wnd = 1.0;
        boolean [] smpl_sel = new boolean [smplLen];
        double [] smpl_d = new double [smplLen];
        double [] smpl_p = new double [smplLen];
        double [] smpl_w = new double [smplLen];
        // add center tile
        smpl_sel[center_index] = true;
        smpl_d[center_index] = disp;
        smpl_w[center_index] = w;
        // process tiles around, looking for the best fit among other tiles, skipping the center
        for (int sy = 0; sy < smplSide; sy++){
            int y = tileY + sy - smpl_center;
            if ((y >= 0) && (y < tilesY)) {
                for (int sx = 0; sx < smplSide; sx++){
                    int x = tileX + sx - smpl_center;
                    if ((x >= 0) && (x < tilesX)) {
                        int indxs = sy*smplSide + sx;
                        if (indxs != center_index) { // already assigned
                            int nTile1 = y * tilesX + x;
                            int best_pass_index = -1;
                            double best_adiff = Double.NaN;
                            for (int ipass = 0; ipass < measured_list.size(); ipass++){
                                CLTPass3d scan = measured_list.get(ipass);
                                double w1 = scan.disparity_map[str_index][nTile1] - strength_floor;
                                if (w1 > 0) {
                                    if (strength_pow != 1.0){
                                        if (strength_pow == 0.0) {
                                            w1 = 1.0;
                                        } else {
                                            w1 = Math.pow(w1, strength_pow);
                                        }
                                    }
                                    double disp1 = scan.disparity_map[disp_index][nTile1];
                                    if (Math.abs(disp1) <= trustedCorrelation){
                                        disp1 = disp1/corr_magic_scale + scan.disparity[y][x];
                                        double adiff = Math.abs(disp1 - disp);
                                        if ((best_pass_index < 0) || (adiff < best_adiff)){
                                            best_pass_index = ipass;
                                            best_adiff = adiff;
                                            smpl_d[indxs] = disp1;
                                            smpl_w[indxs] = w1;
                                            // not summed yet!
                                        }
                                    }
                                }
                            }
                            if (best_pass_index >= 0){
                                smpl_w [indxs] *= smpl_weights[indxs];
                                smpl_sel[indxs] = true;
                                sum_wnd += smpl_weights[indxs];
                                num_in_sample ++;
                            }
                        }
                    }
                }
            }
        }
        // filled all neighbor tiles data. See if sufficient to proceed, then remove extra tiles
        // and process the remaining ones
        if (num_in_sample >= smplNum){ // try, remove worst
            sample_loop:
            {
                boolean en_tilt = (tiltXY == null);
                if (en_tilt) { // make sure there are enough samples and not all of them are on the same line
                    if (!notColinear(smpl_sel,smplSide)){
                        en_tilt = false;
                    }
                }
                if (en_tilt) { // enable floating tilt
                    double sd2 = 0.0, d_center = 0.0, sw = 0.0;
                    // TODO: making simple - recalculate after removing. Can be done more efficient.
                    while (num_in_sample >= smplNum) { // try, remove worst
                        sd2 = 0.0;
                        d_center = 0.0;
                        sw = 0.0;
                        double [][][] mdata = new double [num_in_sample][3][];
                        int mindx = 0;
                        for (int sy = 0; sy < smplSide; sy++){
                            for (int sx = 0; sx < smplSide; sx++){
                                int indxs = sy * smplSide + sx;
                                if (smpl_sel[indxs]) {
                                    mdata[mindx][0] = new double [2];
                                    mdata[mindx][0][0] = sx - smpl_dcenter;
                                    mdata[mindx][0][1] = sy - smpl_dcenter;
                                    mdata[mindx][1] = new double [1];
                                    mdata[mindx][1][0] = smpl_d[indxs];
                                    mdata[mindx][2] = new double [1];
                                    mdata[mindx][2][0] = smpl_w[indxs];
                                    mindx ++;
                                }
                            }
                        }
                        double[][] approx2d = pa.quadraticApproximation(
                            mdata,
                            true,          // boolean forceLinear, // use linear approximation
                            thresholdLin,  // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail)
                            thresholdQuad, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
                            debugLevel);
                        if (approx2d == null){
                            if (debugLevel > -1){
                                System.out.println("getFilteredDisparityStrength(): can not find linear approximation");
                            }
                            break sample_loop;
                        }
                        // limit tilt to be within range
                        // double max_abs_tilt, // = 2.0; // pix per tile
                        // double max_rel_tilt, // = 0.2; // (pix / disparity) per tile
                        double max_tilt = Math.min(max_abs_tilt, max_rel_tilt * approx2d[0][2]);
                        boolean overlimit = (Math.abs(approx2d[0][0]) > max_tilt) || (Math.abs(approx2d[0][1]) > max_tilt);
                        if (overlimit) {
                            approx2d[0][0] = Math.min(approx2d[0][0],  max_tilt);
                            approx2d[0][1] = Math.min(approx2d[0][1],  max_tilt);
                            approx2d[0][0] = Math.max(approx2d[0][0], -max_tilt);
                            approx2d[0][1] = Math.max(approx2d[0][1], -max_tilt);
                        }
                        // subtract tilt from disparity
                        for (int sy = 0; sy < smplSide; sy++){
                            for (int sx = 0; sx < smplSide; sx++){
                                int indxs = sy * smplSide + sx;
                                if (smpl_sel[indxs]) {
                                    smpl_p[indxs] = approx2d[0][0] * (sx - smpl_dcenter) + approx2d[0][1] * (sy - smpl_dcenter) + approx2d[0][2];
                                }
                            }
                        }
                        if (overlimit){ // re-calculate disparity average (in the center)
                            double sd = 0.0;
                            for (int indxs = 0; indxs < smplLen; indxs++) if (smpl_sel[indxs]) {
                                double d = smpl_d[indxs] - smpl_p[indxs];
                                double dw = d * smpl_w[indxs];
                                sd += dw;
                                sw += smpl_w[indxs];
                            }
                            sd /= sw;
                            for (int indxs = 0; indxs < smplLen; indxs++) if (smpl_sel[indxs]) {
                                smpl_p[indxs] += sd;
                            }
                            approx2d[0][2] += sd;
                        }
                        d_center = approx2d[0][2];
                        sw = 0.0;
                        for (int indxs = 0; indxs < smplLen; indxs++) if (smpl_sel[indxs]) {
                            double d = smpl_d[indxs] - smpl_p[indxs];
                            double dw = d * smpl_w[indxs];
                            // sd += dw;
                            sd2 += dw * smpl_d[indxs];
                            sw += smpl_w[indxs];
                        }
                        // remove worst - it should not make remaining set
                        if (num_in_sample > smplNum) { // remove worst if it is not the last run where only calculations are needed
                            // double d_mean = sd/sw;
                            int iworst = -1;
                            double dworst2 = 0.0;
                            for (int indxs = 0; indxs < smplLen; indxs++) if (smpl_sel[indxs]) {
                                // double d2 = (smpl_d[i] - d_mean);
                                double d2 = smpl_d[indxs] - smpl_p[indxs];
                                d2 *= d2;
                                if (d2 > dworst2) {
                                    if (notColinearWithout (
                                        indxs,       // int indx,
                                        smpl_sel,    // boolean [] sel,
                                        smplSide)) { // int side))
                                        iworst = indxs;
                                        dworst2 = d2;
                                    }
                                }
                            }
                            if (iworst < 0){
                                System.out.println("**** this is a BUG in getFilteredDisparityStrength() can not find the worst sample ****");
                                break;
                            }
                            // remove worst sample
                            smpl_sel[iworst] = false;
                            // double dw = smpl_d[iworst] * smpl_w[iworst];
                            // sd -= dw;
                            // sd2 -= dw * smpl_d[iworst];
                            // sw -= smpl_w[iworst];
                            sum_wnd -= smpl_weights[iworst];
                            num_in_sample --;
                        } else {
                            break;
                        }
                    } // removing worst tiles, all done,
                    // calculate variance of the remaining set
                    if (sw > 0.0) {
                        // sd /= sw;
                        // sd2 /= sw;
                        double var = sd2/sw; // - sd * sd;
                        if (var < smplVar) { // good, save in the result array
                            disp_strength[0][nTile] = d_center;
                            // ds[1][indx] = sw * lapWeight[dy][dx] /num_in_sample; // average weights, multiply by window //** TODO: change
                            disp_strength[1][nTile] = sw /sum_wnd; // average weights, multiply by window //** TODO: change
                        }
                    }
                } else { // fixed tilt
                    // tilt around center
                    if ((tiltXY != null) && ((tiltXY[0] != 0.0) || (tiltXY[1] != 0.0))){
                        for (int sy = 0; sy < smplSide; sy++){
                            for (int sx = 0; sx < smplSide; sx++){
                                int indxs = sy * smplSide + sx;
                                if (smpl_w[indxs] > 0.0) {
                                    smpl_d[indxs] -= tiltXY[0]* (sx - smpl_dcenter) + tiltXY[1]* (sy - smpl_dcenter);
                                }
                            }
                        }
                    }
                    // calculate
                    double sd = 0.0, sd2 = 0.0, sw = 0.0;
                    for (int i = 0; i < smplLen; i++) if (smpl_sel[i]) {
                        double dw = smpl_d[i] * smpl_w[i];
                        sd += dw;
                        sd2 += dw * smpl_d[i];
                        sw += smpl_w[i];
                    }
                    // remove worst, update sd2, sd and sw
                    while ((num_in_sample > smplNum) && (sw > 0)){ // try, remove worst
                        double d_mean = sd/sw;
                        int iworst = -1;
                        double dworst2 = 0.0;
                        for (int i = 0; i < smplLen; i++) if (smpl_sel[i]) {
                            double d2 = (smpl_d[i] - d_mean);
                            d2 *= d2;
                            if (d2 > dworst2) {
                                iworst = i;
                                dworst2 = d2;
                            }
                        }
                        if (iworst < 0){
                            System.out.println("**** this is a BUG in getFilteredDisparityStrength() ****");
                            break;
                        }
                        // remove worst sample
                        smpl_sel[iworst] = false;
                        double dw = smpl_d[iworst] * smpl_w[iworst];
                        sd -= dw;
                        sd2 -= dw * smpl_d[iworst];
                        sw -= smpl_w[iworst];
                        sum_wnd -= smpl_weights[iworst];
                        num_in_sample --;
                    }
                    // calculate variance of the remaining set
                    if (sw > 0.0) {
                        sd /= sw;
                        sd2 /= sw;
                        double var = sd2 - sd * sd;
                        if (var < smplVar) { // good, save in the result array
                            disp_strength[0][nTile] = sd;
                            // ds[1][indx] = sw * lapWeight[dy][dx] /num_in_sample; // average weights, multiply by window //** TODO: change
                            disp_strength[1][nTile] = sw /sum_wnd; // average weights, multiply by window //** TODO: change
                        }
                    } else {
                        num_in_sample = 0;
                        System.out.println("**** this is a BUG in getFilteredDisparityStrength(), should not happen? ****");
                    }
                }
            }
        }
    }
}
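The fixed-tilt branch above boils down to a weighted mean/variance with iterative worst-sample removal, maintaining the running sums sd, sd2 and sw. The self-contained sketch below (not code from this repository) reproduces just that core on made-up samples, thresholds and weights, accepting the result only when the weighted variance is below smplVar.

public class WorstSampleRemovalSketch {
    // returns {mean, variance} of the surviving samples, or null if rejected
    static double [] filterSamples(double [] d, double [] w, int smplNum, double smplVar) {
        boolean [] sel = new boolean [d.length];
        int num_in_sample = 0;
        double sd = 0.0, sd2 = 0.0, sw = 0.0;
        for (int i = 0; i < d.length; i++) if (w[i] > 0.0) {
            sel[i] = true;
            num_in_sample++;
            double dw = d[i] * w[i];
            sd += dw;
            sd2 += dw * d[i];
            sw += w[i];
        }
        while ((num_in_sample > smplNum) && (sw > 0)) {
            double d_mean = sd / sw;
            int iworst = -1;
            double dworst2 = 0.0;
            for (int i = 0; i < d.length; i++) if (sel[i]) {
                double d2 = (d[i] - d_mean) * (d[i] - d_mean);
                if (d2 > dworst2) {
                    iworst = i;
                    dworst2 = d2;
                }
            }
            if (iworst < 0) break;
            sel[iworst] = false;                // drop the worst sample and update the running sums
            double dw = d[iworst] * w[iworst];
            sd -= dw;
            sd2 -= dw * d[iworst];
            sw -= w[iworst];
            num_in_sample--;
        }
        if (sw <= 0.0) return null;
        double mean = sd / sw;
        double var = sd2 / sw - mean * mean;    // weighted variance of the remaining set
        return (var < smplVar) ? new double [] {mean, var} : null;
    }

    public static void main(String [] args) {
        double [] d = {1.05, 0.95, 1.00, 3.50, 1.10}; // one obvious outlier at 3.50
        double [] w = {1.0, 1.0, 1.0, 0.5, 1.0};
        double [] res = filterSamples(d, w, 4, 0.05);
        System.out.println((res == null) ? "rejected" : ("mean=" + res[0] + " var=" + res[1]));
    }
}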
...@@ -1583,7 +1668,7 @@ public class TileProcessor {
for (int i = 0; i<dbg_img.length;i++){
    dbg_img[i] = bgnd_tiles[i]?1:0;
}
sdfa_instance.showArrays(dbg_img, tilesX, tilesY, "tiles_getBackgroundMask");
}
}
...@@ -1755,7 +1840,7 @@ public class TileProcessor {
for (int i = 0; i<dbg_img.length;i++){
    dbg_img[i] = bgnd_tiles[i]?1:0;
}
sdfa_instance.showArrays(dbg_img, tilesX, tilesY, "tiles_getBackgroundMask_new");
}
}
...@@ -2768,7 +2853,9 @@ public class TileProcessor {
public boolean [] FilterScan(
    final CLTPass3d scan,
    final boolean [] bg_tiles,   // get from selected in clt_3d_passes.get(0);
    // disabled below
    final double ex_min_over,    // when expanding over previously detected (by error) background, disregard far tiles
    // final boolean [] border_tiles, // last measured border tiles
    final CLTPass3d last_meas,   // last measured scan (with border_tiles and getSecondMaxDiff
    final int [] horVertMod,     // +1 - modified by hor correlation, +2 - modified by vert correlation (or null)
    final double disparity_far,  //
...@@ -2841,16 +2928,18 @@ public class TileProcessor {
} else { // in range
    // Higher difference, higher the correlation strength is needed
    // assuming this_strength is a combination of the strength4 and hor/vert, apply to any
    // if (!bg_tiles[i] || (this_disparity[i] >= ex_min_over)) {
    if ((this_strength[i] > ex_strength) || (horVertMod != null) && (horVertMod[i] != 0)){
        these_tiles[i] = true;
        if (this_strength[i] < ex_strength* super_trust) {
            if (second_max_diff != null) { // only apply to the border tiles, so by next pass they () weak can be used?
                if (border_tiles[i] && (this_strength[i] < second_max_diff[i] * ex_nstrength)) {
                    these_tiles[i] = false;
                }
            }
        }
    }
    // }
    // this one is not (yet) compared to normalized difference between the channels
    // if ((horVertMod != null) && (horVertMod[i] != 0)){
    //     these_tiles[i] = true;
...@@ -2858,19 +2947,22 @@ public class TileProcessor {
}
// combine with plates if available
if ((plate_ds != null) && (plate_ds[1][i] >=0) && (this_strength[i] < ex_strength* super_trust)) { // plate_ds[1][i] == -1 - no data, was not measured
    // if (!bg_tiles[i] || (plate_ds[0][i] >= ex_min_over)) { // prevent expanding over correct background far tiles
    // expanding over (wrong) background is only OK for near tiles
    if (plate_ds[1][i] > 0){
        if ( // !refined_selected[i] ||
                !these_tiles[i] ||
                (this_disparity[i] <= plate_ds[0][i]) || // keep old way detected (including poles) is closer, replace if raw is farther
                (this_strength[i] < plate_ds[1][i] * scale_filtered_strength_pre) // keep old way detected (including poles) is closer, replace if raw is farther
                ) {
            these_tiles[i] = true;
            this_disparity[i] = plate_ds[0][i];
            this_strength[i] = plate_ds[1][i];
        }
    } else if (these_tiles[i]){ // no plates and raw is not reliably
        these_tiles[i] = false;
    }
    // }
}
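The same gate appears in several places above, sometimes active and sometimes commented out, in slightly different forms: a measurement may override a tile previously marked as background only if there is no background selection, the tile is not background, or the candidate disparity is near enough (at least ex_min_over). The hypothetical helper below (not part of this repository) just expresses that predicate on toy values.

public class BackgroundOverrideSketch {
    static boolean allowOverBackground(boolean [] bg_tiles, int nTile, double disp, double ex_min_over) {
        return (bg_tiles == null) || !bg_tiles[nTile] || (disp >= ex_min_over);
    }

    public static void main(String [] args) {
        boolean [] bg = {true, true, false};
        double ex_min_over = 1.0;
        System.out.println(allowOverBackground(bg,   0, 0.3, ex_min_over)); // false: far tile over background
        System.out.println(allowOverBackground(bg,   1, 2.0, ex_min_over)); // true:  near enough to override
        System.out.println(allowOverBackground(bg,   2, 0.3, ex_min_over)); // true:  not a background tile
        System.out.println(allowOverBackground(null, 0, 0.3, ex_min_over)); // true:  no background selection
    }
}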
...@@ -3533,6 +3625,7 @@ public class TileProcessor {
boolean [] these_tiles = FilterScan(
    scan_prev,                  // final CLTPass3d scan,
    scan_bg.selected,           // get from selected in clt_3d_passes.get(0);
    clt_parameters.ex_min_over, // when expanding over previously detected (by error) background, disregard far tiles
    // border_tiles,            // final boolean [] border_tiles, // last measured border tiles
    scan_lm,                    // final CLTPass3d last_meas, // last measured scan (with border_tiles and getSecondMaxDiff
    replaced,                   // final int [] horVertMod, // +1 - modified by hor correlation, +2 - modified by vert correlation (or null)
...@@ -4519,6 +4612,13 @@ public class TileProcessor {
// Trying new class
LinkPlanes lp = new LinkPlanes (clt_parameters, st);
if (clt_parameters.show_planes){
showPlaneData(
"initial",
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
st, // SuperTiles st,
null); // TilePlanes.PlaneData[][][] split_planes
}
// condition supertiles (create and manage links, merge)
lp.conditionSuperTiles(
...@@ -4859,6 +4959,7 @@ public class TileProcessor {
if (clt_parameters.show_planes){
    showPlaneData(
        "final",
        clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
        st,             // SuperTiles st,
        split_planes);  // TilePlanes.PlaneData[][][] split_planes
...@@ -4895,12 +4996,13 @@ public class TileProcessor {
public void showPlaneData(
    String suffix,
    EyesisCorrectionParameters.CLTParameters clt_parameters,
    SuperTiles st,
    TilePlanes.PlaneData[][][] split_planes
    )
{
    double [] split_lines = (split_planes==null) ? null: st.showSplitLines(
        split_planes,
        1, // final int debugLevel)
        clt_parameters.tileX,
...@@ -4961,7 +5063,9 @@ public class TileProcessor {
    plane_data[indx] = plane_data[indx-2].clone(); // java.lang.ArrayIndexOutOfBoundsException: -1
    for (int i = 0; i < plane_data[indx].length; i++){
        if (Double.isNaN(plane_data[indx][i])) plane_data[indx][i] = 0.0;
        if ((plane_data[indx-1] != null) && (plane_data[indx] != null)) {
            if (plane_data[indx-1][i] > 0) plane_data[indx][i] = Double.NaN;
        }
    }
    indx++;
    plane_data[indx] = new double [wh[0]*wh[1]];
...@@ -4969,19 +5073,31 @@ public class TileProcessor {
        int dbg_stx = (i % wh[0]) /superTileSize;
        int dbg_sty = (i / wh[0]) /superTileSize;
        int dbg_nsTile = dbg_stx + dbg_sty * (wh[0]/superTileSize);
        if (st.planes_mod != null) {
            if (st.planes_mod[dbg_nsTile] == null){
                plane_data[indx][i] = Double.NaN;
            } else {
                int dbg_nl = 0;
                for (int j = 0; j < st.planes_mod[dbg_nsTile].length; j++){
                    if (st.planes_mod[dbg_nsTile][j] != null) dbg_nl++;
                }
                plane_data[indx][i] = dbg_nl;
            }
        } else if (st.planes != null) {
            if (st.planes[dbg_nsTile] == null){
                plane_data[indx][i] = Double.NaN;
            } else {
                int dbg_nl = 0;
                for (int j = 0; j < st.planes[dbg_nsTile].length; j++){
                    if (st.planes[dbg_nsTile][j] != null) dbg_nl++;
                }
                plane_data[indx][i] = dbg_nl;
            }
        }
    }
    showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
    sdfa_instance.showArrays(plane_data, wh[0], wh[1], true, "plane_data-"+suffix);
}
...@@ -5278,6 +5394,7 @@ public class TileProcessor {
these_tiles = FilterScan(
    scan_prev,                  // final CLTPass3d scan,
    scan_bg.selected,           // get from selected in clt_3d_passes.get(0);
    clt_parameters.ex_min_over, // when expanding over previously detected (by error) background, disregard far tiles
    // border_tiles,            // final boolean [] border_tiles, // last measured border tiles
    scan_lm,                    // final CLTPass3d last_meas, // last measured scan (with border_tiles and getSecondMaxDiff
    replaced,                   // final int [] horVertMod, // +1 - modified by hor correlation, +2 - modified by vert correlation (or null)
...