Commit ee9c0264 authored by Andrey Filippov

Implemented main camera field correction from the DSI of the dual camera rig.
parent b3afa1ab
......@@ -863,7 +863,7 @@ public class AlignmentCorrection {
* @param samples_list sample list generated by selectInfinityTiles method, each element references measurement series,
* tile index and (possibly modified) weight of each tile
* @param tilesX number of tiles in each data line
* @param magic_coeff still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
* @param magic_coeff now understood - result of the interaction of the CM maximum and the correlation window
* @param mismatch_list data to calculate extrinsic corrections or null
* @param debugLevel debug level
* @return per sub-camera, per direction (x,y) 6 quadratic polynomial coefficients, same format as fine_geometry_correction()
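* Note (illustrative reading): with magic_coeff around 0.85 the disparity reported by the CM maximum is assumed
* to be roughly magic_coeff times the geometric disparity, i.e. geometric ~= reported / 0.85. This is an
* interpretation of the parameter description above, not a verified property of the code.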
......@@ -1083,13 +1083,12 @@ B = |+dy0 -dy1 -2*dy3 |
double [] centerXY = {
tileX * qc.tp.getTileSize() + qc.tp.getTileSize()/2,// - shiftX;
tileY * qc.tp.getTileSize() + qc.tp.getTileSize()/2};//- shiftY;
double disparity_task = disp_scan_start + disp_scan_step * s.series;
double disparity_task = disp_scan_start + disp_scan_step * s.series; // Not needed even with known disparity
double disparity_meas = disp_strength[s.series * NUM_SLICES + 0][s.tile];
double strength = disp_strength[s.series * NUM_SLICES + 1][s.tile];
// final double disp_scan_start,
// final double disp_scan_step,
// final int disp_scan_count,
if (Double.isNaN(disparity_meas)) {
System.out.println("infinityMismatchCorrection(): disparity_meas=NaN: s.tile= "+s.tile);
}
mismatch_list.add(new Mismatch(
allow_dispatity && (s.series == 0), // true, //false, // public boolean use_disparity; // adjust dx0+dx1+dy0+dy1 == 0
centerXY,
......@@ -2234,13 +2233,14 @@ System.out.println("test1234");
}
ArrayList<Mismatch> mismatch_list = use_poly? null : (new ArrayList<Mismatch>());
// inf_and_ly here has filtered disparity and offsets; clt_parameters.ly_inf_disp should be processed before the filters
// for rig with known disparity - use series = 0 - it will allow disparity adjustment
double [][][] mismatch_corr_coefficients = infinityMismatchCorrection(
clt_parameters.disp_scan_start, // final double disp_scan_start,
clt_parameters.disp_scan_step, // final double disp_scan_step,
use_poly, // final boolean use_poly,
clt_parameters.fcorr_quadratic, // final boolean use_quadratic,
true, // clt_parameters.fcorr_inf_vert, // final boolean use_vertical,
// tool alte to restore disparity - should be dome earlier
// too late to restore disparity - should be done earlier
false, // final boolean use_disparity, // for infinity
true, // clt_parameters.ly_inf_disp, //final boolean allow_dispatity,
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
......@@ -2310,6 +2310,477 @@ System.out.println("test1234");
return mismatch_corr_coefficients;
}
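// lazyEyeCorrectionFromGT(): a variant of the lazy-eye correction that uses the dual camera rig DSI
// (gt_disparity_strength) as the ground truth disparity/strength instead of the main camera's own
// infinity measurements (summary inferred from the commit description and the code below).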
public double [][][] lazyEyeCorrectionFromGT(
final boolean use_poly, // Use polynomial correction, false - correct tilt/azimuth/roll of each sensor
final boolean restore_disp_inf, // Restore subtracted disparity for scan #0 (infinity) always true
final double fcorr_radius,
final double min_strength_in,
final double strength_pow,
final double lazyEyeCompDiff, // clt_parameters.fcorr_disp_diff
final int lazyEyeSmplSide, // = 2; // Sample size (side of a square)
final int lazyEyeSmplNum, // = 3; // Number after removing worst (should be >1)
final double lazyEyeSmplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
final double lazyEyeDispVariation, // maximal full disparity difference between the tile and 8 neighbors
final double lazyEyeDispRelVariation,
final double ly_norm_disp, // = 5.0; // Reduce weight of higher disparity tiles
final int smplSide, // = 2; // Sample size (side of a square)
final int smplNum, // = 3; // Number after removing worst (should be >1)
final double smplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
// histogram parameters
final int hist_smpl_side, // 8 x8 masked, 16x16 sampled
final double hist_disp_min,
final double hist_disp_step,
final int hist_num_bins,
final double hist_sigma,
final double hist_max_diff,
final int hist_min_samples,
final boolean hist_norm_center, // if there are more tiles that fit than min_samples, replace with a single equal weight
final double inf_fraction, // fraction of the weight for the infinity tiles
final double inf_max_disparity, // use all smaller disparities as inf_fraction
EyesisCorrectionParameters.CLTParameters clt_parameters,
double [][] scans_14, // here - always 14 - infinity and non-infinity
double [][][] gt_disparity_strength, // 1 pair for each 14 entries of scans_14 (normally - just 1 scan
final boolean filter_ds, //
final boolean filter_lyf, // ~clt_parameters.lyf_filter, but may be different, now off for a single camera
int tilesX,
double magic_coeff, // still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
int debugLevel){
// final double lazyEyeDispRelVariation = 0.02;
final int dbg_nTile = -34145; // 37005; // -59038;
final int num_scans = scans_14.length/NUM_ALL_SLICES;
final int num_tiles = scans_14[0].length;
final int tilesY = num_tiles/tilesX;
final boolean [] center_mask = getCenterMask(fcorr_radius, tilesX, tilesY);
final double [][] scans = new double [num_scans * NUM_SLICES][];
// final double [][] comp_strength_rms = new double [num_scans][num_tiles];
for (int ns = 0; ns < num_scans; ns++){
final double [] min_weights = new double [num_tiles];
for (int nTile = 0; nTile < num_tiles; nTile++){
if (nTile == dbg_nTile) {
System.out.println("lazyEyeCorrectionFromGT(), nTile="+nTile);
}
double w = scans_14[ns * NUM_ALL_SLICES + INDEX_14_WEIGHT][nTile];
for (int i = 0; i < INDICES_14_WEIGHTS.length; i++) {
w = Math.min(w, scans_14[ns * NUM_ALL_SLICES + INDICES_14_WEIGHTS[i]][nTile]);
}
min_weights[nTile] = w * gt_disparity_strength[ns][1][nTile];
}
for (int i = 0; i < INDICES_14_10.length; i++){
if (i == INDEX_10_WEIGHT) {
scans[ns * NUM_SLICES + i] = min_weights;
} else {
scans[ns * NUM_SLICES + i] = scans_14[ns * NUM_ALL_SLICES + INDICES_14_10[i]];
}
}
}
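// Reading of the loop above: the effective per-tile weight is the minimum of the main-camera weight
// slices multiplied by the rig (ground truth) strength, so a tile only contributes when both the main
// camera and the rig measured it with non-zero confidence.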
if (debugLevel > 0) { // -2) { // 100) {
if (debugLevel > -3) { // -1) { // -2) { // 100) {
double [][] dbg_scans = new double[scans.length][];
for (int i = 0; i < dbg_scans.length;i++) {
dbg_scans[i] = scans[i].clone();
if (i != 1) {
for (int j = 0; j < dbg_scans[i].length; j++) if (scans[1][j]<=0.0) {
dbg_scans[i][j] = Double.NaN;
}
}
}
(new showDoubleFloatArrays()).showArrays(dbg_scans, tilesX, tilesY, true, "scans_pre-disp");
}
}
// Add disparity to dx0, dx1, dy2, dy3 pairs (here - always)
if ( restore_disp_inf) { // && false) { // ==clt_parameters.inf_restore_disp
for (int nTile = 0; nTile < num_tiles; nTile++) if (scans[INDEX_10_WEIGHT][nTile] > 0){
for (int i = 0; i < INDICES_10_DISP.length; i++) {
scans[INDICES_10_DISP[i]][nTile] += scans[INDEX_10_DISPARITY][nTile];
}
}
}
if (debugLevel > 0) { // -2) { // 100) {
if (debugLevel > -3) { // -1) { // -2) { // 100) {
double [][] dbg_scans = new double[scans.length][];
for (int i = 0; i < dbg_scans.length;i++) {
dbg_scans[i] = scans[i].clone();
if (i != 1) {
for (int j = 0; j < dbg_scans[i].length; j++) if (scans[1][j]<=0.0) {
dbg_scans[i][j] = Double.NaN;
}
}
}
(new showDoubleFloatArrays()).showArrays(dbg_scans, tilesX, tilesY, true, "scans_post-disp");
}
}
// FIXME: Seems that disparity should be combined with dxy for BG scan before that
// For GT - keep it here or remove?
double[][] filtered_scans;
if (filter_ds) {
filtered_scans = filterDisparityStrength (
scans, // final double[][] disp_strength_in, // [1][37006] >0, [21][37006] = NaN
min_strength_in, // final double strength_floor,
strength_pow, // final double strength_pow,
lazyEyeSmplSide, // final int smplSide, // = 2; // Sample size (side of a square)
lazyEyeSmplNum, // final int smplNum, // = 3; // Number after removing worst (should be >1)
lazyEyeSmplRms, // final double smplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
tilesX);// final int tilesX);
} else {
filtered_scans = scans;
}
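// When filter_ds is set, the scans are filtered per tile over a lazyEyeSmplSide x lazyEyeSmplSide sample:
// the worst tiles are removed down to lazyEyeSmplNum and the sample is rejected if the RMS of the remaining
// tiles exceeds lazyEyeSmplRms (reading of the parameter comments above); otherwise the raw scans are used as is.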
if (debugLevel > 0) { // -2) { // 100) {
{
double [][] dbg_scans = new double[scans.length][];
for (int i = 0; i < dbg_scans.length;i++) {
dbg_scans[i] = filtered_scans[i].clone();
if (i != 1) {
for (int j = 0; j < dbg_scans[i].length; j++) if (filtered_scans[1][j]<=0.0) {
dbg_scans[i][j] = Double.NaN;
}
}
}
(new showDoubleFloatArrays()).showArrays(dbg_scans, tilesX, tilesY, true, "filtered_scans");
}
if (debugLevel > -3) { // -1) { // -2) { // 100) {
double [][] dbg_scans = new double[scans.length][];
for (int i = 0; i < dbg_scans.length;i++) {
dbg_scans[i] = scans[i].clone();
if (i != 1) {
for (int j = 0; j < dbg_scans[i].length; j++) if (scans[1][j]<=0.0) {
dbg_scans[i][j] = Double.NaN;
}
}
}
(new showDoubleFloatArrays()).showArrays(dbg_scans, tilesX, tilesY, true, "scans-after");
(new showDoubleFloatArrays()).showArrays(gt_disparity_strength[0], tilesX, tilesY, true, "gt_disparity_strength");
}
}
if (debugLevel > -2) {
System.out.println("lazyEyeCorrectionFromGT() 1: removing tiles with residual disparity absolute value > "+lazyEyeCompDiff);
}
double [][] combo_mismatch = new double [NUM_SLICES][num_tiles];
for (int ns = 0; ns < num_scans; ns++){
for (int nTile = 0; nTile < num_tiles; nTile++) {
if ((nTile == dbg_nTile) || (nTile == 24971)){
System.out.println("lazyEyeCorrectionFromGT().1: nTile="+nTile); // filtered_scans[2][37005] = NaN
}
// double w = filtered_scans[ns * NUM_SLICES + 1][nTile];
// if ((w > 0.0) && (gt_disparity_strength[ns][1][nTile] > 0.0)){ // filtered strength may be non-zero where gt_disparity_strength[ns][1][nTile] is -> NaN
// reversing - use GT strength, but skip if there is no filtered?
double w = gt_disparity_strength[ns][1][nTile]; // GT data
if ((w > 0.0) && (filtered_scans[ns * NUM_SLICES + 1][nTile] > 0.0)){ // filtered strength may be non-zero where gt_disparity_strength[ns][1][nTile] is -> NaN
double disp = filtered_scans[ns * NUM_SLICES + 0][nTile];
if (Math.abs(disp) <= lazyEyeCompDiff) {
for (int i = 2; i < NUM_SLICES; i++) if (i != 1){
combo_mismatch[i][nTile] += filtered_scans[ns * NUM_SLICES + i][nTile] * w;
}
//FIXME: ???? target_disparity is not 0 for bg
// combo_mismatch combines both infinity and regular for the same tile, mixing "disparity" and "target disparity" with weights and magic_scale
// Seems to be wrong, as target_disparity is only estimated disparity, not measured. Or is it measured for non-infinity?
// At least bg scan is measured with disparity =0, even as target_disparity is not 0
// combo data is later used as a non-infinity to correct all but disparity
combo_mismatch[0][nTile] += gt_disparity_strength[ns][0][nTile] * w;
combo_mismatch[1][nTile] += w;
}
}
}
}
for (int nTile = 0; nTile < num_tiles; nTile++) {
if (nTile == dbg_nTile){
System.out.println("lazyEyeCorrectionFromGT().2: nTile="+nTile);
}
double w = combo_mismatch[1][nTile];
if (w > 0.0){
for (int i = 0; i < NUM_SLICES; i++) if (i != 1){
combo_mismatch[i][nTile] /= w;
}
} else {
for (int i = 0; i < NUM_SLICES; i++) if (i != 1){
combo_mismatch[i][nTile] = Double.NaN;
}
}
}
// reduce influence of high disparity, using combined disparity
// double norm_ly_disparity = 100.0; // disabling
for (int nTile = 0; nTile < num_tiles; nTile++) {
if ((combo_mismatch[0][nTile] > 0) && (combo_mismatch[0][nTile] > ly_norm_disp)) { // why 1-st term?
combo_mismatch[1][nTile] *= ly_norm_disp/combo_mismatch[0][nTile];
}
}
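// Illustrative example for the weight reduction above (assumed numbers): with ly_norm_disp = 5.0 a tile with
// combined disparity 10.0 gets its weight multiplied by 5.0/10.0 = 0.5, while tiles at or below 5.0 keep full weight.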
// instance of class to operate navigation over tiles
// compare tile disparity (combo) with those of neighbors, discard if too different
final TileNeibs tnImage = new TileNeibs(tilesX, tilesY); // num_tiles/tilesX);
for (int nTile = 0; nTile < num_tiles; nTile++) if (combo_mismatch[1][nTile] > 0.0){
if (nTile == dbg_nTile){
System.out.println("lazyEyeCorrectionFromGT().3: nTile="+nTile);
}
double d = combo_mismatch[0][nTile];
double lev = lazyEyeDispVariation + lazyEyeDispRelVariation * d;
for (int dir = 0; dir <8; dir++){
int nTile1 = tnImage.getNeibIndex(nTile, dir);
if ((nTile1 >= 0) && (combo_mismatch[1][nTile1] > 0.0)){
if (Math.abs(combo_mismatch[0][nTile1] - d) > lev) {
combo_mismatch[1][nTile] = 0.0;
combo_mismatch[0][nTile] = Double.NaN;
for (int i = 2; i < NUM_SLICES; i++) if (i != 1){
combo_mismatch[i][nTile] = Double.NaN;
}
break;
}
}
}
}
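// Illustrative example for the neighbor check above (assumed numbers): with lazyEyeDispVariation = 0.5 and
// lazyEyeDispRelVariation = 0.02, a tile at disparity d = 10.0 is discarded (weight zeroed, offsets set to NaN)
// if any of its 8 neighbors differs from it by more than 0.5 + 0.02 * 10.0 = 0.7 pix.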
// here combo_mismatch[2][37005] = Double.NaN,combo_mismatch[1][37005] != 0.0, combo_mismatch[0][37005] = 0.0
if (debugLevel > 0) { // 0) {
String [] prefixes = {"disparity", "strength", "dx0", "dy0", "dx1", "dy1", "dx2", "dy2", "dx3", "dy3"};
(new showDoubleFloatArrays()).showArrays(combo_mismatch, tilesX, combo_mismatch[0].length/tilesX, true, "combo_mismatch" , prefixes);
}
// if (clt_parameters.lyf_filter) {
if (filter_lyf) {
combo_mismatch = filterLazyEyePairs (
combo_mismatch, // final double[][] samples_in,
clt_parameters.lyf_smpl_side , // 8, // final int smpl_side, // 8 x8 masked, 16x16 sampled
clt_parameters.lyf_rms_max , // 0.25, // final double rms_max, TODO: find reasonable one not critical?
clt_parameters.lyf_frac_keep , // 0.5, // final double frac_keep,
clt_parameters.lyf_min_samples , // 5, // final int min_samples,
clt_parameters.lyf_norm_center , // true, // final boolean norm_center, // if there are more tiles that fit than min_samples, replace with a single equal weight
tilesX); // final int tilesX);
}
if (debugLevel > 0) {
String [] prefixes = {"disparity", "strength", "dx0", "dy0", "dx1", "dy1", "dx2", "dy2", "dx3", "dy3"};
(new showDoubleFloatArrays()).showArrays(combo_mismatch, tilesX, combo_mismatch[0].length/tilesX, true, "filtered_mismatch" , prefixes);
}
// no need to extract and filter infinity data
// make all zero strength tiles have NaN values to use histograms in ImageJ
for (int nt = 0; nt < combo_mismatch[INDEX_10_WEIGHT].length; nt++ ) {
if (combo_mismatch[INDEX_10_WEIGHT][nt] == 0.0) {
for (int i = 0; i < NUM_SLICES; i++) if (i != INDEX_10_WEIGHT){
combo_mismatch[i][nt] = Double.NaN;
}
}
}
// static final int INDEX_10_WEIGHT = 1;
System.out.println("test123");
if ((debugLevel > -1) && (hist_smpl_side > 0)) { // 0) {
String [] titles = {"disparity", "strength", "dx0", "dy0", "dx1", "dy1", "dx2", "dy2", "dx3", "dy3"};
(new showDoubleFloatArrays()).showArrays(combo_mismatch, tilesX, tilesY, true, "inf_and_ly",titles);
int step = hist_smpl_side; // should be the same for both filters
int tilesX1 = tilesX/step;
int tilesY1 = tilesY/step;
int num_tiles1 = tilesX1 * tilesY1;
double [][] dbg_img = new double [combo_mismatch.length][num_tiles1];
for (int tY = 0; tY < tilesY1; tY++) {
for (int tX = 0; tX < tilesX1; tX++) {
int nTile1 = tX + tY*tilesX1;
for (int sY = 0; sY < step; sY ++) {
for (int sX = 0; sX < step; sX ++) {
int nTile = (sX + step * tX) + (sY + step * tY) * tilesX;
double w = combo_mismatch[1][nTile];
if (w > 0.0){
for (int i = 0; i < NUM_SLICES; i++) if (i != 1) {
dbg_img[i][nTile1] += w * combo_mismatch[i][nTile];
}
dbg_img[1][nTile1] += w;
}
}
}
double w = dbg_img[1][nTile1];
if (w > 0.0){
for (int i = 0; i < NUM_SLICES; i++) if (i != 1) {
dbg_img[i][nTile1] /= w;
}
} else {
for (int i = 0; i < NUM_SLICES; i++) if (i != 1) {
dbg_img[i][nTile1] = Double.NaN;
}
}
}
}
(new showDoubleFloatArrays()).showArrays(dbg_img, tilesX1, tilesY1, true, "inf_and_ly8",titles);
}
System.out.println("test1234a");
// create list for infinity data
// /clt_parameters.ly_inf_en,
// adjust weight to balance the infinity data and the lazy eye data, so the impact of "infinity" (or really far) tiles is not too small even
// if there is little infinity in the scene. As the ground truth (rig data) is known, infinity does not need to be guessed
// from the images
// final double inf_fraction, // fraction of the weight for the infinity tiles
// final double inf_max_disparity, // use all smaller disparities as inf_fraction
double [] total_weights = new double[2];
for (int nTile = 0; nTile < combo_mismatch[INDEX_10_WEIGHT].length; nTile++ ) if (center_mask[nTile]){
if (combo_mismatch[INDEX_10_DISPARITY][nTile] <= inf_max_disparity) {
total_weights[0] += combo_mismatch[INDEX_10_WEIGHT][nTile];
} else {
total_weights[1] += combo_mismatch[INDEX_10_WEIGHT][nTile];
}
}
double inf_fraction_limited = (inf_fraction >= 0.0) ?((inf_fraction > 1.0) ? 1.0 : inf_fraction):0.0;
double [] weights = {
inf_fraction_limited * (total_weights[0] + total_weights[1]) / total_weights[0],
(1.0 - inf_fraction_limited) * (total_weights[0] + total_weights[1]) / total_weights[1],
};
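// Worked example of the balancing above (assumed numbers): with inf_fraction = 0.5 and total_weights = {10.0, 90.0},
// weights[0] = 0.5 * 100 / 10 = 5.0 and weights[1] = 0.5 * 100 / 90 ~= 0.56, so after scaling the far ("infinity")
// tiles carry 10 * 5.0 = 50.0 of the total weight - exactly the requested inf_fraction of the sum.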
if (weights[0]> weights[1]) {
if (debugLevel>-1) {
System.out.println("Boosting weights of far tiles (weights[0]="+weights[0]+", weights[1]="+weights[1]+")");
}
for (int nTile = 0; nTile < num_tiles; nTile++) {
if (combo_mismatch[INDEX_10_DISPARITY][nTile] <= inf_max_disparity) {
combo_mismatch[1][nTile] *= weights[0];
} else {
combo_mismatch[1][nTile] *= weights[1];
}
}
} else {
if (debugLevel>-1) {
System.out.println("There are already more far tiles than requested (weights[0]="+weights[0]+", weights[1]="+weights[1]+"), so keeping original weights");
}
}
ArrayList<Sample> samples_list = new ArrayList<Sample>();
for (int nTile = 0; nTile < num_tiles; nTile++) if (combo_mismatch[INDEX_10_WEIGHT][nTile] > 0.0) {
samples_list.add(new Sample(0, nTile, combo_mismatch[INDEX_10_WEIGHT][nTile])); // first should be 0 to use disparity
if (Double.isNaN(combo_mismatch[INDEX_10_DISPARITY][nTile] )) {
System.out.println("lazyEyeCorrectionFromGT(): Double.isNaN(combo_mismatch["+INDEX_10_DISPARITY+"]["+nTile+"])");
}
}
if (debugLevel > 1) {
double inf_weight = 0.0;
for (Sample s: samples_list) {
inf_weight += s.weight;
}
System.out.println("lazyEyeCorrectionFromGT(): number of all samples="+samples_list.size()+", total weight = "+inf_weight);
}
if (debugLevel > 1) {
String [] titles = {"disparity", "strength", "dx0", "dy0", "dx1", "dy1", "dx2", "dy2", "dx3", "dy3"};
(new showDoubleFloatArrays()).showArrays(filtered_scans, tilesX, tilesY, true, "filtered_scans_a" , titles);
}
if (debugLevel > 1) {
String [] prefixes = {"disparity", "strength", "dx0", "dy0", "dx1", "dy1", "dx2", "dy2", "dx3", "dy3"};
(new showDoubleFloatArrays()).showArrays(combo_mismatch, tilesX, combo_mismatch[0].length/tilesX, true, "combo_mismatch" , prefixes);
}
ArrayList<Mismatch> mismatch_list = use_poly? null : (new ArrayList<Mismatch>());
// inf_and_ly here has filtered disparity and offsets; clt_parameters.ly_inf_disp should be processed before the filters
// for rig with known disparity - use series = 0 - it will allow disparity adjustment
double [][][] mismatch_corr_coefficients = infinityMismatchCorrection(
clt_parameters.disp_scan_start, // final double disp_scan_start,
clt_parameters.disp_scan_step, // final double disp_scan_step,
use_poly, // final boolean use_poly,
clt_parameters.fcorr_quadratic, // final boolean use_quadratic,
true, // clt_parameters.fcorr_inf_vert, // final boolean use_vertical,
// too late to restore disparity - should be done earlier
false, // final boolean use_disparity, // for infinity
true, // clt_parameters.ly_inf_disp, //final boolean allow_dispatity,
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
combo_mismatch, // double [][] disp_strength,
samples_list, // ArrayList<Sample> samples_list,
tilesX, // int tilesX,
magic_coeff, // double magic_coeff, // still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
mismatch_list, // ArrayList<Mismatch> mismatch_list,
debugLevel); // int debugLevel)
if (debugLevel > -2) {
System.out.println("===== lazyEyeCorrectionFromGT(): correction coefficients =====");
if (mismatch_corr_coefficients != null) {
show_fine_corr(
mismatch_corr_coefficients,
"mismatch_corr_coefficients");
} else {
System.out.println("They are null - non-null coefficients are only produced for the poly correction");
}
}
if (!use_poly && (mismatch_list != null)){
double [] old_new_rms = new double[1];
boolean apply_extrinsic = true;
int solveCorr_debug = ((clt_parameters.lym_iter == 1) && (clt_parameters.ly_par_sel != 0))? 2 : debugLevel;
GeometryCorrection.CorrVector corr_vector = solveCorr (
clt_parameters.ly_inf_en, // boolean use_disparity, // if true will ignore disparity data even if available (was false)
clt_parameters.ly_aztilt_en,// boolean use_aztilts, // Adjust azimuths and tilts excluding disparity
clt_parameters.ly_diff_roll_en,// boolean use_diff_rolls, // Adjust differential rolls (3 of 4 angles)
clt_parameters.ly_inf_force, // boolean force_convergence, // if true try to adjust convergence (disparity, symmetrical parameter 0) even with no disparity
clt_parameters.ly_com_roll, // boolean common_roll, // Enable common roll (valid for high disparity range only)
clt_parameters.ly_focalLength, // boolean corr_focalLength, // Correct scales (focal length temperature? variations)
clt_parameters.ly_par_sel, //int manual_par_sel, // Manually select the parameter mask bit 0 - sym0, bit1 - sym1, ... (0 - use boolean flags, != 0 - ignore boolean flags)
mismatch_list, // ArrayList<Mismatch> mismatch_list,
qc.geometryCorrection, // GeometryCorrection geometryCorrection,
qc.geometryCorrection.getCorrVector(), // GeometryCorrection.CorrVector corr_vector,
old_new_rms, // double [] old_new_rms, // should be double[2]
// 2); // debugLevel); // 2); // 1); // int debugLevel)
solveCorr_debug); // debugLevel); // 2); // 1); // int debugLevel)
//TODO: ** Put 2 here to debug derivative images (diff_dmv_dsym - does not match yet, probably different "addition" of angles)
if (debugLevel > -1){
System.out.println("Old extrinsic corrections:");
System.out.println(qc.geometryCorrection.getCorrVector().toString());
System.out.println("Delta extrinsic corrections:");
System.out.println(corr_vector.toString());
}
if (apply_extrinsic){
qc.geometryCorrection.getCorrVector().incrementVector(corr_vector, clt_parameters.ly_corr_scale);
if (debugLevel > -1){
System.out.println("New extrinsic corrections:");
System.out.println(qc.geometryCorrection.getCorrVector().toString());
}
}
mismatch_corr_coefficients = new double [1][2][];
mismatch_corr_coefficients[0][0] = corr_vector.toSymArray(null);
mismatch_corr_coefficients[0][1] = old_new_rms;
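// Reading of the packing above: in the extrinsic (non-poly) branch the returned array is reused to carry the
// symmetric correction vector in [0][0] and the old/new RMS values in [0][1] instead of polynomial coefficients.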
} else {
if (debugLevel > -2){
System.out.println("Adjustment of extrinsic parameters (tilt, azimuth, roll) of the subcameras is disabled, use_poly="+
use_poly+" (should be false for extrinsics)");
System.out.println(qc.geometryCorrection.getCorrVector().toString());
}
return mismatch_corr_coefficients;
}
return mismatch_corr_coefficients;
}
public double [][] combineCltMismatches(
EyesisCorrectionParameters.CLTParameters clt_parameters,
double [][][] clt_mismatches,
......@@ -2458,6 +2929,9 @@ System.out.println("test1234");
for (int n = 0; n < 2* NUM_SENSORS; n++){
// jt[npar][2 * NUM_SENSORS * indx + n] = j_partial[n][npar]; // here Jacobian was not transposed
jt[npar][2 * NUM_SENSORS * indx + n] = jt_partial[npar][n];
if (Double.isNaN(jt_partial[npar][n])) {
System.out.println("getJacobianTransposed(): npar="+npar+", indx="+indx+", n="+n);
}
}
}
if (debugLevel > 0){
......@@ -2700,6 +3174,9 @@ System.out.println("test1234");
for (int j = i; j < jt.length; j++){
for (int k = 0; k < jt[0].length; k++){
jtj[i][j] += jt[i][k] * jt[j][k] * w[k];
if (Double.isNaN(jtj[i][j])) {
System.out.println("i="+i+", j="+j+", k="+k);
}
}
}
}
......
......@@ -2856,11 +2856,29 @@ public class EyesisCorrectionParameters {
public double gr_unique_tol = 0.15; // Do not re-measure correlation if target disparity differs from some previous by this
public double gr_unique_pretol = 0.5; // Larger tolerance for expanding (not refining)
public boolean ft_mod_strength = true; // When set, multiply each tile strength by the number of selected neighbors
public boolean ft_clusterize_by_highest = true; // Clusterize using disparity horizontal maximums for fronto planes and minimums - for horizontal. False - use histograms
public double ft_clust_sigma = 0.7; // Blur disparity before argmax/argmin for initial clusterization
public double ft_disp_arange_vert = 0.07; // Absolute disparity range for fronto clusters
public double ft_disp_rrange_vert = 0.01; // Relative disparity range for fronto clusters
public double ft_disp_arange_hor = 0.035; // Absolute disparity range for horizontal clusters
public double ft_disp_rrange_hor = 0.005; // Relative disparity range for horizontal clusters
public double ft_tolerance_above_near = 100.0; // Actual disparity positive tolerance over blurred disparity argmax range
public double ft_tolerance_below_near = -0.01; // Actual disparity negative tolerance under blurred disparity argmax range
public double ft_tolerance_above_far = 0.07; // Actual disparity positive tolerance over blurred disparity argmin range
public double ft_tolerance_below_far = 0.1; // Actual disparity negative tolerance under blurred disparity argmin range
public int ft_hor_vert_overlap = 2; // Allow clusters tile sharing between fronto and horizontal. 2 - 1 tile in 8 directions, 1 - 1 tile in 4 directions
public int ft_used_companions = 5; // Cell that has this many new used companions is considered used (borders and already used are considered used too)
public int ft_used_true_companions = 1; // There should be at least this many new selected tiles among neighbors.
public boolean plPreferDisparity = false;// Always start with disparity-most axis (false - lowest eigenvalue)
public double plDispNorm = 5.0; // Normalize disparities to the average if above (now only for eigenvalue comparison)
public double plFrontoTol = 0.0; // for compatibility with old //0.1; // Fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable
public double plFrontoRms = 0.05; // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
public double plFrontoOffs = 0.2; // increasing weight of the near tiles by using the difference between the tile disparity and the reduced average as the weight. <= 0 - disable
public double PlFrontoPow = 1.0; // increase weight even more
public double plBlurBinVert = 1.2; // Blur disparity histograms for constant disparity clusters by this sigma (in bins)
public double plBlurBinHor = 0.8; // Blur disparity histograms for horizontal clusters by this sigma (in bins)
public double plMaxDiffVert = 0.4; // Maximal normalized disparity difference when initially assigning to vertical plane
......@@ -3100,6 +3118,8 @@ public class EyesisCorrectionParameters {
public boolean replaceWeakOutliers = true; // false;
public boolean debug_initial_discriminate = false;
public boolean dbg_migrate = true;
// other debug images
......@@ -3522,11 +3542,27 @@ public class EyesisCorrectionParameters {
properties.setProperty(prefix+"gr_unique_tol", this.gr_unique_tol +"");
properties.setProperty(prefix+"gr_unique_pretol", this.gr_unique_pretol +"");
properties.setProperty(prefix+"ft_mod_strength", this.ft_mod_strength +"");
properties.setProperty(prefix+"ft_clusterize_by_highest", this.ft_clusterize_by_highest +"");
properties.setProperty(prefix+"ft_clust_sigma", this.ft_clust_sigma +"");
properties.setProperty(prefix+"ft_disp_arange_vert", this.ft_disp_arange_vert +"");
properties.setProperty(prefix+"ft_disp_rrange_vert", this.ft_disp_rrange_vert +"");
properties.setProperty(prefix+"ft_disp_arange_hor", this.ft_disp_arange_hor +"");
properties.setProperty(prefix+"ft_disp_rrange_hor", this.ft_disp_rrange_hor +"");
properties.setProperty(prefix+"ft_tolerance_above_near", this.ft_tolerance_above_near +"");
properties.setProperty(prefix+"ft_tolerance_below_near", this.ft_tolerance_below_near +"");
properties.setProperty(prefix+"ft_tolerance_above_far", this.ft_tolerance_above_far +"");
properties.setProperty(prefix+"ft_tolerance_below_far", this.ft_tolerance_below_far +"");
properties.setProperty(prefix+"ft_hor_vert_overlap", this.ft_hor_vert_overlap +"");
properties.setProperty(prefix+"ft_used_companions", this.ft_used_companions +"");
properties.setProperty(prefix+"ft_used_true_companions", this.ft_used_true_companions +"");
properties.setProperty(prefix+"plPreferDisparity",this.plPreferDisparity+"");
properties.setProperty(prefix+"plDispNorm", this.plDispNorm +"");
properties.setProperty(prefix+"plFrontoTol", this.plFrontoTol +"");
properties.setProperty(prefix+"plFrontoRms", this.plFrontoRms +"");
properties.setProperty(prefix+"plFrontoOffs", this.plFrontoOffs +"");
properties.setProperty(prefix+"PlFrontoPow", this.PlFrontoPow +"");
properties.setProperty(prefix+"plBlurBinVert", this.plBlurBinVert +"");
properties.setProperty(prefix+"plBlurBinHor", this.plBlurBinHor +"");
......@@ -3741,7 +3777,8 @@ public class EyesisCorrectionParameters {
properties.setProperty(prefix+"taEnMismatch", this.taEnMismatch +"");
properties.setProperty(prefix+"dbg_migrate", this.dbg_migrate+"");
properties.setProperty(prefix+"debug_initial_discriminate", this.debug_initial_discriminate+"");
properties.setProperty(prefix+"dbg_migrate", this.dbg_migrate+"");
properties.setProperty(prefix+"dbg_early_exit", this.dbg_early_exit+"");
properties.setProperty(prefix+"show_first_bg", this.show_first_bg+"");
......@@ -4168,11 +4205,30 @@ public class EyesisCorrectionParameters {
if (properties.getProperty(prefix+"gr_unique_tol")!=null) this.gr_unique_tol=Double.parseDouble(properties.getProperty(prefix+"gr_unique_tol"));
if (properties.getProperty(prefix+"gr_unique_pretol")!=null) this.gr_unique_pretol=Double.parseDouble(properties.getProperty(prefix+"gr_unique_pretol"));
if (properties.getProperty(prefix+"ft_mod_strength")!=null) this.ft_mod_strength=Boolean.parseBoolean(properties.getProperty(prefix+"ft_mod_strength"));
if (properties.getProperty(prefix+"ft_clusterize_by_highest")!=null) this.ft_clusterize_by_highest=Boolean.parseBoolean(properties.getProperty(prefix+"ft_clusterize_by_highest"));
if (properties.getProperty(prefix+"ft_clust_sigma")!=null) this.ft_clust_sigma=Double.parseDouble(properties.getProperty(prefix+"ft_clust_sigma"));
if (properties.getProperty(prefix+"ft_disp_arange_vert")!=null) this.ft_disp_arange_vert=Double.parseDouble(properties.getProperty(prefix+"ft_disp_arange_vert"));
if (properties.getProperty(prefix+"ft_disp_rrange_vert")!=null) this.ft_disp_rrange_vert=Double.parseDouble(properties.getProperty(prefix+"ft_disp_rrange_vert"));
if (properties.getProperty(prefix+"ft_disp_arange_hor")!=null) this.ft_disp_arange_hor=Double.parseDouble(properties.getProperty(prefix+"ft_disp_arange_hor"));
if (properties.getProperty(prefix+"ft_disp_rrange_hor")!=null) this.ft_disp_rrange_hor=Double.parseDouble(properties.getProperty(prefix+"ft_disp_rrange_hor"));
if (properties.getProperty(prefix+"ft_tolerance_above_near")!=null) this.ft_tolerance_above_near=Double.parseDouble(properties.getProperty(prefix+"ft_tolerance_above_near"));
if (properties.getProperty(prefix+"ft_tolerance_below_near")!=null) this.ft_tolerance_below_near=Double.parseDouble(properties.getProperty(prefix+"ft_tolerance_below_near"));
if (properties.getProperty(prefix+"ft_tolerance_above_far")!=null) this.ft_tolerance_above_far=Double.parseDouble(properties.getProperty(prefix+"ft_tolerance_above_far"));
if (properties.getProperty(prefix+"ft_tolerance_below_far")!=null) this.ft_tolerance_below_far=Double.parseDouble(properties.getProperty(prefix+"ft_tolerance_below_far"));
if (properties.getProperty(prefix+"ft_hor_vert_overlap")!=null) this.ft_hor_vert_overlap=Integer.parseInt(properties.getProperty(prefix+"ft_hor_vert_overlap"));
if (properties.getProperty(prefix+"ft_used_companions")!=null) this.ft_used_companions=Integer.parseInt(properties.getProperty(prefix+"ft_used_companions"));
if (properties.getProperty(prefix+"ft_used_true_companions")!=null) this.ft_used_true_companions=Integer.parseInt(properties.getProperty(prefix+"ft_used_true_companions"));
if (properties.getProperty(prefix+"plPreferDisparity")!=null) this.plPreferDisparity=Boolean.parseBoolean(properties.getProperty(prefix+"plPreferDisparity"));
if (properties.getProperty(prefix+"plDispNorm")!=null) this.plDispNorm=Double.parseDouble(properties.getProperty(prefix+"plDispNorm"));
if (properties.getProperty(prefix+"plFrontoTol")!=null) this.plFrontoTol=Double.parseDouble(properties.getProperty(prefix+"plFrontoTol"));
if (properties.getProperty(prefix+"plFrontoRms")!=null) this.plFrontoRms=Double.parseDouble(properties.getProperty(prefix+"plFrontoRms"));
if (properties.getProperty(prefix+"plFrontoOffs")!=null) this.plFrontoOffs=Double.parseDouble(properties.getProperty(prefix+"plFrontoOffs"));
if (properties.getProperty(prefix+"PlFrontoPow")!=null) this.PlFrontoPow=Double.parseDouble(properties.getProperty(prefix+"PlFrontoPow"));
if (properties.getProperty(prefix+"plBlurBinVert")!=null) this.plBlurBinVert=Double.parseDouble(properties.getProperty(prefix+"plBlurBinVert"));
if (properties.getProperty(prefix+"plBlurBinHor")!=null) this.plBlurBinHor=Double.parseDouble(properties.getProperty(prefix+"plBlurBinHor"));
......@@ -4390,7 +4446,8 @@ public class EyesisCorrectionParameters {
if (properties.getProperty(prefix+"taEnMismatch")!=null) this.taEnMismatch=Boolean.parseBoolean(properties.getProperty(prefix+"taEnMismatch"));
if (properties.getProperty(prefix+"dbg_migrate")!=null) this.dbg_migrate=Boolean.parseBoolean(properties.getProperty(prefix+"dbg_migrate"));
if (properties.getProperty(prefix+"debug_initial_discriminate")!=null) this.debug_initial_discriminate=Boolean.parseBoolean(properties.getProperty(prefix+"debug_initial_discriminate"));
if (properties.getProperty(prefix+"dbg_migrate")!=null) this.dbg_migrate=Boolean.parseBoolean(properties.getProperty(prefix+"dbg_migrate"));
if (properties.getProperty(prefix+"dbg_early_exit")!=null) this.dbg_early_exit=Integer.parseInt(properties.getProperty(prefix+"dbg_early_exit"));
if (properties.getProperty(prefix+"show_first_bg")!=null) this.show_first_bg=Boolean.parseBoolean(properties.getProperty(prefix+"show_first_bg"));
......@@ -4904,6 +4961,37 @@ public class EyesisCorrectionParameters {
gd.addNumericField("Do not re-measure correlation if target disparity differs from some previous by less than this", this.gr_unique_tol, 6);
gd.addNumericField("Larger tolerance for expanding (not refining)", this.gr_unique_pretol, 6);
gd.addTab ("Alt Clusterize", "Alternative initial tile clusterization");
gd.addCheckbox ("Modify cluster strengths", this.ft_mod_strength,
"Supplement sum of strengths with other parameters, such as density and height ");
gd.addCheckbox ("Enable alternative initial tile clusterization", this.ft_clusterize_by_highest,
"Clusterize using disparity horizontal maximums for fronto planes and minimums - for horizontal. False - use histograms");
gd.addNumericField("Disparity blur sigma", this.ft_clust_sigma, 4, 6,"pix",
"Blur disparity before finding each line max (for fronto planes ) or min (for horizontal planes) during initial clusterization");
gd.addNumericField("Absolute disparity range for fronto clusters", this.ft_disp_arange_vert, 4, 6,"pix",
"Disparity range for blurred disparity (down from max disparity) for fronto planes");
gd.addNumericField("Relative disparity range for fronto clusters", this.ft_disp_rrange_vert, 4, 6,"pix/pix",
"Increase disparity range for fronto clusters for each disparity pixel");
gd.addNumericField("Absolute disparity range for horizontal clusters", this.ft_disp_arange_hor, 4, 6,"pix",
"Disparity range for blurred disparity (up from min disparity to horizontal difference) for horizontal planes");
gd.addNumericField("Relative disparity range for horizontal clusters", this.ft_disp_rrange_hor, 4, 6,"pix/pix",
"Increase disparity range for horizontal clusters for each disparity pixel");
gd.addNumericField("Actual disparity positive tolerance over blurred disparity max range", this.ft_tolerance_above_near, 4, 6,"pix",
"Allow measured tile disparity above cluster disparity range for fronto clusters");
gd.addNumericField("Actual disparity negative tolerance over blurred disparity max range", this.ft_tolerance_below_near, 4, 6,"pix",
"Allow measured tile disparity below cluster disparity range for fronto planes");
gd.addNumericField("Actual disparity positive tolerance over blurred disparity min range", this.ft_tolerance_above_far, 4, 6,"pix",
"Allow measured tile disparity above cluster disparity range for horizontal planes");
gd.addNumericField("Actual disparity negative tolerance over blurred disparity min range", this.ft_tolerance_below_far, 4, 6,"pix",
"Allow measured tile disparity below cluster disparity range for horizontal planes");
gd.addNumericField("Fronto/horizontal selections overlap", this.ft_hor_vert_overlap, 0,6,"",
"Allow clusters tile sharing between fronto and horizontal. 2 - 1 tile in 8 directions, 1 - 1 tile in 4 directions");
gd.addNumericField("Mark as used if it has this many used/disabled neighbors", this.ft_used_companions, 0,6,"",
"Cell that has this many new used companions is considered used (borders and already used are considered used too)");
gd.addNumericField("Minimal number of new used cells among new/old used and marginal tiles", this.ft_used_true_companions, 0,6,"",
"There should be at least this many new selected tiles among neighbors");
gd.addTab ("Plane Det", "Planes detection");
gd.addMessage ("--- Planes detection ---");
......@@ -4913,8 +5001,10 @@ public class EyesisCorrectionParameters {
"Fronto tolerance (pix) - treat almost fronto planes as fronto (constant disparity). If <= 0 - disable this feature");
gd.addNumericField("Fronto RMS", this.plFrontoRms, 4,6,"pix",
"Target half-thickness of the fronto planes. Similar to sqrt(plMaxEigen) for other planes");
gd.addNumericField("Fronto offset", this.plFrontoOffs, 4,6,"pix",
gd.addNumericField("Fronto offset", this.plFrontoOffs, 4,6,"pix",
"Increase the weights of the near tiles by using the difference between the tile disparity and the average reduced by this value as the weight. If <= 0 - disable this feature");
gd.addNumericField("Fronto power", this.PlFrontoPow, 4,6,"pix",
"Increase the weights of the near tiles even more (see the previous parameter) by raising the disparity difference to this power");
gd.addNumericField("Blur disparity histograms for constant disparity clusters by this sigma (in bins)", this.plBlurBinVert, 6);
gd.addNumericField("Blur disparity histograms for horizontal clusters by this sigma (in bins)", this.plBlurBinHor, 6);
gd.addNumericField("Maximal normalized disparity difference when initially assigning to vertical plane", this.plMaxDiffVert, 6);
......@@ -5144,6 +5234,8 @@ public class EyesisCorrectionParameters {
gd.addTab ("Debug", "Other debug images");
gd.addMessage ("--- Other debug images ---");
// clt_parameters.debug_initial_discriminate, // final boolean debug_initial_discriminate,
gd.addCheckbox ("Debug initial clusterization of the supertile tiles", this.debug_initial_discriminate);
gd.addCheckbox ("Test new mode after migration", this.dbg_migrate);
gd.addNumericField("Temporary exit stage (0 - normal execution)", this.dbg_early_exit, 0,6,"","Temporary exit at an intermediate stage (0 - normal)");
......@@ -5567,11 +5659,27 @@ public class EyesisCorrectionParameters {
this.gr_unique_tol= gd.getNextNumber();
this.gr_unique_pretol= gd.getNextNumber();
this.ft_mod_strength= gd.getNextBoolean();
this.ft_clusterize_by_highest= gd.getNextBoolean();
this.ft_clust_sigma= gd.getNextNumber();
this.ft_disp_arange_vert= gd.getNextNumber();
this.ft_disp_rrange_vert= gd.getNextNumber();
this.ft_disp_arange_hor= gd.getNextNumber();
this.ft_disp_rrange_hor= gd.getNextNumber();
this.ft_tolerance_above_near= gd.getNextNumber();
this.ft_tolerance_below_near= gd.getNextNumber();
this.ft_tolerance_above_far= gd.getNextNumber();
this.ft_tolerance_below_far= gd.getNextNumber();
this.ft_hor_vert_overlap= (int) gd.getNextNumber();
this.ft_used_companions= (int) gd.getNextNumber();
this.ft_used_true_companions= (int) gd.getNextNumber();
this.plPreferDisparity= gd.getNextBoolean();
this.plDispNorm= gd.getNextNumber();
this.plFrontoTol = gd.getNextNumber();
this.plFrontoRms = gd.getNextNumber();
this.plFrontoOffs = gd.getNextNumber();
this.PlFrontoPow = gd.getNextNumber();
this.plBlurBinVert= gd.getNextNumber();
this.plBlurBinHor= gd.getNextNumber();
......@@ -5790,7 +5898,8 @@ public class EyesisCorrectionParameters {
this.taEnFlaps= gd.getNextBoolean();
this.taEnMismatch= gd.getNextBoolean();
this.dbg_migrate= gd.getNextBoolean();
this.debug_initial_discriminate= gd.getNextBoolean();
this.dbg_migrate= gd.getNextBoolean();
this.dbg_early_exit = (int) gd.getNextNumber();
this.show_first_bg= gd.getNextBoolean();
......
......@@ -584,6 +584,7 @@ private Panel panel1,
// addButton("CLT 2*4 images - 2", panelClt4, color_conf_process);
// addButton("CLT 2*4 images - 3", panelClt4, color_conf_process);
addButton("SHOW extrinsics", panelClt4, color_configure);
addButton("RIG DSI", panelClt4, color_conf_process);
addButton("MAIN extrinsics", panelClt4, color_process);
addButton("AUX extrinsics", panelClt4, color_process);
addButton("RIG extrinsics", panelClt4, color_conf_process);
......@@ -4263,83 +4264,14 @@ private Panel panel1,
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
clt3d(adjust_extrinsics, adjust_poly);
/*
if (QUAD_CLT == null){
QUAD_CLT = new QuadCLT (
QuadCLT.PREFIX,
PROPERTIES,
EYESIS_CORRECTIONS,
CORRECTION_PARAMETERS);
if (DEBUG_LEVEL > 0){
System.out.println("Created new QuadCLT instance, will need to read CLT kernels");
}
}
String configPath=getSaveCongigPath();
if (configPath.equals("ABORT")) return;
EYESIS_CORRECTIONS.initSensorFiles(DEBUG_LEVEL);
int numChannels=EYESIS_CORRECTIONS.getNumChannels();
CHANNEL_GAINS_PARAMETERS.modifyNumChannels(numChannels);
if (!QUAD_CLT.CLTKernelsAvailable()){
if (DEBUG_LEVEL > 0){
System.out.println("Reading CLT kernels");
}
QUAD_CLT.readCLTKernels(
CLT_PARAMETERS,
THREADS_MAX,
UPDATE_STATUS, // update status info
DEBUG_LEVEL);
if (DEBUG_LEVEL > 1){
QUAD_CLT.showCLTKernels(
THREADS_MAX,
UPDATE_STATUS, // update status info
DEBUG_LEVEL);
}
}
if (!QUAD_CLT.geometryCorrectionAvailable()){
if (DEBUG_LEVEL > 0){
System.out.println("Calculating geometryCorrection");
}
if (!QUAD_CLT.initGeometryCorrection(DEBUG_LEVEL+2)){
return;
}
}
QUAD_CLT.processCLTQuads3d(
adjust_extrinsics, // boolean adjust_extrinsics,
adjust_poly, // boolean adjust_poly,
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
// NONLIN_PARAMETERS, //EyesisCorrectionParameters.NonlinParameters nonlinParameters,
COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CHANNEL_GAINS_PARAMETERS, //CorrectionColorProc.ColorGainsParameters channelGainParameters,
RGB_PARAMETERS, //EyesisCorrectionParameters.RGBParameters rgbParameters,
EQUIRECTANGULAR_PARAMETERS, // EyesisCorrectionParameters.EquirectangularParameters equirectangularParameters,
// CONVOLVE_FFT_SIZE, //int convolveFFTSize, // 128 - fft size, kernel size should be size/2
THREADS_MAX, //final int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, //final boolean updateStatus,
DEBUG_LEVEL); //final int debugLevel);
if (configPath!=null) {
saveTimestampedProperties( // save config again
configPath, // full path or null
null, // use as default directory if path==null
true,
PROPERTIES);
}
*/
return;
} else if (label.equals("AUX extrinsics") || label.equals("AUX Poly corr")) {
boolean adjust_extrinsics = label.equals("AUX extrinsics") || label.equals("AUX Poly corr");
boolean adjust_poly = label.equals("AUX Poly corr");
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
clt3d_aux(adjust_extrinsics, adjust_poly);
/*
if (QUAD_CLT_AUX == null){
if (EYESIS_CORRECTIONS_AUX == null) {
EYESIS_CORRECTIONS_AUX = new EyesisCorrections(SYNC_COMMAND.stopRequested,CORRECTION_PARAMETERS.getAux());
......@@ -4408,6 +4340,7 @@ private Panel panel1,
true,
PROPERTIES);
}
*/
return;
} else if (label.equals("CLT planes")) {
......@@ -4518,6 +4451,7 @@ private Panel panel1,
///========================================
QUAD_CLT.batchCLT3d(
TWO_QUAD_CLT, // TwoQuadCLT twoQuadCLT, //maybe null in no-rig mode, otherwise may contain rig measurements to be used as infinity ground truth
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
// NONLIN_PARAMETERS, //EyesisCorrectionParameters.NonlinParameters nonlinParameters,
......@@ -4614,6 +4548,13 @@ private Panel panel1,
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
groundTruth();
return;
/* ======================================================================== */
} else if (label.equals("RIG DSI")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
rigDSI();
return;
/* ======================================================================== */
} else if (label.equals("Show biscan")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
......@@ -5004,7 +4945,13 @@ private Panel panel1,
}
if (TWO_QUAD_CLT == null) {
TWO_QUAD_CLT = new TwoQuadCLT();
TWO_QUAD_CLT = new TwoQuadCLT(QUAD_CLT,QUAD_CLT_AUX);
} else {
// is it needed to update main/aux? Or it never changes?
TWO_QUAD_CLT.quadCLT_main=QUAD_CLT;
TWO_QUAD_CLT.quadCLT_aux=QUAD_CLT_AUX;
// will need to make sure that miScan image is not from the previous scene
}
return true;
}
......@@ -5155,6 +5102,83 @@ private Panel panel1,
// boolean adjust_extrinsics = label.equals("MAIN extrinsics") || label.equals("CLT Poly corr");
// boolean adjust_poly = label.equals("CLT Poly corr");
public boolean clt3d_aux(
boolean adjust_extrinsics,
boolean adjust_poly
) {
if (QUAD_CLT_AUX == null){
if (EYESIS_CORRECTIONS_AUX == null) {
EYESIS_CORRECTIONS_AUX = new EyesisCorrections(SYNC_COMMAND.stopRequested,CORRECTION_PARAMETERS.getAux());
}
QUAD_CLT_AUX = new QuadCLT (
QuadCLT.PREFIX_AUX,
PROPERTIES,
EYESIS_CORRECTIONS_AUX,
CORRECTION_PARAMETERS.getAux());
if (DEBUG_LEVEL > 0){
System.out.println("Created new QuadCLT instance for AUX camera, will need to read CLT kernels for aux camera");
}
}
String configPath=getSaveCongigPath();
if (configPath.equals("ABORT")) return false;
EYESIS_CORRECTIONS_AUX.initSensorFiles(DEBUG_LEVEL);
int numChannelsAux=EYESIS_CORRECTIONS_AUX.getNumChannels();
CHANNEL_GAINS_PARAMETERS_AUX.modifyNumChannels(numChannelsAux);
if (!QUAD_CLT_AUX.CLTKernelsAvailable()){
if (DEBUG_LEVEL > 0){
System.out.println("Reading AUX CLT kernels");
}
QUAD_CLT_AUX.readCLTKernels(
CLT_PARAMETERS,
THREADS_MAX,
UPDATE_STATUS, // update status info
DEBUG_LEVEL);
if (DEBUG_LEVEL > 1){
QUAD_CLT_AUX.showCLTKernels(
THREADS_MAX,
UPDATE_STATUS, // update status info
DEBUG_LEVEL);
}
}
if (!QUAD_CLT_AUX.geometryCorrectionAvailable()){
if (DEBUG_LEVEL > 0){
System.out.println("Calculating geometryCorrection for AUX camera");
}
if (!QUAD_CLT_AUX.initGeometryCorrection(DEBUG_LEVEL+2)){
return false;
}
}
QUAD_CLT_AUX.processCLTQuads3d(
adjust_extrinsics, // boolean adjust_extrinsics,
adjust_poly, // boolean adjust_poly,
TWO_QUAD_CLT, // TwoQuadCLT twoQuadCLT, //maybe null in no-rig mode, otherwise may contain rig measurements to be used as infinity ground truth
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CHANNEL_GAINS_PARAMETERS_AUX, //CorrectionColorProc.ColorGainsParameters channelGainParameters,
RGB_PARAMETERS, //EyesisCorrectionParameters.RGBParameters rgbParameters,
EQUIRECTANGULAR_PARAMETERS, // EyesisCorrectionParameters.EquirectangularParameters equirectangularParameters,
THREADS_MAX, //final int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, //final boolean updateStatus,
DEBUG_LEVEL); //final int debugLevel);
if (configPath!=null) {
saveTimestampedProperties( // save config again
configPath, // full path or null
null, // use as default directory if path==null
true,
PROPERTIES);
}
return true;
}
public boolean clt3d(
boolean adjust_extrinsics,
boolean adjust_poly
......@@ -5206,6 +5230,7 @@ private Panel panel1,
QUAD_CLT.processCLTQuads3d(
adjust_extrinsics, // boolean adjust_extrinsics,
adjust_poly, // boolean adjust_poly,
TWO_QUAD_CLT, // TwoQuadCLT twoQuadCLT, //maybe null in no-rig mode, otherwise may contain rig measurements to be used as infinity ground truth
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
// NONLIN_PARAMETERS, //EyesisCorrectionParameters.NonlinParameters nonlinParameters,
......@@ -5218,9 +5243,6 @@ private Panel panel1,
UPDATE_STATUS, //final boolean updateStatus,
DEBUG_LEVEL); //final int debugLevel);
if (configPath!=null) {
saveTimestampedProperties( // save config again
configPath, // full path or null
......@@ -5272,6 +5294,53 @@ private Panel panel1,
return true;
}
public boolean rigDSI() {
long startTime=System.nanoTime();
if ((QUAD_CLT == null) || (QUAD_CLT.tp == null) || (QUAD_CLT.tp.clt_3d_passes == null)) {
boolean OK = clt3d(
false, // boolean adjust_extrinsics,
false); // boolean adjust_poly);
if (! OK) {
String msg = "DSI data is not available and \"CLT 3D\" failed";
IJ.showMessage("Error",msg);
System.out.println(msg);
return false;
}
}
if (!prepareRigImages()) return false;
String configPath=getSaveCongigPath();
if (configPath.equals("ABORT")) return false;
if (DEBUG_LEVEL > -2){
System.out.println("++++++++++++++ Creating a dual camera rig DSI from a single camera DSI ++++++++++++++");
}
boolean OK = (TWO_QUAD_CLT.rigInitialScan( // actually there is no sense in processing multiple image sets. Combine with other processing?
QUAD_CLT, // QuadCLT quadCLT_main,
QUAD_CLT_AUX, // QuadCLT quadCLT_aux,
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
THREADS_MAX, //final int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, //final boolean updateStatus,
DEBUG_LEVEL -2) != null);
if (!OK) {
System.out.println("rigDSI(): Processing FAILED at "+
IJ.d2s(0.000000001*(System.nanoTime()-startTime),3)+" sec, --- Free memory="+
Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
return false;
}
if (configPath!=null) {
saveTimestampedProperties( // save config again
configPath, // full path or null
null, // use as default directory if path==null
true,
PROPERTIES);
}
System.out.println("rigDSI(): Processing finished at "+
IJ.d2s(0.000000001*(System.nanoTime()-startTime),3)+" sec, --- Free memory="+
Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
return true;
}
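// rigDSI() is invoked by the "RIG DSI" button added to panelClt4 above: it makes sure a single-camera DSI and the
// rig images are available, runs TWO_QUAD_CLT.rigInitialScan() to build the dual camera rig DSI, and then saves the
// timestamped configuration (summary of the method above).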
public boolean batchRig() {
long startTime=System.nanoTime();
/* if ((QUAD_CLT == null) || (QUAD_CLT.tp == null) || (QUAD_CLT.tp.clt_3d_passes == null)) {
......
......@@ -35,6 +35,8 @@ public class LinkPlanes {
public double plFrontoTol; // = 0.2; plFrontoTol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable
public double plFrontoRms; // = 0.05; // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
public double plFrontoOffs; // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
public double PlFrontoPow; // = 1.0; // increase weight even more
public double plMinStrength; // = 0.1; // Minimal total strength of a plane
public double plMaxEigen; // = 0.05; // Maximal eigenvalue of a plane
public double plEigenFloor; // = 0.01; // Add to eigenvalues of each participating plane and result to validate connections
......@@ -139,6 +141,7 @@ public class LinkPlanes {
plFrontoTol = clt_parameters.plFrontoTol;
plFrontoRms = clt_parameters.plFrontoRms;
plFrontoOffs = clt_parameters.plFrontoOffs;
PlFrontoPow = clt_parameters.PlFrontoPow;
plMaxOverlap = clt_parameters.plMaxOverlap;
......@@ -3994,6 +3997,7 @@ public class LinkPlanes {
plFrontoTol, // final double fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
plFrontoRms, // final double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
plFrontoOffs, // final double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
PlFrontoPow, // final double PlFrontoPow, // = 1.0; // increase weight even more
debugLevel, // final int debugLevel)
dbg_tileX,
dbg_tileY);
......@@ -4519,6 +4523,7 @@ public class LinkPlanes {
plFrontoTol, // final double fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
plFrontoRms, // final double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
plFrontoOffs, // final double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
PlFrontoPow, // final double PlFrontoPow, // = 1.0; // increase weight even more
debugLevel, // final int debugLevel)
dbg_tileX,
dbg_tileY);
......@@ -4613,6 +4618,7 @@ public class LinkPlanes {
plFrontoTol, // final double fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
plFrontoRms, // final double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
plFrontoOffs, // final double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
PlFrontoPow, // final double PlFrontoPow, // = 1.0; // increase weight even more
debugLevel, // final int debugLevel)
dbg_tileX,
dbg_tileY);
......
......@@ -5668,6 +5668,7 @@ public class QuadCLT {
public void processCLTQuads3d(
boolean adjust_extrinsics,
boolean adjust_poly,
TwoQuadCLT twoQuadCLT, //maybe null in no-rig mode, otherwise may contain rig measurements to be used as infinity ground truth
EyesisCorrectionParameters.CLTParameters clt_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
......@@ -5700,31 +5701,56 @@ public class QuadCLT {
scaleExposures, //output // double [] scaleExposures
saturation_imp, //output // boolean [][] saturation_imp,
debugLevel); // int debugLevel);
// if (adjust_extrinsics && (debugLevel >-2)) {
// boolean tmp_exit = (debugLevel > -10); // == true;
// System.out.println("processCLTQuads3d(): adjust_extrinsics="+adjust_extrinsics);
// if (tmp_exit) {
// System.out.println("will now exit. To continue - change variable tmp_exit in debugger" );
// if (tmp_exit) {
// return;
// }
// }
// }
boolean use_rig = (twoQuadCLT != null) && (twoQuadCLT.getBiScan(0) != null);
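// use_rig is true only when a TwoQuadCLT instance was passed in and it already holds at least one rig
// bi-scan (getBiScan(0) != null), i.e. rig-derived ground truth is available for this scene.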
if (!adjust_extrinsics || !use_rig) {
// Difficult to fix: adjust extrinsics for aux - when it is adjusted alone, it will not match tiles to those of a rig!
// can use only far tiles with small gradients?
// once per quad here
preExpandCLTQuad3d( // returns ImagePlus, but it already should be saved/shown
imp_srcs, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
debayerParameters,
colorProcParameters,
rgbParameters,
threadsMax, // maximal number of threads to launch
updateStatus,
debugLevel);
// once per quad here
preExpandCLTQuad3d( // returns ImagePlus, but it already should be saved/shown
imp_srcs, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
debayerParameters,
colorProcParameters,
rgbParameters,
threadsMax, // maximal number of threads to launch
updateStatus,
debugLevel);
// adjust extrinsics here
// adjust extrinsics here
}
if (adjust_extrinsics) {
System.out.println("Adjust extrinsics here");
extrinsicsCLT(
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
adjust_poly,
threadsMax, //final int threadsMax, // maximal number of threads to launch
updateStatus,// final boolean updateStatus,
debugLevel); // final int debugLevel)
if (use_rig) {
System.out.println("Adjust extrinsics using rig data here");
extrinsicsCLTfromGT(
twoQuadCLT, // TwoQuadCLT twoQuadCLT, //maybe null in no-rig mode, otherwise may contain rig measurements to be used as infinity ground truth
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
adjust_poly,
threadsMax, //final int threadsMax, // maximal number of threads to launch
updateStatus,// final boolean updateStatus,
debugLevel + 2); // final int debugLevel)
} else {
System.out.println("Adjust extrinsics here");
extrinsicsCLT(
// twoQuadCLT, // TwoQuadCLT twoQuadCLT, //maybe null in no-rig mode, otherwise may contain rig measurements to be used as infinity ground truth
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
adjust_poly,
threadsMax, //final int threadsMax, // maximal number of threads to launch
updateStatus,// final boolean updateStatus,
debugLevel); // final int debugLevel)
}
} else {
expandCLTQuad3d( // returns ImagePlus, but it already should be saved/shown
......@@ -6205,8 +6231,10 @@ public class QuadCLT {
double min_poly_update = clt_parameters.lym_poly_change; // Parameter vector difference to exit from polynomial correction
int bg_scan = 0;
int combo_scan= tp.clt_3d_passes.size()-1;
if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel >-1)) {
// if (!batch_mode && (debugLevel >-1)) {
// if (!batch_mode && (debugLevel >-1)) {
tp.showScan(
tp.clt_3d_passes.get(bg_scan), // CLTPass3d scan,
"bg_scan"); //String title)
......@@ -6215,6 +6243,13 @@ public class QuadCLT {
"combo_scan-"+combo_scan); //String title)
}
boolean [] bg_sel = null;
boolean [] bg_use = null;
double [] combo_disp = null;
double [] combo_str = null;
boolean [] combo_use = null;
double [] combo_overexp = null;
int num_combo = 0;
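// bg_*/combo_* selections and counters are now declared at method scope (apparently so the later debug/display code can still reference them after the preparation blocks below)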
double [][] filtered_bgnd_disp_strength = tp.getFilteredDisparityStrength(
tp.clt_3d_passes, // final ArrayList <CLTPass3d> passes,// List, first, last - to search for the already tried disparity
bg_scan, // final int measured_scan_index, // will not look at higher scans
......@@ -6238,10 +6273,11 @@ public class QuadCLT {
clt_parameters.tileX, // dbg_x, // final int dbg_x,
clt_parameters.tileX, // dbg_y, // final int dbg_y,
debugLevelInner); // final int debugLevel)
// prepare re-measurements of background
boolean [] bg_sel = tp.clt_3d_passes.get(bg_scan).getSelected();
boolean [] bg_use = new boolean [bg_sel.length];
// double [] bg_disp = tp.clt_3d_passes.get(bg_scan).getDisparity(0);
bg_sel = tp.clt_3d_passes.get(bg_scan).getSelected();
bg_use = new boolean [bg_sel.length];
// double [] bg_disp = tp.clt_3d_passes.get(bg_scan).getDisparity(0);
double [] bg_str = tp.clt_3d_passes.get(bg_scan).getStrength();
double [] bg_overexp = tp.clt_3d_passes.get(bg_scan).getOverexposedFraction();
for (int nTile = 0 ; nTile < bg_use.length; nTile++) {
......@@ -6254,14 +6290,14 @@ public class QuadCLT {
}
}
int num_bg = tp.clt_3d_passes.get(bg_scan).setTileOpDisparity( // other minimal strength?
bg_use, // boolean [] selection,
null); // double [] disparity); // null for 0
bg_use, // boolean [] selection,
null); // double [] disparity); // null for 0
// Prepare measurement of combo-scan - remove low strength and what was used for background
double [] combo_disp = tp.clt_3d_passes.get(combo_scan).getDisparity(0);
double [] combo_str = tp.clt_3d_passes.get(combo_scan).getStrength();
boolean [] combo_use = new boolean [bg_sel.length];
double [] combo_overexp = tp.clt_3d_passes.get(combo_scan).getOverexposedFraction();
combo_disp = tp.clt_3d_passes.get(combo_scan).getDisparity(0);
combo_str = tp.clt_3d_passes.get(combo_scan).getStrength();
combo_use = new boolean [bg_sel.length];
combo_overexp = tp.clt_3d_passes.get(combo_scan).getOverexposedFraction();
for (int nTile = 0 ; nTile < bg_use.length; nTile++) {
if (!bg_use[nTile] &&
(combo_str[nTile] > clt_parameters.fcorr_inf_strength) &&
......@@ -6270,7 +6306,7 @@ public class QuadCLT {
combo_use[nTile] = true;
}
}
int num_combo = tp.clt_3d_passes.get(combo_scan).setTileOpDisparity(
num_combo = tp.clt_3d_passes.get(combo_scan).setTileOpDisparity(
combo_use, // boolean [] selection,
combo_disp); // double [] disparity);
if (debugLevel > -1) {
......@@ -6278,25 +6314,27 @@ public class QuadCLT {
}
// measure combo
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
combo_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
combo_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
null, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel >-1)) {
tp.showScan(
tp.clt_3d_passes.get(bg_scan), // CLTPass3d scan,
"bg_scan_post"); //String title)
tp.showScan(
tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
"combo_scan-"+combo_scan+"_post"); //String title)
}
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel >-1)) {
tp.showScan(
tp.clt_3d_passes.get(bg_scan), // CLTPass3d scan,
"bg_scan_post"); //String title)
tp.showScan(
tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
"combo_scan-"+combo_scan+"_post"); //String title)
}
double [][] filtered_combo_scand_isp_strength = tp.getFilteredDisparityStrength(
tp.clt_3d_passes, // final ArrayList <CLTPass3d> passes,// List, first, last - to search for the already tried disparity
......@@ -6361,7 +6399,7 @@ public class QuadCLT {
dbg_bg_sel,
dbg_bg_use, // too few
dbg_combo_use};
(new showDoubleFloatArrays()).showArrays(dbg_img, tp.getTilesX(), tp.getTilesY(), true, "extrinsics_bgnd_combo",titles);
(new showDoubleFloatArrays()).showArrays(dbg_img, tp.getTilesX(), tp.getTilesY(), true, "extrinsics_bgnd_combo",titles);
}
AlignmentCorrection ac = new AlignmentCorrection(this);
// iteration steps
......@@ -6372,51 +6410,58 @@ public class QuadCLT {
}
double [][] bg_mismatch = new double[12][];
double [][] combo_mismatch = new double[12][];
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
bg_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
bg_mismatch, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
combo_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
combo_mismatch, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
double [][] scans14 = new double [28][];
scans14[14 * 0 + 0] = tp.clt_3d_passes.get(bg_scan).disparity_map[ImageDtt.DISPARITY_INDEX_CM]; // .getDisparity(0);
scans14[14 * 0 + 1] = tp.clt_3d_passes.get(bg_scan).getStrength();
scans14[14 * 1 + 0] = tp.clt_3d_passes.get(combo_scan).disparity_map[ImageDtt.DISPARITY_INDEX_CM];
scans14[14 * 1 + 1] = tp.clt_3d_passes.get(combo_scan).getStrength();
for (int i = 0; i < bg_mismatch.length; i++) {
scans14[14 * 0 + 2 + i] = bg_mismatch[i];
scans14[14 * 1 + 2 + i] = combo_mismatch[i];
}
if (debugLevelInner > 0) {
(new showDoubleFloatArrays()).showArrays(scans14, tp.getTilesX(), tp.getTilesY(), true, "scans_14"); // , titles);
}
if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel > 1)) {
tp.showScan(
tp.clt_3d_passes.get(bg_scan), // CLTPass3d scan,
"bg_scan_iter"); //String title)
tp.showScan(
tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
"combo_scan-"+combo_scan+"_iter"); //String title)
}
double [][] target_disparity = {tp.clt_3d_passes.get(bg_scan).getDisparity(0), tp.clt_3d_passes.get(combo_scan).getDisparity(0)};
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
bg_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
bg_mismatch, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
combo_scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
combo_mismatch, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
double [][] scans14 = new double [28][];
scans14[14 * 0 + 0] = tp.clt_3d_passes.get(bg_scan).disparity_map[ImageDtt.DISPARITY_INDEX_CM]; // .getDisparity(0);
scans14[14 * 0 + 1] = tp.clt_3d_passes.get(bg_scan).getStrength();
scans14[14 * 1 + 0] = tp.clt_3d_passes.get(combo_scan).disparity_map[ImageDtt.DISPARITY_INDEX_CM];
scans14[14 * 1 + 1] = tp.clt_3d_passes.get(combo_scan).getStrength();
for (int i = 0; i < bg_mismatch.length; i++) {
scans14[14 * 0 + 2 + i] = bg_mismatch[i];
scans14[14 * 1 + 2 + i] = combo_mismatch[i];
}
if (debugLevelInner > 0) {
(new showDoubleFloatArrays()).showArrays(scans14, tp.getTilesX(), tp.getTilesY(), true, "scans_14"); // , titles);
}
if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel > 1)) {
tp.showScan(
tp.clt_3d_passes.get(bg_scan), // CLTPass3d scan,
"bg_scan_iter"); //String title)
tp.showScan(
tp.clt_3d_passes.get(combo_scan), // CLTPass3d scan,
"combo_scan-"+combo_scan+"_iter"); //String title)
}
double [][] target_disparity = {tp.clt_3d_passes.get(bg_scan).getDisparity(0), tp.clt_3d_passes.get(combo_scan).getDisparity(0)};
// TODO: fix above for using GT
// use lazyEyeCorrectionFromGT(..) when ground truth data is available
double [][][] new_corr = ac.lazyEyeCorrection(
adjust_poly, // final boolean use_poly,
true, // final boolean restore_disp_inf, // Restore subtracted disparity for scan #0 (infinity)
......@@ -6454,53 +6499,221 @@ public class QuadCLT {
tp.getTilesX(), // int tilesX,
clt_parameters.corr_magic_scale, // double magic_coeff, // still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
debugLevelInner - 1); // + (clt_parameters.fine_dbg ? 1:0)); // int debugLevel)
comp_diff = 0.0;
int num_pars = 0;
if (adjust_poly) {
comp_diff = 0.0;
int num_pars = 0;
if (adjust_poly) {
apply_fine_corr(
new_corr,
debugLevelInner + 2);
for (int n = 0; n < new_corr.length; n++){
for (int d = 0; d < new_corr[n].length; d++){
for (int i = 0; i < new_corr[n][d].length; i++){
comp_diff += new_corr[n][d][i] * new_corr[n][d][i];
num_pars++;
}
}
}
comp_diff = Math.sqrt(comp_diff/num_pars);
if (debugLevel > -2) {
if ((debugLevel > -1) || (comp_diff < min_poly_update)) {
System.out.println("#### fine correction iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
comp_diff + " ("+min_poly_update+")");
}
}
if (comp_diff < min_poly_update) { // add other parameter to exit from poly
break;
}
} else {
for (int i = 0; i < new_corr[0][0].length; i++){
comp_diff += new_corr[0][0][i] * new_corr[0][0][i];
}
comp_diff = Math.sqrt(comp_diff);
if (debugLevel > -2) {
if ((debugLevel > -1) || (comp_diff < min_sym_update)) {
System.out.println("#### extrinsicsCLT(): iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
comp_diff + " ("+min_sym_update+"), previous RMS = " + new_corr[0][1][0]);
System.out.println("New extrinsic corrections:");
System.out.println(geometryCorrection.getCorrVector().toString());
}
}
if (comp_diff < min_sym_update) {
break;
}
}
}
return (comp_diff < (adjust_poly ? min_poly_update : min_sym_update));
}
public boolean extrinsicsCLTfromGT(
TwoQuadCLT twoQuadCLT, //maybe null in no-rig mode, otherwise may contain rig measurements to be used as infinity ground truth
EyesisCorrectionParameters.CLTParameters clt_parameters,
boolean adjust_poly,
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel)
{
final boolean batch_mode = clt_parameters.batch_run;
int debugLevelInner = batch_mode ? -5: debugLevel;
// boolean update_disp_from_latest = clt_parameters.lym_update_disp ; // true;
int max_tries = clt_parameters.lym_iter; // 25;
double min_sym_update = clt_parameters.lym_change; // 4e-6; // stop iterations if no angle changes more than this
double min_poly_update = clt_parameters.lym_poly_change; // Parameter vector difference to exit from polynomial correction
if ((twoQuadCLT == null) || (twoQuadCLT.getBiScan(0) == null)){
System.out.println("Rig data is not available, aborting");
return false;
}
BiScan scan = twoQuadCLT.getBiScan(0);
double [][] rig_disp_strength = scan.getDisparityStrength(
true, // final boolean only_strong,
true, // final boolean only_trusted,
true) ; // final boolean only_enabled);
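// the rig scan supplies per-tile disparity/strength used below as the ground-truth target for fitting the main (single quad) camera extrinsics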
if (debugLevel > 20) {
boolean tmp_exit = true;
System.out.println("extrinsicsCLTfromGT()");
if (tmp_exit) {
System.out.println("will now exit. To continue - change variable tmp_exit in debugger" );
if (tmp_exit) {
return false;
}
}
}
CLTPass3d comboScan = tp.compositeScan(
rig_disp_strength[0], // final double [] disparity,
rig_disp_strength[1], // final double [] strength,
null, // final boolean [] selected,
debugLevel); // final int debugLevel)
// comboScan will remain the same through iterations, no need to update disparity (maybe shrink the selection?)
AlignmentCorrection ac = new AlignmentCorrection(this);
// iteration steps
double comp_diff = min_sym_update + 1; // (> min_sym_update)
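// each iteration: re-measure correlations against the fixed rig-derived target, derive lazy-eye corrections from the mismatch data, apply them, and stop once the RMS parameter change drops below the corresponding threshold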
for (int num_iter = 0; num_iter < max_tries; num_iter++){
double [][] combo_mismatch = new double[12][];
CLTMeasure( // perform single pass according to prepared tiles operations and disparity
image_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
comboScan, // final CLTPass3d scan,
false, // final boolean save_textures,
true, // final boolean save_corr,
combo_mismatch, // final double [][] mismatch, // null or double [12][]
tp.threadsMax, // maximal number of threads to launch
false, // updateStatus,
debugLevelInner - 1);
double [][] scans14 = new double [14][];
scans14[14 * 0 + 0] = comboScan.disparity_map[ImageDtt.DISPARITY_INDEX_CM]; // .getDisparity(0);
scans14[14 * 0 + 1] = comboScan.getStrength();
for (int i = 0; i < combo_mismatch.length; i++) {
scans14[14 * 0 + 2 + i] = combo_mismatch[i];
}
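// scans14 layout (14 slices per scan): [0] - CM disparity, [1] - strength, [2..13] - the 12 per-pair mismatch channels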
if (debugLevelInner > 0) {
(new showDoubleFloatArrays()).showArrays(scans14, tp.getTilesX(), tp.getTilesY(), true, "scans_14"); // , titles);
}
if (!batch_mode && clt_parameters.show_extrinsic && (debugLevel > 1)) {
tp.showScan(
comboScan, // CLTPass3d scan,
"combo_scan-"+num_iter+"_iter"); //String title)
}
double [][][] new_corr;
final boolean filter_ds = false; // true;
final boolean filter_lyf = false; // ~clt_parameters.lyf_filter, but may be different, now off for a single camera
final double inf_max_disparity = 2.0;
double [][][] gt_disparity_strength = {rig_disp_strength};
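// wrap the single rig scan to match the expected GT layout - one disparity/strength pair per 14-slice block of scans14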
new_corr = ac.lazyEyeCorrectionFromGT(
adjust_poly, // final boolean use_poly,
true, // final boolean restore_disp_inf, // Restore subtracted disparity for scan #0 (infinity)
clt_parameters.fcorr_radius, // final double fcorr_radius,
clt_parameters.fcorr_inf_strength, // final double min_strength_in,
clt_parameters.inf_str_pow, // 1.0, // final double strength_pow,
0.8*clt_parameters.disp_scan_step, // 1.5, // final double lazyEyeCompDiff, // clt_parameters.fcorr_disp_diff
clt_parameters.ly_smpl_side, // 3, // final int lazyEyeSmplSide, // = 2; // Sample size (side of a square)
clt_parameters.ly_smpl_num, // 5, // final int lazyEyeSmplNum, // = 3; // Number after removing worst (should be >1)
clt_parameters.ly_smpl_rms, // 0.1, // final double lazyEyeSmplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
clt_parameters.ly_disp_var, // 0.2, // final double lazyEyeDispVariation, // 0.2, maximal full disparity difference between the tile and its 8 neighbors
clt_parameters.ly_disp_rvar, // 0.2, // final double lazyEyeDispRelVariation, // 0.02 Maximal relative full disparity difference to 8 neighbors
clt_parameters.ly_norm_disp, // final double ly_norm_disp, // = 5.0; // Reduce weight of higher disparity tiles
clt_parameters.inf_smpl_side, // 3, // final int smplSide, // = 2; // Sample size (side of a square)
clt_parameters.inf_smpl_num, // 5, // final int smplNum, // = 3; // Number after removing worst (should be >1)
clt_parameters.inf_smpl_rms, // 0.1, // 0.05, // final double smplRms, // = 0.1; // Maximal RMS of the remaining tiles in a sample
// histogram parameters
clt_parameters.ih_smpl_step, // 8, // final int hist_smpl_side, // 8 x8 masked, 16x16 sampled
clt_parameters.ih_disp_min, // -1.0, // final double hist_disp_min,
clt_parameters.ih_disp_step, // 0.05, // final double hist_disp_step,
clt_parameters.ih_num_bins, // 40, // final int hist_num_bins,
clt_parameters.ih_sigma, // 0.1, // final double hist_sigma,
clt_parameters.ih_max_diff, // 0.1, // final double hist_max_diff,
clt_parameters.ih_min_samples, // 10, // final int hist_min_samples,
clt_parameters.ih_norm_center, // true, // final boolean hist_norm_center, // if there are more tiles that fit than min_samples, replace with
clt_parameters.ly_inf_frac, // 0.5, // final double inf_fraction, // fraction of the weight for the infinity tiles
inf_max_disparity, // final double inf_max_disparity, // use all smaller disparities as inf_fraction
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
scans14, // disp_strength, // scans, // double [][] disp_strength,
gt_disparity_strength, // double [][][] gt_disparity_strength, // 1 pair for each 14 entries of scans_14 (normally - just 1 scan
filter_ds, // final boolean filter_ds, //
filter_lyf, // final boolean filter_lyf, // ~clt_parameters.lyf_filter, but may be different, now off for a single camera
tp.getTilesX(), // int tilesX,
clt_parameters.corr_magic_scale, // double magic_coeff, // still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
debugLevelInner - 1); // + (clt_parameters.fine_dbg ? 1:0)); // int debugLevel)
comp_diff = 0.0;
int num_pars = 0;
if (adjust_poly) {
apply_fine_corr(
new_corr,
debugLevelInner + 2);
for (int n = 0; n < new_corr.length; n++){
for (int d = 0; d < new_corr[n].length; d++){
for (int i = 0; i < new_corr[n][d].length; i++){
comp_diff += new_corr[n][d][i] * new_corr[n][d][i];
num_pars++;
}
}
}
comp_diff = Math.sqrt(comp_diff/num_pars);
if (debugLevel > -2) {
if ((debugLevel > -1) || (comp_diff < min_poly_update)) {
System.out.println("#### fine correction iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
comp_diff + " ("+min_poly_update+")");
}
}
if (comp_diff < min_poly_update) { // add other parameter to exit from poly
break;
}
} else {
for (int i = 0; i < new_corr[0][0].length; i++){
comp_diff += new_corr[0][0][i] * new_corr[0][0][i];
}
comp_diff = Math.sqrt(comp_diff);
if (debugLevel > -2) {
if ((debugLevel > -1) || (comp_diff < min_sym_update)) {
System.out.println("#### extrinsicsCLT(): iteration step = "+(num_iter + 1) + " ( of "+max_tries+") change = "+
comp_diff + " ("+min_sym_update+"), previous RMS = " + new_corr[0][1][0]);
System.out.println("New extrinsic corrections:");
System.out.println(geometryCorrection.getCorrVector().toString());
}
}
if (comp_diff < min_sym_update) {
break;
}
}
}
return (comp_diff < (adjust_poly ? min_poly_update : min_sym_update));
}
public boolean expandCLTQuad3d(
// ImagePlus [] imp_quad, // should have properties "name"(base for saving results), "channel","path"
EyesisCorrectionParameters.CLTParameters clt_parameters,
......@@ -8175,8 +8388,8 @@ public class QuadCLT {
final boolean updateStatus,
final int debugLevel)
{
final int dbg_x = 295;
final int dbg_y = 160;
final int dbg_x = -295-debugLevel;
final int dbg_y = -160-debugLevel;
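// dbg_x/dbg_y are now negative, which disables the per-tile debug printout guarded by (dbg_y >= 0) && (dbg_x >= 0) below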
final int tilesX = tp.getTilesX();
final int tilesY = tp.getTilesY();
CLTPass3d scan = tp.clt_3d_passes.get(scanIndex);
......@@ -8192,7 +8405,7 @@ public class QuadCLT {
if (tile_op[ty][tx] != 0) numTiles ++;
}
System.out.println("CLTMeasure("+scanIndex+"): numTiles = "+numTiles);
if (tile_op[dbg_y][dbg_x] != 0){
if ((dbg_y >= 0) && (dbg_x >= 0) && (tile_op[dbg_y][dbg_x] != 0)){
System.out.println("CLTMeasure("+scanIndex+"): tile_op["+dbg_y+"]["+dbg_x+"] = "+tile_op[dbg_y][dbg_x]);
}
}
......@@ -8284,6 +8497,137 @@ public class QuadCLT {
}
public CLTPass3d CLTMeasure( // perform single pass according to prepared tiles operations and disparity
final double [][][] image_data, // first index - number of image in a quad
final boolean [][] saturation_imp, // (near) saturated pixels or null
final EyesisCorrectionParameters.CLTParameters clt_parameters,
final CLTPass3d scan,
final boolean save_textures,
final boolean save_corr,
final double [][] mismatch, // null or double [12][]
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel)
{
final int dbg_x = -295-debugLevel;
final int dbg_y = -160-debugLevel;
final int tilesX = tp.getTilesX();
final int tilesY = tp.getTilesY();
double [] disparity = scan.getDisparity();
double [] strength = scan.getStrength();
boolean [] selection = scan.getSelected();
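// if the scan carries no explicit tile selection, derive one from tiles with valid (non-NaN) disparity and positive strength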
if (selection == null) {
selection = new boolean[tilesX*tilesY];
for (int nTile = 0; nTile < selection.length; nTile++) {
selection[nTile] = !Double.isNaN(disparity[nTile]) && (strength[nTile] > 0.0);
}
scan.setSelected(selection);
}
if ((scan.disparity == null) || (scan.tile_op == null)) {
scan.setTileOpDisparity(
scan.getSelected(), // boolean [] selection,
scan.getDisparity()); // double [] disparity)
}
int [][] tile_op = scan.tile_op;
double [][] disparity_array = scan.disparity;
// undecided, so 2 modes of combining alpha - same as rgb, or use center tile only
double [][][][] clt_corr_combo = new double [ImageDtt.TCORR_TITLES.length][tilesY][tilesX][]; // will only be used inside?
if (debugLevel > -1){
int numTiles = 0;
for (int ty = 0; ty < tile_op.length; ty ++) for (int tx = 0; tx < tile_op[ty].length; tx ++){
if (tile_op[ty][tx] != 0) numTiles ++;
}
System.out.println("CLTMeasure(): numTiles = "+numTiles);
if ((dbg_y >= 0) && (dbg_x >= 0) && (tile_op[dbg_y][dbg_x] != 0)){
System.out.println("CLTMeasure(): tile_op["+dbg_y+"]["+dbg_x+"] = "+tile_op[dbg_y][dbg_x]);
}
}
double min_corr_selected = clt_parameters.min_corr;
double [][] disparity_map = save_corr ? new double [ImageDtt.DISPARITY_TITLES.length][] : null; //[0] -residual disparity, [1] - orthogonal (just for debugging)
double [][] shiftXY = new double [4][2];
if (!clt_parameters.fine_corr_ignore) {
double [][] shiftXY0 = {
{clt_parameters.fine_corr_x_0,clt_parameters.fine_corr_y_0},
{clt_parameters.fine_corr_x_1,clt_parameters.fine_corr_y_1},
{clt_parameters.fine_corr_x_2,clt_parameters.fine_corr_y_2},
{clt_parameters.fine_corr_x_3,clt_parameters.fine_corr_y_3}};
shiftXY = shiftXY0;
}
double [][][][] texture_tiles = save_textures ? new double [tilesY][tilesX][][] : null; // ["RGBA".length()][];
ImageDtt image_dtt = new ImageDtt();
double z_correction = clt_parameters.z_correction;
if (clt_parameters.z_corr_map.containsKey(image_name)){
z_correction +=clt_parameters.z_corr_map.get(image_name);
}
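// convert the optional per-scene Z correction into an equivalent disparity offset, passed below as the "disparity at infinity"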
final double disparity_corr = (z_correction == 0) ? 0.0 : geometryCorrection.getDisparityFromZ(1.0/z_correction);
image_dtt.clt_aberrations_quad_corr(
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
1, // final int macro_scale, // to correlate tile data instead of the pixel data: 1 - pixels, 8 - tiles
tile_op, // per-tile operation bit codes
disparity_array, // clt_parameters.disparity, // final double disparity,
image_data, // final double [][][] imade_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
// correlation results - final and partial
clt_corr_combo, // [tp.tilesY][tp.tilesX][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
null, // clt_corr_partial, // [tp.tilesY][tp.tilesX][quad]color][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
mismatch, // null, // [tp.tilesY][tp.tilesX][pair]{dx,dy,weight}[(2*transform_size-1)*(2*transform_size-1)] // transpose unapplied. null - do not calculate
// Use it with disparity_maps[scan_step]? clt_mismatch, // [tp.tilesY][tp.tilesX][pair]{dx,dy,weight}[(2*transform_size-1)*(2*transform_size-1)] // transpose unapplied. null - do not calculate
disparity_map, // [12][tp.tilesY * tp.tilesX]
texture_tiles, // [tp.tilesY][tp.tilesX]["RGBA".length()][];
tilesX * clt_parameters.transform_size, // imp_quad[0].getWidth(), // final int width,
clt_parameters.fat_zero, // add to denominator to modify phase correlation (same units as data1, data2). <0 - pure sum
clt_parameters.corr_sym,
clt_parameters.corr_offset,
clt_parameters.corr_red,
clt_parameters.corr_blue,
clt_parameters.corr_sigma,
clt_parameters.corr_normalize, // normalize correlation results by rms
min_corr_selected, // 0.0001; // minimal correlation value to consider valid
clt_parameters.max_corr_sigma,// 1.5; // weights of points around global max to find fractional
clt_parameters.max_corr_radius,
clt_parameters.max_corr_double, // Double pass when masking center of mass to reduce preference for integer values
clt_parameters.corr_mode, // Correlation mode: 0 - integer max, 1 - center of mass, 2 - polynomial
clt_parameters.min_shot, // 10.0; // Do not adjust for shot noise if lower than
clt_parameters.scale_shot, // 3.0; // scale when dividing by sqrt ( <0 - disable correction)
clt_parameters.diff_sigma, // 5.0;//RMS difference from average to reduce weights (~ 1.0 - 1/255 full scale image)
clt_parameters.diff_threshold, // 5.0; // RMS difference from average to discard channel (~ 1.0 - 1/255 full scale image)
clt_parameters.diff_gauss, // true; // when averaging images, use gaussian around average as weight (false - sharp all/nothing)
clt_parameters.min_agree, // 3.0; // minimal number of channels to agree on a point (real number to work with fuzzy averages)
clt_parameters.dust_remove, // Do not reduce average weight when only one image differs much from the average
clt_parameters.keep_weights, // Add port weights to RGBA stack (debug feature)
geometryCorrection, // final GeometryCorrection geometryCorrection,
clt_kernels, // final double [][][][][][] clt_kernels, // [channel_in_quad][color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
clt_parameters.kernel_step,
clt_parameters.transform_size,
clt_parameters.clt_window,
shiftXY, //
disparity_corr, // final double disparity_corr, // disparity at infinity
(clt_parameters.fcorr_ignore? null: this.fine_corr),
clt_parameters.corr_magic_scale, // still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
clt_parameters.shift_x, // final int shiftX, // shift image horizontally (positive - right) - just for testing
clt_parameters.shift_y, // final int shiftY, // shift image vertically (positive - down)
clt_parameters.tileX, // final int debug_tileX,
clt_parameters.tileY, // final int debug_tileY,
(clt_parameters.dbg_mode & 64) != 0, // no fract shift
(clt_parameters.dbg_mode & 128) != 0, // no convolve
// (clt_parameters.dbg_mode & 256) != 0, // transpose convolve
threadsMax,
debugLevel);
scan.disparity_map = disparity_map;
scan.texture_tiles = texture_tiles;
scan.is_measured = true;
scan.is_combo = false;
scan.resetProcessed();
return scan;
}
public ImagePlus [] conditionImageSetBatch( // used in batchCLT3d
final int nSet, // index of the 4-image set
final EyesisCorrectionParameters.CLTParameters clt_parameters,
......@@ -8539,6 +8883,7 @@ public class QuadCLT {
}
public void batchCLT3d(
TwoQuadCLT twoQuadCLT, //maybe null in no-rig mode, otherwise may contain rig measurements to be used as infinity ground truth
EyesisCorrectionParameters.CLTParameters clt_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
......@@ -8710,6 +9055,7 @@ public class QuadCLT {
if (ok) {
System.out.println("Adjusting extrinsics");
extrinsicsCLT(
// twoQuadCLT, // TwoQuadCLT twoQuadCLT, //maybe null in no-rig mode, otherwise may contain rig measurements to be used as infinity ground truth
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
false, // adjust_poly,
threadsMax, //final int threadsMax, // maximal number of threads to launch
......@@ -8734,6 +9080,7 @@ public class QuadCLT {
if (ok) {
System.out.println("Adjusting polynomial fine crorection");
extrinsicsCLT(
// twoQuadCLT, // TwoQuadCLT twoQuadCLT, //maybe null in no-rig mode, otherwise may contain rig measurements to be used as infinity ground truth
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
true, // adjust_poly,
threadsMax, //final int threadsMax, // maximal number of threads to launch
......
......@@ -2525,23 +2525,28 @@ public class SuperTiles{
final double [] world_hor, // horizontal plane normal (default [0.0, 1.0, 0.0])
final boolean show_histograms,
final boolean [][] hor_planes, // returns plane types (hor/vert)
// Parameters for alternative initial planes that use lowest disparity for fronto planes, and farthest - for horizontal
final boolean mod_strength, // = true; // FIXME: make a parameter. when set, multiply each tile strength by the number of selected neighbors
final boolean clusterize_by_highest, // = true;
final double clust_sigma, // = 0.7;
final double disp_arange_vert, // = 0.07;
final double disp_rrange_vert, // = 0.01;
final double disp_arange_hor, // = 0.035;
final double disp_rrange_hor, // = 0.005;
final double tolerance_above_near, // = 100.0; // 0.07; any?
final double tolerance_below_near, // = -0.01;
final double tolerance_above_far, // = 0.07;
final double tolerance_below_far, // = 0.1; // 100.0; // any farther
final int hor_vert_overlap, // = 2;
final int used_companions, // = 5; // cell that has this many new used companions is considered used (borders and already used are considered used too)
final int used_true_companions, // = 1; // there should be at least this many new selected tiles among neighbors
final int debugLevel,
final int dbg_X,
final int dbg_Y)
{
final int used_companions = 5; // cell that has this many new used companions is considered used (borders and already use3d are considered used too)
final int used_true_companions = 1; // there should be at least this many new selected tiles among neighbors.,
final double clust_sigma = 0.7;
final double disp_arange = 0.07;
final double disp_rrange = 0.01;
final double tolerance_above_near = 100.0; // 0.07; any?
final double tolerance_below_near = -0.01;
final double tolerance_above_far = 0.07;
final double tolerance_below_far = 0.1; // 100.0; // any farther
final int hor_vert_overlap = 2;
final boolean clusterize_by_highest = true;
final boolean mod_strength = true; // FIXME: make a parameter. when set, multiply each tile strength by the number of selected neighbors
final int tilesX = tileProcessor.getTilesX();
final int tilesY = tileProcessor.getTilesY();
final int superTileSize = tileProcessor.getSuperTileSize();
......@@ -2664,8 +2669,8 @@ public class SuperTiles{
stMeasSel, // final int stMeasSel, // = 1; // Select measurements for supertiles : +1 - combo, +2 - quad +4 - hor +8 - vert
plDispNorm, // final double plDispNorm, // to increase weight of nearer planes
clust_sigma, // final double sigma,
0.5 * disp_arange, // final double disp_arange,
0.5 * disp_rrange, // final double disp_rrange,
disp_arange_hor, // final double disp_arange,
disp_rrange_hor, // final double disp_rrange,
tolerance_above_far,// final double tolerance_above,
tolerance_below_far,// final double tolerance_below,
plMinPoints, // final int plMinPoints, // = 5; // Minimal number of points for plane detection
......@@ -2696,22 +2701,22 @@ public class SuperTiles{
}
final boolean [][][][] new_planes_vert = clusterize_by_highest ?
dispClusterizeHighest(
vert_disp_strength, // final double [][][][] disparity_strengths, // either normal or tilted disparity/strengths
null, // final boolean [][][] selected, // tiles OK to be assigned [supertile][measurement layer] [tile index] or null (or null or per-measurement layer)
used_vert, // final boolean [][][] prohibited, // already assigned tiles [supertile][measurement layer] [tile index] or null
false, // final boolean search_min,
stMeasSel, // final int stMeasSel, // = 1; // Select measurements for supertiles : +1 - combo, +2 - quad +4 - hor +8 - vert
plDispNorm, // final double plDispNorm, // to increase weight of nearer planes
clust_sigma, // final double sigma,
disp_arange, // final double disp_arange,
disp_rrange, // final double disp_rrange,
tolerance_above_near, // final double tolerance_above,
tolerance_below_near, // final double tolerance_below,
plMinPoints, // final int plMinPoints, // = 5; // Minimal number of points for plane detection
"vert", // final String suffix,
debugLevel + 0, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y): // final int dbg_Y)
vert_disp_strength, // final double [][][][] disparity_strengths, // either normal or tilted disparity/strengths
null, // final boolean [][][] selected, // tiles OK to be assigned [supertile][measurement layer] [tile index] or null (or null or per-measurement layer)
used_vert, // final boolean [][][] prohibited, // already assigned tiles [supertile][measurement layer] [tile index] or null
false, // final boolean search_min,
stMeasSel, // final int stMeasSel, // = 1; // Select measurements for supertiles : +1 - combo, +2 - quad +4 - hor +8 - vert
plDispNorm, // final double plDispNorm, // to increase weight of nearer planes
clust_sigma, // final double sigma,
disp_arange_vert, // final double disp_arange,
disp_rrange_vert, // final double disp_rrange,
tolerance_above_near, // final double tolerance_above,
tolerance_below_near, // final double tolerance_below,
plMinPoints, // final int plMinPoints, // = 5; // Minimal number of points for plane detection
"vert", // final String suffix,
debugLevel + 0, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y): // final int dbg_Y)
dispClusterize(
vert_disp_strength, // final double [][][][] disparity_strengths, // either normal or tilted disparity/strengths
mmm_vert, // final double [][][] hist_max_min_max, // histogram data: per tile array of odd number of disparity/strengths pairs, starting with first maximum
......@@ -3127,6 +3132,7 @@ public class SuperTiles{
//FIXME: use following 2 parameters
final double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
final double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
final double fronto_pow, // = 1.0; // increase weight even more
// now for regenerated planes - just null as it is not known if it is hor or vert
final boolean [][] hor_planes, // plane types (hor/vert)
final int debugLevel,
......@@ -3191,6 +3197,7 @@ public class SuperTiles{
fronto_tol, // double fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
fronto_rms, // double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes. May be tighter
fronto_offs, // double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
fronto_pow, // double fronto_pow, // = 1.0; // increase weight even more
dl); // int debugLevel);
if ((st_planes != null) && (!st_planes.isEmpty())){
......@@ -3238,6 +3245,7 @@ public class SuperTiles{
fronto_tol, // double fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
fronto_rms, // double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes. May be tighter
fronto_offs, // double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
fronto_pow, // double fronto_pow, // = 1.0; // increase weight even more
dl - 1); // int debugLevel);
}
......@@ -3313,6 +3321,7 @@ public class SuperTiles{
final double plFrontoTol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable
final double plFrontoRms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
final double plFrontoOffs, // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
final double PlFrontoPow, // = 1.0; // increase weight even more
final GeometryCorrection geometryCorrection,
final boolean correct_distortions,
......@@ -3335,11 +3344,27 @@ public class SuperTiles{
final double highMix, //stHighMix = 0.4; // Consider merging initial planes if jumps between ratio above
final double [] world_hor, // horizontal plane normal (default [0.0, 1.0, 0.0])
final boolean show_histograms,
// Parameters for alternative initial planes that use lowest disparity for fronto planes, and farthest - for horizontal
final boolean mod_strength, // = true; // FIXME: make a parameter. when set, multiply each tile strength by the number of selected neighbors
final boolean clusterize_by_highest, // = true;
final double clust_sigma, // = 0.7;
final double disp_arange_vert, // = 0.07;
final double disp_rrange_vert, // = 0.01;
final double disp_arange_hor, // = 0.035;
final double disp_rrange_hor, // = 0.005;
final double tolerance_above_near, // = 100.0; // 0.07; any?
final double tolerance_below_near, // = -0.01;
final double tolerance_above_far, // = 0.07;
final double tolerance_below_far, // = 0.1; // 100.0; // any farther
final int hor_vert_overlap, // = 2;
final int used_companions, // = 5; // cell that has this many new used companions is considered used (borders and already used are considered used too)
final int used_true_companions, // = 1; // there should be at least this many new selected tiles among neighbors
final boolean debug_initial_discriminate,
final int debugLevel,
final int dbg_X,
final int dbg_Y)
{
final boolean debug_initial_discriminate = true; // false; // true;
// use both horizontal and const disparity tiles to create tile clusters
// Add max_diff (maximal disparity difference while extracting initial tile selection) and max_tries (2..3) parameters
......@@ -3373,6 +3398,22 @@ public class SuperTiles{
show_histograms, // final boolean show_histograms,
hor_planes, // final boolean [][] hor_planes,
// Parameters for alternative initial planes that use lowest disparity for fronto planes, and farthest - for horizontal
mod_strength, // final boolean mod_strength, // = true; // FIXME: make a parameter. when set, multiply each tile strength by the number of selected neighbors
clusterize_by_highest, // final boolean clusterize_by_highest, // = true;
clust_sigma, // final double clust_sigma, // = 0.7;
disp_arange_vert, // final double disp_arange_vert, // = 0.07;
disp_rrange_vert, // final double disp_rrange_vert, // = 0.01;
disp_arange_hor, // final double disp_arange_hor, // = 0.035;
disp_rrange_hor, // final double disp_rrange_hor, // = 0.005;
tolerance_above_near, // final double tolerance_above_near, // = 100.0; // 0.07; any?
tolerance_below_near, // final double tolerance_below_near, // = -0.01;
tolerance_above_far, // final double tolerance_above_far, // = 0.07;
tolerance_below_far, // final double tolerance_below_far, // = 0.1; // 100.0; // any farther
hor_vert_overlap, // final int hor_vert_overlap, // = 2;
used_companions, // final int used_companions, // = 5; // cell that has this many new used companions is considered used (borders and already used are considered used too)
used_true_companions, // final int used_true_companions, // = 1; // there should be at least this many new selected tiles among neighbors
debugLevel+(debug_initial_discriminate? 2:0), // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y)
......@@ -3413,8 +3454,9 @@ public class SuperTiles{
plFrontoTol, // final double plFrontoTol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable
plFrontoRms, // final double plFrontoRms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
plFrontoOffs, // final double plFrontoOffs, // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
PlFrontoPow, // double fronto_pow, // = 1.0; // increase weight even more
hor_planes, // final boolean [][] hor_planes, // returns plane types (hor/vert)
debugLevel + 1, // 0, // 1, // + 2, // 1, // final int debugLevel,
debugLevel + 0, // 1, // 0, // 1, // + 2, // 1, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y)
this.planes = new_planes; // save as "measured" (as opposed to "smoothed" by neighbors) planes
......@@ -3473,6 +3515,7 @@ public class SuperTiles{
final double plFrontoTol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable
final double plFrontoRms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
final double plFrontoOffs, // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
final double PlFrontoPow, // = 1.0; // increase weight even more
final GeometryCorrection geometryCorrection,
final boolean correct_distortions,
......@@ -3576,6 +3619,7 @@ public class SuperTiles{
plFrontoTol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable
plFrontoRms, // final double plFrontoRms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
plFrontoOffs, // final double plFrontoOffs, // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0
PlFrontoPow, // double fronto_pow, // = 1.0; // increase weight even more
null, // final boolean [][] hor_planes, // plane types (hor/vert)
debugLevel, // + 2, // 1, // final int debugLevel,
dbg_X, // final int dbg_X,
......@@ -6844,6 +6888,7 @@ public class SuperTiles{
// FIXME: the following 2 parameters are not yet used
final double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
final double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
final double fronto_pow, // = 1.0; // increase weight even more
final int debugLevel,
final int dbg_X,
final int dbg_Y)
......@@ -6938,7 +6983,8 @@ public class SuperTiles{
boolean OK = this_pd.removeOutliers( // getPlaneFromMeas should already have run
fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
fronto_rms, // double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes. May be tighter
fronto_offs, //double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight.
fronto_offs, // double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight.
fronto_pow, // double fronto_pow, // = 1.0; // increase weight even more
disp_strength,
targetV, // double targetEigen, // target eigenvalue for primary axis (is disparity-dependent, so is non-constant)
max_outliers, // int maxRemoved, // maximal number of tiles to remove (not a constant)
......@@ -7060,7 +7106,7 @@ public class SuperTiles{
//FIXME: use following 2 parameters
final double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
final double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
final double fronto_pow, // = 1.0; // increase weight even more
final int debugLevel,
final int dbg_X,
final int dbg_Y)
......@@ -7284,7 +7330,8 @@ public class SuperTiles{
OK = bpd[np][npip].removeOutliers( // getPlaneFromMeas should already have run
fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
fronto_rms, // double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes. May be tighter
fronto_offs, //double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced
fronto_offs, // double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced
fronto_pow, // double fronto_pow, // = 1.0; // increase weight even more
disp_strength,
targetV, // double targetEigen, // target eigenvalue for primary axis (is disparity-dependent, so is non-constant)
max_outliers, // int maxRemoved, // maximal number of tiles to remove (not a constant)
......
......@@ -1350,6 +1350,7 @@ public class TilePlanes {
double fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes. May be tighter
double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
double fronto_pow, // = 1.0; // increase weight even more
double [][][] disp_str, // calculate just once when removing outliers (null - OK, will generate it)
double inTargetEigen, // target eigenvalue for primary axis (is disparity-dependent, so is non-constant)
int maxRemoved, // maximal number of tiles to remove (not a constant)
......@@ -1470,7 +1471,7 @@ public class TilePlanes {
almost_fronto, // boolean fronto_mode,
fronto_offs, // double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
fronto_pow, // double fronto_pow, // = 1.0; // increase weight even more
this.mlfp,
debugLevel-1) != null);
......@@ -1516,6 +1517,8 @@ public class TilePlanes {
this.smplMode,
almost_fronto, // boolean fronto_mode,
fronto_offs, // double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
fronto_pow, // double fronto_pow, // = 1.0; // increase weight even more
this.mlfp,
debugLevel-0) != null); // will show image
if (!OK) { // restore how it was
......@@ -1560,8 +1563,10 @@ public class TilePlanes {
min_tiles,
smplMode, // = true; // Use sample mode (false - regular tile mode)
false, // boolean fronto_mode,
0.0, //double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
false, // boolean fronto_mode,
0.0, // double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
1.0, // double fronto_pow, // = 1.0; // increase weight even more
mlfp,
debugLevel);
......@@ -1606,6 +1611,7 @@ public class TilePlanes {
boolean fronto_mode,
double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
double fronto_pow, // = 1.0; // increase weight even more
MeasuredLayersFilterParameters mlfp,
int debugLevel)
......@@ -1823,7 +1829,11 @@ public class TilePlanes {
if (w > 0.0){
double d = disp_str[nl][0][indx];
if (d > d0) {
w *= (d-d0)/fronto_offs; // more weight of the near pixels, same weight of the centre pixels
double kw = (d-d0)/fronto_offs; // more weight of the near pixels, same weight of the centre pixels
if (fronto_pow != 1.00) {
kw = Math.pow(kw, fronto_pow);
}
w *= kw; // increase influence of near pixels even more
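// i.e. w is scaled by ((d - d0)/fronto_offs)^fronto_pow, boosting near (higher-disparity) pixels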
disp_str[nl][1][indx] = w;
sw += w;
swz += w * d;
......@@ -4280,6 +4290,7 @@ public class TilePlanes {
double fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes. May be tighter
double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
double fronto_pow, // = 1.0; // increase weight even more
int debugLevel)
{
if (debugLevel > 2) {
......@@ -4368,13 +4379,14 @@ public class TilePlanes {
if ((pd.getNormValue() > targetEigen) || almost_fronto) { // targetV) {
OK = pd.removeOutliers( // getPlaneFromMeas should already have run
fronto_tol, // double fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
fronto_rms, // double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes. May be tighter
fronto_offs, //double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
fronto_tol, // double fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
fronto_rms, // double fronto_rms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes. May be tighter
fronto_offs, //double fronto_offs, // = 0.2; // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0 - disable
fronto_pow, // double fronto_pow, // = 1.0; // increase weight even more
disp_strength,
plTargetEigen, // targetV, // double targetEigen, // target eigenvalue for primary axis (is disparity-dependent, so is non-constant)
max_outliers, // int maxRemoved, // maximal number of tiles to remove (not a constant)
debugLevel); // int debugLevel)
plTargetEigen, // targetV, // double targetEigen, // target eigenvalue for primary axis (is disparity-dependent, so is non-constant)
max_outliers, // int maxRemoved, // maximal number of tiles to remove (not a constant)
debugLevel); // int debugLevel)
if (!OK) {
continue;
}
......
......@@ -704,14 +704,6 @@ public class TileProcessor {
CLTPass3d combo_pass =new CLTPass3d(this);
final int tlen = tilesX * tilesY;
// final int disparity_index = usePoly ? ImageDtt.DISPARITY_INDEX_POLY : ImageDtt.DISPARITY_INDEX_CM;
// combo_pass.tile_op = new int [tilesY][tilesX]; // for just non-zero
// combo_pass.disparity_map = new double [ImageDtt.DISPARITY_TITLES.length][];
// for (int i = 0; i< ImageDtt.QUAD; i++) combo_pass.disparity_map[ImageDtt.IMG_DIFF0_INDEX + i] = new double[tlen];
// for now - will copy from the best full correlation measurement
// combo_pass.texture_tiles = new double [tilesY][tilesX][][];
// combo_pass.max_tried_disparity = new double [tilesY][tilesX];
combo_pass.is_combo = true;
combo_pass.calc_disparity = disparity.clone(); //new double [tlen];
combo_pass.calc_disparity_combo = disparity.clone(); //new double [tlen];
......@@ -5344,11 +5336,12 @@ public class TileProcessor {
clt_parameters.plFrontoTol, // final double plFrontoTol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable
clt_parameters.plFrontoRms, // final double plFrontoRms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
clt_parameters.plFrontoOffs, // final double plFrontoOffs, // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0
clt_parameters.PlFrontoPow, // final double PlFrontoPow, // = 1.0; // increase weight even more
geometryCorrection,
clt_parameters.correct_distortions,
clt_parameters.stSmplMode , // final boolean smplMode, // = true; // Use sample mode (false - regular tile mode)
clt_parameters.mlfp, // Filter parameters
clt_parameters.mlfp, // Filter parameters
clt_parameters.plBlurBinHor, // final double bin_blur_hor, // Blur disparity histograms for horizontal clusters by this sigma (in bins)
clt_parameters.plBlurBinVert, // final double bin_blur_vert, // Blur disparity histograms for constant disparity clusters by this sigma (in bins)
......@@ -5365,6 +5358,24 @@ public class TileProcessor {
clt_parameters.stHighMix, // stHighMix = 0.4; // Consider merging initial planes if jumps between ratio above
world_hor, // final double [] world_hor, // horizontal plane normal (default [0.0, 1.0, 0.0])
clt_parameters.show_histograms, // final boolean show_histograms,
clt_parameters.ft_mod_strength, // final boolean mod_strength, // when set, multiply each tile strength by the number of selected neighbors
clt_parameters.ft_clusterize_by_highest, // final boolean clusterize_by_highest = true;
clt_parameters.ft_clust_sigma, // final double clust_sigma = 0.7;
clt_parameters.ft_disp_arange_vert, // final double disp_arange_vert = 0.07;
clt_parameters.ft_disp_rrange_vert, // final double disp_rrange_vert = 0.01;
clt_parameters.ft_disp_arange_hor, // final double disp_arange_hor = 0.035;
clt_parameters.ft_disp_rrange_hor, // final double disp_rrange_hor = 0.005;
clt_parameters.ft_tolerance_above_near, // final double tolerance_above_near = 100.0; // 0.07; any?
clt_parameters.ft_tolerance_below_near, // final double tolerance_below_near = -0.01;
clt_parameters.ft_tolerance_above_far, // final double tolerance_above_far = 0.07;
clt_parameters.ft_tolerance_below_far, // final double tolerance_below_far = 0.1; // 100.0; // any farther
clt_parameters.ft_hor_vert_overlap, // final int hor_vert_overlap = 2;
clt_parameters.ft_used_companions, // final int used_companions = 5; // cell that has this many new used companions is considered used (borders and already used are considered used too)
clt_parameters.ft_used_true_companions, // final int used_true_companions = 1; // there should be at least this many new selected tiles among neighbors
//clt_parameters.ft_
clt_parameters.debug_initial_discriminate, // final boolean debug_initial_discriminate,
clt_parameters.batch_run?-3:debugLevel, // -1, // debugLevel, // final int debugLevel)
clt_parameters.tileX,
clt_parameters.tileY);
......@@ -5418,6 +5429,7 @@ public class TileProcessor {
clt_parameters.plFrontoTol, // final double plFrontoTol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable
clt_parameters.plFrontoRms, // final double plFrontoRms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
clt_parameters.plFrontoOffs, // final double plFrontoOffs, // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0
clt_parameters.PlFrontoPow, // final double PlFrontoPow, // = 1.0; // increase weight even more
geometryCorrection,
clt_parameters.correct_distortions,
clt_parameters.stSmplMode, // final boolean smplMode, // = true; // Use sample mode (false - regular tile mode)
......@@ -5531,6 +5543,7 @@ public class TileProcessor {
clt_parameters.plFrontoTol, //final double fronto_tol, // fronto tolerance (pix) - treat almost fronto as fronto (constant disparity). <= 0 - disable this feature
clt_parameters.plFrontoRms, // final double plFrontoRms, // Target rms for the fronto planes - same as sqrt(plMaxEigen) for other planes
clt_parameters.plFrontoOffs, // final double plFrontoOffs, // increasing weight of the near tiles by using difference between the reduced average as weight. <= 0
clt_parameters.PlFrontoPow, // final double PlFrontoPow, // = 1.0; // increase weight even more
debugLevel, // 1, // final int debugLevel)
clt_parameters.tileX,
clt_parameters.tileY);
......@@ -5859,7 +5872,7 @@ public class TileProcessor {
0, // int nlayer, // over multi-layer - do not render more than nlayer on top of each other
st.getPlanesMod(), // TilePlanes.PlaneData [][] planes,
st.getShellMap(), // shells, // int [][] shells,
1000, // int max_shells,
5000, // int max_shells,
clt_parameters.plFuse,// boolean fuse,
false, // boolean show_connections,
false, // boolean use_NaN,
......
......@@ -5,7 +5,7 @@
** Copyright (C) 2017 Elphel, Inc.
**
** -----------------------------------------------------------------------------**
**
**
** TileSurface.java is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
......@@ -31,7 +31,7 @@ import java.util.concurrent.atomic.AtomicInteger;
public class TileSurface {
// public
// public
// private int tileSize;
private int superTileSize;
private int imageTilesX;
......@@ -45,23 +45,23 @@ public class TileSurface {
private int [][] tileLayers = null;
private TileData [][] tileData = null;
private SurfaceData [] surfaceData = null; // result surfaces
static int STAT_UNASSIGNED = 0; // index of number of unassigned tiles
static int STAT_ASSIGNED = 1; // index of number of assigned tiles
static int STAT_PROHIBITED = 2; // index of number of initially prohibited tiles
static int STAT_IMPOSSIBLE = 3; // index of number of impossible (like no surfaces at that location) tiles
static int STAT_NUM_ML = 4; // index of number of measurement layers used
static int STAT_LEN = 5; // number of stat entries
static int UNASSIGNED = 0; // tile not assigned (yet)
static int PROHOBITED = -1; //tile marked as invalid
static int IMPOSSIBLE = -2; // give up on this tile (as no surface for it)
static int NEW_ASSIGNED = 0; // successfully assigned to a surface
static int NO_SURF = 1; // no surfaces for this tile cell
static int TOO_WEAK = 2; // tile strength is too low
static int TOO_STRONG = 3; // tile strength is too high ( for that disparity difference)
static int TOO_FAR = 4; // no surface candidates within the allowed disparity range
static int NOT_UNIQUE = 5; // multiple surfaces are within range
static int REMOVED_TILES = 6; // number of removed tiles in weak clusters
static int REMOVED_CLUSTERS = 7; // number of removed weak clusters
......@@ -75,9 +75,9 @@ public class TileSurface {
static int CLUST_NUM_CONFLICTS_A = 5;
static int CLUST_NUM_CONFLICTS_B = 6;
static int CLUST_NUM_STATS = 7;
// private int nsTilesstSize = 0; // 8;
GeometryCorrection geometryCorrection = null;
public TileSurface(
......@@ -100,10 +100,10 @@ public class TileSurface {
int [] dirs = {-stilesX, -stilesX + 1, 1, stilesX + 1, stilesX, stilesX - 1, -1, -stilesX - 1};
this.st_dirs8 = dirs;
int tx = superTileSize * stilesX;
int [] tdirs = {-tx, -tx + 1, 1, tx + 1, tx, tx - 1, -1, -tx - 1};
this.t_dirs8 = tdirs;
}
public int getThreadsMax(){
return this.threadsMax;
......@@ -114,7 +114,7 @@ public class TileSurface {
public int getSTilesY(){
return stilesY;
}
public int getSuperTileSize(){
return superTileSize;
}
......@@ -130,12 +130,12 @@ public class TileSurface {
public int getImageTilesY(){
return imageTilesY;
}
public TileData [][] getTileData(){
return this.tileData;
}
public int [][] getTileLayersCopy()
{
if (this.tileLayers == null){
......@@ -149,11 +149,11 @@ public class TileSurface {
}
return this.tileLayers;
}
public void setTileLayers(int [][] tileLayers){
this.tileLayers = tileLayers;
}
public class TileData{
double [] disp_strength;
int indx = 0;
......@@ -169,7 +169,7 @@ public class TileSurface {
{
setDisparityStrength(disparity,strength);
}
/*
/*
public void setParentPlane (TilePlanes.PlaneData parent_plane)
{
this.parent_plane = parent_plane;
......@@ -190,21 +190,22 @@ public class TileSurface {
s+= "] ";
return s;
}
@Override
public String toString()
{
String s = " ";
s += getNeibString();
s += String.format( "index=%2d(%2d) parent = %3d:%1d disp=%8.5f weight=%8.5f",
new_index, indx,
parent_nsTile, parent_layer, disp_strength[0], disp_strength[1]);
return s;
}
public void setParentTileLayer (int parent_nsTile, int parent_layer)
{
this.parent_nsTile = parent_nsTile;
......@@ -215,8 +216,8 @@ public class TileSurface {
{
return this.parent_layer;
}
// public void setParentNsTile(int parent_nsTile)
// {
// this.parent_nsTile = parent_nsTile;
......@@ -236,37 +237,37 @@ public class TileSurface {
}
public int getIndex()
{
return this.indx;
return this.indx;
}
public void setNewIndex(int indx)
{
this.new_index = indx;
this.new_index = indx;
}
public int getNewIndex()
{
return this.new_index;
return this.new_index;
}
public void setNeighbors(int [] neighbors)
{
this.neighbors = neighbors;
this.neighbors = neighbors;
}
public int [] getNeighbors()
{
return this.neighbors;
return this.neighbors;
}
public void setNeighbor(int dir,int neib)
{
// if (this.neighbors == null) this.neighbors = new int[8];
this.neighbors[dir] = neib;
this.neighbors[dir] = neib;
}
public int getNeighbor(int dir)
{
// if (this.neighbors == null) this.neighbors = new int[8];
return this.neighbors[dir];
return this.neighbors[dir];
}
public void setEnable(boolean enable)
{
......@@ -285,7 +286,7 @@ public class TileSurface {
this.disp_strength[0] = disparity;
this.disp_strength[1] = strength;
}
public void setDisparity(double disparity)
{
if (this.disp_strength == null){
......@@ -314,7 +315,7 @@ public class TileSurface {
{
return getDisparity(true);
}
public void setStrength(double strength)
{
if (this.disp_strength == null){
......@@ -329,7 +330,7 @@ public class TileSurface {
}
return this.disp_strength[1];
}
}
public int getNStileDir(
......@@ -340,9 +341,9 @@ public class TileSurface {
int sty = nsTile / stilesX;
int stx = nsTile % stilesX;
if ((stx > 0) && (sty > 0) && (sty < (stilesY - 1)) && (stx < (stilesX - 1))) return nsTile + st_dirs8[dir]; // most likely case
if ((sty == 0) && ((dir < 2) || (dir == 7))) return -1;
if ((sty == (stilesY - 1)) && (dir > 2) && (dir < 6)) return -1;
if ((stx == 0) && (dir > 4)) return -1;
if ((sty == 0) && ((dir < 2) || (dir == 7))) return -1;
if ((sty == (stilesY - 1)) && (dir > 2) && (dir < 6)) return -1;
if ((stx == 0) && (dir > 4)) return -1;
if ((stx == (stilesX - 1)) && (dir > 0) && (dir < 4)) return -1;
return nsTile + st_dirs8[dir];
}
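// Illustrative sketch (not part of this commit): getNStileDir() above numbers the
// eight directions 0..7 clockwise starting from "up", matching the st_dirs8 table
// (-stilesX, -stilesX+1, +1, stilesX+1, stilesX, stilesX-1, -1, -stilesX-1), and
// returns -1 when the step would leave the supertile grid. An equivalent bounds
// check for a generic width x height grid of linear indices:
static int neighborIndexSketch(int indx, int dir, int width, int height) {
	final int [] dx = { 0, 1, 1, 1, 0, -1, -1, -1}; // per-direction x step
	final int [] dy = {-1, -1, 0, 1, 1,  1,  0, -1}; // per-direction y step
	int x = indx % width + dx[dir];
	int y = indx / width + dy[dir];
	if ((x < 0) || (y < 0) || (x >= width) || (y >= height)) return -1; // outside the grid
	return y * width + x;
}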
......@@ -357,24 +358,24 @@ public class TileSurface {
int ty = nTile / tilesX;
int tx = nTile % tilesX;
if ((tx > 0) && (ty > 0) && (ty < (tilesY - 1)) && (tx < (tilesX - 1))) return nTile + t_dirs8[dir]; // most likely case
if ((ty == 0) && ((dir < 2) || (dir == 7))) return -1;
if ((ty == (tilesY - 1)) && (dir > 2) && (dir < 6)) return -1;
if ((tx == 0) && (dir > 4)) return -1;
if ((ty == 0) && ((dir < 2) || (dir == 7))) return -1;
if ((ty == (tilesY - 1)) && (dir > 2) && (dir < 6)) return -1;
if ((tx == 0) && (dir > 4)) return -1;
if ((tx == (tilesX - 1)) && (dir > 0) && (dir < 4)) return -1;
return nTile + t_dirs8[dir];
}
public int getDirToStile(
int nsTile,
int nsTile1)
{
int sty = nsTile / stilesX;
int stx = nsTile % stilesX;
int sty1 = nsTile1 / stilesX;
int stx1 = nsTile1 % stilesX;
int dx = stx1 - stx;
int dx = stx1 - stx;
int dy = sty1 - sty;
// int sdx = (dx > 0) ? 1: ( (dx < 0) ? -1 : 0);
// int sdy = (dy > 0) ? 1: ( (dy < 0) ? -1 : 0);
......@@ -394,7 +395,7 @@ public class TileSurface {
return -1;
}
/**
* Get tile surface number from supertile number, direction (-1 same) and the supertile plane index
* @param nsTile number of the supertile
......@@ -417,7 +418,7 @@ public class TileSurface {
if (np >= tsn){
return -1;
}
return np;
return np;
}
int nsTile1 = -1;
for (int d = 0; d < dir; d ++){
......@@ -433,10 +434,10 @@ public class TileSurface {
return -1;
}
}
return tsn + np;
return tsn + np;
}
/**
* Get supertile direction and the plane number that contributed to a specific tile surface
* @param nsTile supertile index
* @param tp tile surface index (generated by getTileSurfaceNumber)
* @param planes array of the per-supertile, per plane plane data (each level can be null)
......@@ -460,7 +461,7 @@ public class TileSurface {
rslt[1] = tp;
return rslt;
}
tp -= num_planes;
tp -= num_planes;
}
return null; // error - invalid input
}
......@@ -471,7 +472,7 @@ public class TileSurface {
double [] wnd1d = new double [size];
for (int i = 0; i < size/2; i++){
wnd1d[i] = 0.5 * (1.0 - Math.cos(2*Math.PI*(i+0.5)/size));
wnd1d[size - i -1] = wnd1d[i];
wnd1d[size - i -1] = wnd1d[i];
}
double [] wnd = new double [size * size];
int indx = 0;
......@@ -482,7 +483,7 @@ public class TileSurface {
}
return wnd;
}
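// Illustrative sketch (not part of this commit): the window built above is assumed
// to be the separable 2-D raised-cosine (Hann) window - the outer product of the
// 1-D half-sample-offset window with itself.
static double [] hannWindow2dSketch(int size) {
	double [] wnd1d = new double [size];
	for (int i = 0; i < size; i++) {
		wnd1d[i] = 0.5 * (1.0 - Math.cos(2 * Math.PI * (i + 0.5) / size));
	}
	double [] wnd = new double [size * size];
	for (int y = 0; y < size; y++) {
		for (int x = 0; x < size; x++) {
			wnd[y * size + x] = wnd1d[y] * wnd1d[x]; // separable outer product
		}
	}
	return wnd;
}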
public double [] getWindow()
{
return window;
......@@ -510,8 +511,8 @@ public class TileSurface {
final int dbg_X,
final int dbg_Y)
{
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final double [][][][] fused_data = new double [nStiles][][][];
// final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final Thread[] threads = ImageDtt.newThreadArray((debugLevel > 1)? 1 : threadsMax);
......@@ -519,6 +520,7 @@ public class TileSurface {
final int dbg_tile = dbg_Y * stilesX + dbg_X;
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nsTile = ai.getAndIncrement(); nsTile < nStiles; nsTile = ai.getAndIncrement()) {
if (planes[nsTile] != null) {
......@@ -539,9 +541,9 @@ public class TileSurface {
fraction_uni, // double fraction_uni,
debugLevel-1); // int debugLevel)
// multiply disparities by strengths to calculate weighted averages
double [] disp0 = disp_strength[np][0].clone(); // to use where strength == 0
for (int i = 0; i < disp_strength[np][1].length; i++){
disp_strength[np][0][i] *= disp_strength[np][1][i];
}
// }
for (int dir = 0; dir < st_dirs8.length; dir++){
......@@ -560,17 +562,17 @@ public class TileSurface {
debugLevel-1); // int debugLevel)
for (int i = 0; i < disp_strength[np][1].length; i++){
if (ds[1][i] > 0.0){
disp_strength[np][1][i] += ds[1][i];
disp_strength[np][0][i] += ds[1][i] * ds[0][i];
}
}
}
}
}
}
// calculate weighted average for each tile
for (int i = 0; i < disp_strength[np][1].length; i++){
if (disp_strength[np][1][i] > 0.0){
disp_strength[np][0][i] /= disp_strength[np][1][i];
} else {
disp_strength[np][0][i] = disp0[i]; // may have discontinuity
}
......@@ -580,10 +582,10 @@ public class TileSurface {
if (sel != null){
for (int i = 0; i < disp_strength[np][1].length; i++){
if (!sel[i]) disp_strength[np][1][i] = 0.0;
}
}
}
}
if ((debugLevel > -1) && (dl>0)){
String str_neib = "fuseSupertilePlanes_"+nsTile+":"+np;
for (int dir = 0; dir < 8; dir++){
......@@ -602,8 +604,8 @@ public class TileSurface {
titles [i + 1 * disp_strength.length] = "mdisp_" + i;
titles [i + 2 * disp_strength.length] = "str_" + i;
if (disp_strength[i] != null) {
dbg_img[i + 0 * disp_strength.length] = disp_strength[i][0];
dbg_img[i + 2 * disp_strength.length] = disp_strength[i][1];
dbg_img[i + 0 * disp_strength.length] = disp_strength[i][0];
dbg_img[i + 2 * disp_strength.length] = disp_strength[i][1];
dbg_img[i + 1 * disp_strength.length] = disp_strength[i][0].clone();
for (int j = 0; j < disp_strength[i][0].length; j++){
if (disp_strength[i][1][j] == 0.0){
......@@ -615,12 +617,12 @@ public class TileSurface {
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays();
sdfa_instance.showArrays(dbg_img, 2 * superTileSize, 2 * superTileSize, true, "surf_ds_"+nsTile, titles);
}
}
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return fused_data;
}
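// Illustrative sketch (not part of this commit): the fusion loop above accumulates
// strength-weighted disparities from the center plane and its neighbors, then
// normalizes by the total strength, falling back to the unweighted center-plane
// disparity where the accumulated strength is zero.
static double fuseDisparitySketch(double [] disparities, double [] strengths, double fallback) {
	double sum_wd = 0.0, sum_w = 0.0;
	for (int i = 0; i < disparities.length; i++) {
		sum_wd += disparities[i] * strengths[i];
		sum_w  += strengths[i];
	}
	return (sum_w > 0.0) ? (sum_wd / sum_w) : fallback; // weighted average or fallback
}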
......@@ -642,8 +644,8 @@ public class TileSurface {
final int dbg_X,
final int dbg_Y)
{
final int nStiles = stilesX * stilesY;
// final int nTiles = nStiles * superTileSize * superTileSize;
final int nStiles = stilesX * stilesY;
// final int nTiles = nStiles * superTileSize * superTileSize;
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final int [][] dir_corn = {
......@@ -651,12 +653,13 @@ public class TileSurface {
{ 0, 1, -1, 2}, // 1 (top right)
{ 6, -1, 5, 4}, // 2 (bottom left)
{-1, 2, 4, 3}}; // 3 (bottom right)
final int [][][][] corners = new int [nStiles][][][];
final int dbg_tile = dbg_Y * stilesX + dbg_X;
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nsTile = ai.getAndIncrement(); nsTile < nStiles; nsTile = ai.getAndIncrement()) {
if (planes[nsTile] != null) {
......@@ -671,7 +674,7 @@ public class TileSurface {
int [] neibs = planes[nsTile][np].getNeibBest();
if (neibs == null) {
System.out.println("getSupertilesTopology(), nsTile = "+nsTile+" neibs= null");
} else {
corners[nsTile][np]= new int [4][4];
for (int i= 0; i < 4; i++){
......@@ -679,7 +682,7 @@ public class TileSurface {
if (dir_corn[i][j] < 0){
corners[nsTile][np][i][j] = np;
} else {
corners[nsTile][np][i][j] = neibs[dir_corn[i][j]];
corners[nsTile][np][i][j] = neibs[dir_corn[i][j]];
}
}
}
......@@ -690,11 +693,11 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return corners;
}
/**
* Calculate per-tile surface data (TileData) including disparity, strength, and 8 neighbor indices
* @param planes array of the per-supertile, per plane plane data (each level can be null)
......@@ -706,22 +709,22 @@ public class TileSurface {
*/
public int [][][][][] generateOverlappingMeshes (
final TilePlanes.PlaneData [][] planes,
final int [][][][] corners,
final int [][][][] corners,
final int debugLevel,
final int dbg_X,
final int dbg_Y)
{
final int nStiles = stilesX * stilesY;
// final int nTiles = nStiles * superTileSize * superTileSize;
final int nStiles = stilesX * stilesY;
// final int nTiles = nStiles * superTileSize * superTileSize;
final int [][][][][] meshes = new int [nStiles][][][][];
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final int ss2 = 2 * superTileSize;
final int ss1 = superTileSize;
final int sh = superTileSize/2;
final int len_st2 = ss2 * ss2 ;
final int [][][] quad_check = { // [quadrant 01/23][dir: left, right, diagonal]{dir, quadrant}
{ // top left quadrant
{6, 1}, //left
......@@ -760,13 +763,14 @@ public class TileSurface {
{3 * sh * ss2 + ss1, ss1, sh}
},
};
final TileNeibs tileNeibs = new TileNeibs(2*superTileSize);
final int dbg_tile = dbg_Y * stilesX + dbg_X;
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nsTile = ai.getAndIncrement(); nsTile < nStiles; nsTile = ai.getAndIncrement()) {
if (planes[nsTile] != null) {
......@@ -808,7 +812,7 @@ public class TileSurface {
// change diagonal first (add corner square later again)
if (neib_id[2] != null){
switch (quadrant){
case 0: // top left
case 0: // top left
for (int j = 0; j < (ss1 - 1); j++){
for (int i = ss1 - 2 - j; i>=0; i--){
pre_mesh[i * ss2 + j] = neib_id[2];
......@@ -843,14 +847,14 @@ public class TileSurface {
for (int y = 0; y < cut_ortho[quadrant][arr][2]; y++){
for (int x = 0; x < cut_ortho[quadrant][arr][1]; x++){
int indx = cut_ortho[quadrant][arr][0] + y * ss2 + x;
pre_mesh[indx] = neib_id[arr];
pre_mesh[indx] = neib_id[arr];
}
}
}
// add corner square corner on top of possible ortho
if (neib_id[2] != null){
switch (quadrant){
case 0: // top left
case 0: // top left
for (int j = 0; j < sh; j++){
for (int i = 0 ; i < sh; i++){
pre_mesh[i * ss2 + j] = neib_id[2];
......@@ -883,11 +887,11 @@ public class TileSurface {
}
// build mesh , then add cuts if needed
meshes[nsTile][np] = new int [len_st2][][];
int [][][] dbg_meshes = meshes[nsTile][np];
int [][][] dbg_meshes = meshes[nsTile][np];
if (dl > 0){
System.out.println("generateOverlappingMeshes(), dbg_meshes.length = "+dbg_meshes.length);
}
for (int i = 0; i < len_st2; i ++){
if ((pre_mesh[i] != null) && (pre_mesh[i][0] == nsTile)){
meshes[nsTile][np][i] = new int [8][];
......@@ -912,16 +916,16 @@ public class TileSurface {
case 6: dir_go = 6; dir_start = 6; cut_right = true; break;
case 7: dir_go = 6; dir_start = 7; cut_right = false; break;
}
int dir_go45 = (dir_go + (cut_right ? 1:7)) % 8;
int dir_go90 = (dir_go + (cut_right ? 2:6)) % 8;
int dir_go135 = (dir_go + (cut_right ? 3:5)) % 8;
int dir_go180 = (dir_go + 4) % 8;
indx = ss1 * (ss2 + 1); // center point
for (int i = 0; i < sh; i++) indx = tileNeibs.getNeibIndex(indx, dir_go);
if (dir_start >= 0) indx = tileNeibs.getNeibIndex(indx, dir_start);
int indx1 = tileNeibs.getNeibIndex(indx, dir_go90);
// if ((pre_mesh[indx] != null) && (pre_mesh[indx1] == null)){ // there is a cut
// if ((pre_mesh[indx][0] == nsTile) && (pre_mesh[indx1][0] != nsTile)){ // there is a cut
......@@ -937,7 +941,7 @@ public class TileSurface {
}
indx = tileNeibs.getNeibIndex(indx, dir_go);
}
}
}
}
}
}
......@@ -945,7 +949,7 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return meshes;
}
......@@ -960,16 +964,16 @@ public class TileSurface {
* @param dbg_Y debug supertile Y coordinate
* @return per-tile (rounded up to contain whole supertiles) sparse array of TileData instances
*/
public TileData [][] createTileShells (
final TilePlanes.PlaneData [][] planes,
final double [][][][] fusedSupertilePlanes,
final int [][][][][] lappingMeshes,
final int [][][][][] lappingMeshes,
final int debugLevel,
final int dbg_X,
final int dbg_Y)
{
final int nStiles = stilesX * stilesY;
final int nStiles = stilesX * stilesY;
final int tilesX = stilesX * superTileSize;
final int tilesY = stilesY * superTileSize;
final int nTiles = nStiles * superTileSize * superTileSize;
......@@ -980,12 +984,13 @@ public class TileSurface {
final int sh = superTileSize/2;
final int len2 = ss2 * ss2 ;
final TileNeibs tileNeibs = new TileNeibs(2 * superTileSize);
final int dbg_tile = dbg_Y * stilesX + dbg_X;
// initialize result structure
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nsTile = ai.getAndIncrement(); nsTile < nStiles; nsTile = ai.getAndIncrement()) {
int dl = ((debugLevel > -1) && (nsTile == dbg_tile)) ? 3:0;
......@@ -996,29 +1001,30 @@ public class TileSurface {
nsTile, // int nsTile,
8, // int dir, // direction, or -1 (same)
0, // int np,
planes); // TilePlanes.PlaneData [][] planes)
if (num_surf > 0) { // 0 - nothing in this supertile, none around - remove
if (num_surf > 0) { // 0 - nothing in this supertile, none around
int stileY = nsTile / stilesX;
int stileX = nsTile % stilesX;
for (int ty = 0; ty < superTileSize; ty++){
for (int tx = 0; tx < superTileSize; tx++){
int indx = ((stileY * superTileSize) + ty) * tilesX + ((stileX * superTileSize) + tx);
int indx = ((stileY * superTileSize) + ty) * tilesX + ((stileX * superTileSize) + tx);
tile_data[indx] = new TileData[num_surf];
}
}
}
}
}
}
};
}
}
ImageDtt.startAndJoin(threads);
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nsTile = ai.getAndIncrement(); nsTile < nStiles; nsTile = ai.getAndIncrement()) {
if (planes[nsTile] != null) {
......@@ -1026,11 +1032,11 @@ public class TileSurface {
if (dl > 0){
System.out.println("createTileShells():2, nsTile = "+nsTile);
}
int stileY = nsTile / stilesX;
int stileY = nsTile / stilesX;
int stileX = nsTile % stilesX;
// for (int np = 0; np < planes[nsTile].length; np++) if (planes[nsTile][np] != null){
for (int np = 0; np < planes[nsTile].length; np++) if ((planes[nsTile][np] != null) && (planes[nsTile][np].getWeight() > 0.0)){
int [][][] src_mesh = lappingMeshes[nsTile][np];
double [][] disp_strength = fusedSupertilePlanes[nsTile][np];
TileData [] dual_mesh = new TileData [len2]; // full overlapping dual-sized mesh
......@@ -1058,15 +1064,15 @@ public class TileSurface {
// dual_mesh[indx].setParentNsTile(nsTile);
dual_mesh[indx].setParentTileLayer(nsTile, np);
// dual_mesh[indx].setParentPlane(planes[nsTile][np]);
int dirThisfrom0 = getDirToStile(nsTile0, nsTile); // can be -1;
int surf0 = getTileSurfaceNumber ( // Number of the surface for the tile itself
nsTile0, // int nsTile,
dirThisfrom0, // int dir, // direction, or -1 (same)
np, // int np,
planes);
planes);
dual_mesh[indx].setIndex(surf0);
for (int dir = 0; dir < 8; dir++) {
if (src_neibs[dir] != null){
int nsTile1 = src_neibs[dir][0];
......@@ -1085,7 +1091,7 @@ public class TileSurface {
nsTile2, // int nsTile,
dir1from2, // int dir, // direction, or -1 (same)
np1, // int np,
planes);
planes);
dual_mesh[indx].setNeighbor(dir, surf);
}
}
......@@ -1114,10 +1120,10 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return tile_data;
}
}
public TileData [][] compactSortShells (
final TileData [][] tileData_src,
......@@ -1125,8 +1131,8 @@ public class TileSurface {
final int dbg_X,
final int dbg_Y)
{
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final TileData [][] tile_data = new TileData [nTiles][];
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
......@@ -1135,9 +1141,10 @@ public class TileSurface {
final int dbg_tilesX = stilesX * superTileSize;
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
int dbg_stX = nTile % dbg_tilesX;
int dbg_stX = nTile % dbg_tilesX;
int dbg_stY = nTile / dbg_tilesX;
int dbg_st = (dbg_stY / superTileSize) * stilesX + (dbg_stX / superTileSize);
int dl = ((debugLevel > -1) && (dbg_st == dbg_stile)) ? 3:0;
......@@ -1175,16 +1182,17 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
TileData [][] tileData_src_dbg= tileData_src;
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
int dbg_stX = nTile % dbg_tilesX;
int dbg_stX = nTile % dbg_tilesX;
int dbg_stY = nTile / dbg_tilesX;
int dbg_st = (dbg_stY / superTileSize) * stilesX + (dbg_stX / superTileSize);
int dl = ((debugLevel > -1) && (dbg_st == dbg_stile)) ? 3:0;
......@@ -1200,13 +1208,13 @@ public class TileSurface {
if (nTile1 >= 0) {
if ((tile_data[nTile1] == null) || (tileData_src[nTile1][neibs[dir]] == null)){
int dbg_sstile = tile_data[nTile][i].getParentNsTile();
int dbg_stileX = dbg_sstile % stilesX;
int dbg_stileX = dbg_sstile % stilesX;
int dbg_stileY = dbg_sstile / stilesX;
int dbg_tx = nTile % dbg_tilesX;
int dbg_ty = nTile / dbg_tilesX;
int dbg_dx = dbg_tx - (superTileSize * dbg_stileX + superTileSize/2);
int dbg_dy = dbg_ty - (superTileSize * dbg_stileY + superTileSize/2);
int dbg_dx = dbg_tx - (superTileSize * dbg_stileX + superTileSize/2);
int dbg_dy = dbg_ty - (superTileSize * dbg_stileY + superTileSize/2);
System.out.println("Null tile: "+nTile1+ " from "+nTile+", i="+i+", dir = "+dir+
", dbg_stX="+dbg_stX+", dbg_stY="+dbg_stY+", dbg_st="+dbg_st+", neibs[dir]="+neibs[dir]+
" dbg_nsTile = "+dbg_sstile +" ("+dbg_stileX+":"+dbg_stileY+")"+
......@@ -1217,7 +1225,7 @@ public class TileSurface {
// neibs[dir] = tile_data[nTile1][neibs[dir]].getNewIndex();
neibs[dir] = tileData_src[nTile1][neibs[dir]].getNewIndex();
}
}
}
}
......@@ -1226,20 +1234,20 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return tile_data;
}
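// Illustrative sketch (not part of this commit): after the surfaces are compacted,
// every stored neighbor reference has to be remapped from the old per-tile surface
// index to the new (compacted) one, as done above through getNewIndex().
static int remapNeighborSketch(int oldNeib, int [] oldToNew) {
	return (oldNeib < 0) ? oldNeib : oldToNew[oldNeib]; // keep "no neighbor" markers as-is
}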
public void checkShellsConnections (
final TileData [][] tileData,
final int debugLevel,
final int dbg_X,
final int dbg_Y)
{
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
// final TileData [][] tile_data = new TileData [nTiles][];
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
......@@ -1248,10 +1256,11 @@ public class TileSurface {
final int dbg_tilesX = stilesX * superTileSize;
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
int dbg_stX = nTile % dbg_tilesX;
int dbg_stX = nTile % dbg_tilesX;
int dbg_stY = nTile / dbg_tilesX;
int dbg_st = (dbg_stY / superTileSize) * stilesX + (dbg_stX / superTileSize);
int dl = ((debugLevel > -1) && (dbg_st == dbg_stile)) ? 3:0;
......@@ -1269,12 +1278,12 @@ public class TileSurface {
if ((tileData[nTile1] == null) || (tileData[nTile1][neibs[dir]] == null)){
if (debugLevel > -1) {
int dbg_sstile = tileData[nTile][nl].getParentNsTile();
int dbg_stileX = dbg_sstile % stilesX;
int dbg_stileX = dbg_sstile % stilesX;
int dbg_stileY = dbg_sstile / stilesX;
int dbg_tx = nTile % dbg_tilesX;
int dbg_ty = nTile / dbg_tilesX;
int dbg_dx = dbg_tx - (superTileSize * dbg_stileX + superTileSize/2);
int dbg_dy = dbg_ty - (superTileSize * dbg_stileY + superTileSize/2);
int dbg_dx = dbg_tx - (superTileSize * dbg_stileX + superTileSize/2);
int dbg_dy = dbg_ty - (superTileSize * dbg_stileY + superTileSize/2);
System.out.println("Broken link: "+nTile1+ " from "+nTile+", nl="+nl+", dir = "+dir+
", dbg_stX="+dbg_stX+", dbg_stY="+dbg_stY+", dbg_st="+dbg_st+", neibs[dir]="+neibs[dir]+
......@@ -1288,12 +1297,12 @@ public class TileSurface {
if (neibs_other[(dir + 4) % 8] != nl){
if (debugLevel > -1) {
int dbg_sstile = tileData[nTile][nl].getParentNsTile();
int dbg_stileX = dbg_sstile % stilesX;
int dbg_stileX = dbg_sstile % stilesX;
int dbg_stileY = dbg_sstile / stilesX;
int dbg_tx = nTile % dbg_tilesX;
int dbg_ty = nTile / dbg_tilesX;
int dbg_dx = dbg_tx - (superTileSize * dbg_stileX + superTileSize/2);
int dbg_dy = dbg_ty - (superTileSize * dbg_stileY + superTileSize/2);
int dbg_dx = dbg_tx - (superTileSize * dbg_stileX + superTileSize/2);
int dbg_dy = dbg_ty - (superTileSize * dbg_stileY + superTileSize/2);
System.out.println("Link not mutual: "+nTile1+ " from "+nTile+", nl="+nl+", dir = "+dir+
", dbg_stX="+dbg_stX+", dbg_stY="+dbg_stY+", dbg_st="+dbg_st+", neibs[dir]="+neibs[dir]+
......@@ -1311,22 +1320,22 @@ public class TileSurface {
}
}
}
}
}
}
}
};
}
}
ImageDtt.startAndJoin(threads);
}
}
public void addBackShellsConnections (
final TileData [][] tileData,
final int debugLevel,
final int dbg_X,
final int dbg_Y)
{
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
// final TileData [][] tile_data = new TileData [nTiles][];
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
......@@ -1335,10 +1344,11 @@ public class TileSurface {
final int dbg_tilesX = stilesX * superTileSize;
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
int dbg_stX = nTile % dbg_tilesX;
int dbg_stX = nTile % dbg_tilesX;
int dbg_stY = nTile / dbg_tilesX;
int dbg_st = (dbg_stY / superTileSize) * stilesX + (dbg_stX / superTileSize);
int dl = ((debugLevel > -1) && (dbg_st == dbg_stile)) ? 3:0;
......@@ -1356,12 +1366,12 @@ public class TileSurface {
if ((tileData[nTile1] == null) || (tileData[nTile1][neibs[dir]] == null)){
if (debugLevel > 0) {
int dbg_sstile = tileData[nTile][nl].getParentNsTile();
int dbg_stileX = dbg_sstile % stilesX;
int dbg_stileX = dbg_sstile % stilesX;
int dbg_stileY = dbg_sstile / stilesX;
int dbg_tx = nTile % dbg_tilesX;
int dbg_ty = nTile / dbg_tilesX;
int dbg_dx = dbg_tx - (superTileSize * dbg_stileX + superTileSize/2);
int dbg_dy = dbg_ty - (superTileSize * dbg_stileY + superTileSize/2);
int dbg_dx = dbg_tx - (superTileSize * dbg_stileX + superTileSize/2);
int dbg_dy = dbg_ty - (superTileSize * dbg_stileY + superTileSize/2);
System.out.println("Broken link: "+nTile1+ " from "+nTile+", nl="+nl+", dir = "+dir+
", dbg_stX="+dbg_stX+", dbg_stY="+dbg_stY+", dbg_st="+dbg_st+", neibs[dir]="+neibs[dir]+
......@@ -1375,12 +1385,12 @@ public class TileSurface {
if (neibs_other[(dir + 4) % 8] != nl){
if (debugLevel > 0) {
int dbg_sstile = tileData[nTile][nl].getParentNsTile();
int dbg_stileX = dbg_sstile % stilesX;
int dbg_stileX = dbg_sstile % stilesX;
int dbg_stileY = dbg_sstile / stilesX;
int dbg_tx = nTile % dbg_tilesX;
int dbg_ty = nTile / dbg_tilesX;
int dbg_dx = dbg_tx - (superTileSize * dbg_stileX + superTileSize/2);
int dbg_dy = dbg_ty - (superTileSize * dbg_stileY + superTileSize/2);
int dbg_dx = dbg_tx - (superTileSize * dbg_stileX + superTileSize/2);
int dbg_dy = dbg_ty - (superTileSize * dbg_stileY + superTileSize/2);
System.out.println("Link not mutual: "+nTile1+ " from "+nTile+", nl="+nl+", dir = "+dir+
", dbg_stX="+dbg_stX+", dbg_stY="+dbg_stY+", dbg_st="+dbg_st+", neibs[dir]="+neibs[dir]+
......@@ -1407,26 +1417,26 @@ public class TileSurface {
}
}
}
}
}
}
}
};
}
}
ImageDtt.startAndJoin(threads);
}
}
public int getTileLayersNumber (
final TileData [][] tileData)
{
int num = 0;
for (int i = 0; i < tileData.length; i++){
if ((tileData[i] != null) && (tileData[i].length > num )){
num = tileData[i].length;
num = tileData[i].length;
}
}
return num;
......@@ -1437,19 +1447,20 @@ public class TileSurface {
int [] wh = {stilesX*superTileSize, stilesY*superTileSize};
return wh;
}
public double [][][] getTileDisparityStrengths (
final TileData [][] tileData,
final boolean useNaN)
{
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final int numLayers = getTileLayersNumber(tileData);
final double [][][] disp_strength = new double [numLayers][2][tileData.length];
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
if (tileData[nTile] != null){
......@@ -1470,21 +1481,22 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return disp_strength;
}
public int [][][] getTileConnections (
final TileData [][] tileData)
{
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final int numLayers = getTileLayersNumber(tileData);
final int [][][] connections = new int [numLayers][tileData.length][8];
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
if (tileData[nTile] != null) {
......@@ -1501,7 +1513,7 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return connections;
}
......@@ -1509,14 +1521,15 @@ public class TileSurface {
public int [][] getTileGenerator (
final TileData [][] tileData)
{
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final int numLayers = getTileLayersNumber(tileData);
final int [][] generators = new int [numLayers][tileData.length];
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
if (tileData[nTile] != null) {
......@@ -1529,20 +1542,21 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return generators;
}
public int [] getNumSurfaces (
final TileData [][] tileData)
{
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final int nStiles = stilesX * stilesY;
final int nTiles = nStiles * superTileSize * superTileSize;
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final int [] surfaces = new int [tileData.length];
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
if (tileData[nTile] != null) {
......@@ -1553,12 +1567,12 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return surfaces;
}
public void showSurfaceDS (
TileData [][] tileData,
String title)
......@@ -1596,17 +1610,17 @@ public class TileSurface {
for (int j = 0; j < surfaces.length; j++){
img_data[5 * tds.length][j] =surfaces[j];
}
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays();
sdfa_instance.showArrays(img_data, wh[0], wh[1], true, title, titles);
}
/**
* Calculate per-tile surface data (TileData) including disparity, strength, and 8 neighbor indices
* @param use_sel use plane selection (this.sel_mask) to select only some part of the plane
* @param divide_by_area divide weights by ellipsoid area
* @param scale_projection use plane ellipsoid projection for weight: 0 - do not use, > 0 linearly
* scale ellipsoid (enlarge)
* scale ellipsoid (enlarge)
* @param fraction_uni add fraction of the total weight to each tile
* @param planes array of the per-supertile, per plane plane data (each level can be null)
* @param debugLevel debug level
......@@ -1634,7 +1648,7 @@ public class TileSurface {
debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y);
int [][][][] surf_topology = getSupertilesTopology (
planes, // final TilePlanes.PlaneData [][] planes,
debugLevel, // final int debugLevel,
......@@ -1642,14 +1656,14 @@ public class TileSurface {
dbg_Y); // final int dbg_Y);
int [][][][][] overlapped_meshes = generateOverlappingMeshes (
planes, // final TilePlanes.PlaneData [][] planes,
surf_topology , // final int [][][][] corners,
surf_topology , // final int [][][][] corners,
debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y);
TileData [][] tileData = createTileShells (
planes, // final TilePlanes.PlaneData [][] planes,
fused_planes, // final double [][][][] fusedSupertilePlanes,
overlapped_meshes, // final int [][][][][] lappingMeshes,
overlapped_meshes, // final int [][][][][] lappingMeshes,
debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y);
......@@ -1659,7 +1673,7 @@ public class TileSurface {
debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y);
//
//
System.out.println("checkShellsConnections()");
checkShellsConnections (
tileData, // final TileData [][] tileData_src,
......@@ -1671,13 +1685,13 @@ public class TileSurface {
debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y);
if (debugLevel > -1) {
if (debugLevel > -2) {
showSurfaceDS (tileData, "tileData");
}
this.tileData = tileData;
return tileData;
}
public int [] getTilesAssignStats(
final int [][] tileLayers)
{
......@@ -1693,25 +1707,26 @@ public class TileSurface {
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
int numThread = ai_numThread.getAndIncrement(); // unique number of thread to write to rslt_diffs[numThread]
for (int nTile = ai.getAndIncrement(); nTile < tileLayers[fml].length; nTile = ai.getAndIncrement()) {
if (tileLayers[fml][nTile] > 0){ // index + 1
stats_all[numThread][STAT_ASSIGNED] ++;
} else if (tileLayers[fml][nTile] == UNASSIGNED) {
stats_all[numThread][STAT_UNASSIGNED] ++;
} else if (tileLayers[fml][nTile] == PROHOBITED) {
stats_all[numThread][STAT_PROHIBITED] ++;
} else if (tileLayers[fml][nTile] == IMPOSSIBLE) {
stats_all[numThread][STAT_IMPOSSIBLE] ++;
} else {
System.out.println("Bug in getTilesAssignStats(): tileLayers["+fml+"]["+nTile+"]="+tileLayers[fml][nTile]);
stats_all[numThread][0] ++; // prohibited
}
}
}
};
}
}
ImageDtt.startAndJoin(threads);
stats[STAT_NUM_ML]++; // number of non-null measurement layers
}
......@@ -1722,7 +1737,7 @@ public class TileSurface {
}
return stats;
}
public double getNormDispFromSurface(
double disp_tile,
double disp_surf,
......@@ -1735,7 +1750,7 @@ public class TileSurface {
return (disp_tile - disp_surf) * disp_norm / disp_avg;
}
}
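// Illustrative sketch (not part of this commit): the normalization above scales the
// disparity error down for far tiles, e.g. with disp_norm = 5.0 an error of 0.2 pix
// at an average disparity of 10.0 counts the same as 0.1 pix at or below disp_norm.
// The branch on the average disparity is an assumption based on the visible code.
static double normDispSketch(double disp_diff, double disp_avg, double disp_norm) {
	return (disp_avg <= disp_norm) ? disp_diff : disp_diff * disp_norm / disp_avg;
}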
/**
* Convert from image tile index to the surface tile index (surface tiles are all
* full superTileSize),
......@@ -1770,8 +1785,8 @@ public class TileSurface {
}
/**
* Convert from surface tile index (surface tiles are all full superTileSize) to
* the image tile index
......@@ -1792,7 +1807,7 @@ public class TileSurface {
}
return imageTilesX * ty + tx;
}
/**
* Grow around surface tile, without going back. May still produce multi-layer result
* that will need to be filtered. Used to fill large flat gaps
......@@ -1889,9 +1904,9 @@ public class TileSurface {
}
return simple_surf;
}
public double [][] getShowSimpleConnected(
int nsTile0,
boolean [][] simple_surf)
......@@ -1901,7 +1916,7 @@ public class TileSurface {
if (simple_surf[nsTile] != null){
int num_filled = 0;
for (int i = 0; i < simple_surf[nsTile].length; i++) if (simple_surf[nsTile][i]) num_filled++;
if (num_filled > numSurfaces) numSurfaces = num_filled;
if (num_filled > numSurfaces) numSurfaces = num_filled;
}
}
double [][] img_data = new double [numSurfaces][tileData.length];
......@@ -1930,7 +1945,7 @@ public class TileSurface {
if (simple_surf[nsTile] != null){
int num_filled = 0;
for (int i = 0; i < simple_surf[nsTile].length; i++) if (simple_surf[nsTile][i] !=0) num_filled++;
if (num_filled > numSurfaces) numSurfaces = num_filled;
if (num_filled > numSurfaces) numSurfaces = num_filled;
}
}
double [][] img_data = new double [numSurfaces][tileData.length];
......@@ -1960,7 +1975,7 @@ public class TileSurface {
if (simple_surf[nsTile] != null){
int num_filled = 0;
for (int i = 0; i < simple_surf[nsTile].length; i++) if (simple_surf[nsTile][i] !=0) num_filled++;
if (num_filled > numSurfaces) numSurfaces = num_filled;
if (num_filled > numSurfaces) numSurfaces = num_filled;
}
}
double [][] img_data = new double [numSurfaces][tileData.length];
......@@ -1968,7 +1983,7 @@ public class TileSurface {
if (simple_surf[nsTile] != null){
int ns = 0;
for (int nl = 0; nl < simple_surf[nsTile].length; nl++) if (simple_surf[nsTile][nl] != 0){
img_data[ns][nsTile] = show_distance? simple_surf[nsTile][nl]: nl;
ns++;
}
......@@ -1979,9 +1994,9 @@ public class TileSurface {
}
return img_data;
}
public void testSimpleConnected(
int tileX,
int tileY)
......@@ -1994,10 +2009,10 @@ public class TileSurface {
15, // int height,
1, // int extraH,
1); // int extraV)
}
public void testSimpleConnected(
int nsTile)
{
......@@ -2012,7 +2027,7 @@ public class TileSurface {
data[nl] = getShowSimpleConnected(nsTile, simple_surf[nl]);
dist[nl] = getShowSimpleConnectedDistanceLayer(true,simple_surf[nl]);
layer[nl] = getShowSimpleConnectedDistanceLayer(false,simple_surf[nl]);
num_layers += data[nl].length;
}
double [][] img_data = new double [num_layers * 3][];
String [] titles = new String [num_layers * 3];
......@@ -2030,7 +2045,7 @@ public class TileSurface {
}
sdfa_instance.showArrays(img_data, stilesX * superTileSize, stilesY * superTileSize, true, "simple_"+nsTile, titles);
}
public void printSurfaceConnections(
int left,
int top,
......@@ -2045,7 +2060,7 @@ public class TileSurface {
for (int tileX = left; tileX < (left + width); tileX++) {
int nsTile = tileY * tilesX + tileX;
if (tileData[nsTile] != null){
if (tileData[nsTile].length > numSurfaces) numSurfaces = tileData[nsTile].length;
if (tileData[nsTile].length > numSurfaces) numSurfaces = tileData[nsTile].length;
}
}
}
......@@ -2093,7 +2108,7 @@ public class TileSurface {
} else {
System.out.println();
}
}
}
}
if (tileY < (top + height -1)){
System.out.print(vert_gap);
......@@ -2101,13 +2116,13 @@ public class TileSurface {
}
}
}
/**
* Assign tiles to a certain disparity surface if there is only one surface candidate
* @param noEdge do not assign tiles to the surface edges (can not add border later)
* @param maxDiff maximal (normalized) disparity difference
* @param minDiffOther minimal disparity difference to closest 2-nd place candidate
* @param minStrength minimal processed (floor subtracted) correlation strength of the candidate
* @param minStrength minimal processed (floor subtracted) correlation strength of the candidate
* @param maxStrength maximal processed (floor subtracted) correlation strength of the candidate
* @param moveDirs +1 - allow moving the tile closer to the camera (increase disparity), +2 - allow moving away
* @param dispNorm disparity normalization - disparity difference with average above it will be scaled down
......@@ -2118,7 +2133,7 @@ public class TileSurface {
* @param debugLevel debug level
* @param dbg_X debug tile X coordinate
* @param dbg_Y debug tile Y coordinate
* @return statistics array
*/
public int [] assignTilesToSingleCandidate_old( // not used
final boolean noEdge,
......@@ -2133,7 +2148,7 @@ public class TileSurface {
final int dbg_X,
final int dbg_Y)
{
int [] stats_new = new int [NUM_STATS];
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final int numThreads = threads.length;
......@@ -2148,6 +2163,7 @@ public class TileSurface {
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
int numThread = ai_numThread.getAndIncrement(); // unique number of thread to write to rslt_diffs[numThread]
for (int nTile = ai.getAndIncrement(); nTile < tileLayers[fml].length; nTile = ai.getAndIncrement()) {
......@@ -2209,7 +2225,7 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
}
for (int nt = 0; nt < numThreads; nt ++){
......@@ -2258,7 +2274,7 @@ public class TileSurface {
if (combo[nTile] != 1 ){
img_data[num_in][nTile] = Double.NaN;
}
img_data[num_in+1][nTile] = combo[nTile];
}
if (debugLevel > -1) {
......@@ -2266,7 +2282,7 @@ public class TileSurface {
sdfa_instance.showArrays(img_data, imageTilesX, imageTilesY, true, "consensus",titles);
}
}
public int [][] getConsensusAssignment(
final int min_agree,
int [][][] opinions_in, // opinions contain 1-based surface indices
......@@ -2297,7 +2313,7 @@ public class TileSurface {
for (int n = 0; n < num_in; n++)if (tileAssignments[n] != null){
int surf1 = tileAssignments[n][ml][nTile];
if (surf1 != 0){
consensus[ml][nTile] = surf1;
consensus[ml][nTile] = surf1;
if (surf1 < 0) { // prohibited
break;
} else { // surface
......@@ -2320,13 +2336,13 @@ public class TileSurface {
}
}
}
}
}
return consensus;
}
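// Illustrative sketch (not part of this commit): a per-tile consensus keeps a surface
// only when at least min_agree of the individual assignments voted for it; 0 means
// "no consensus" (tile stays unassigned), surface indices are 1-based as above.
static int consensusSurfaceSketch(int [] votesPerSurface, int minAgree) {
	int best = 0;
	for (int s = 1; s < votesPerSurface.length; s++) {
		if ((votesPerSurface[s] >= minAgree) &&
				((best == 0) || (votesPerSurface[s] > votesPerSurface[best]))) {
			best = s;
		}
	}
	return best;
}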
/**
* Assign tiles that were used to generate planes. Only tiles in the center (non-overlapping) part of the supertile
* @param force re-assign tile if it was already assigned
......@@ -2337,7 +2353,7 @@ public class TileSurface {
* @param dbg_Y debug supertile Y coordinate
* @return statistics array
*/
public int [] assignPlanesTiles(
final boolean force,
final int [][] tileLayers,
......@@ -2353,14 +2369,15 @@ public class TileSurface {
// final TileNeibs tnImage = new TileNeibs(imageTilesX, imageTilesY);
// final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final int debug_stile = dbg_Y * stilesX + dbg_X;
final Thread[] threads = ImageDtt.newThreadArray((debugLevel > 1)? 1 : threadsMax);
final Thread[] threads = ImageDtt.newThreadArray((debugLevel > 1)? 1 : threadsMax);
final int numThreads = threads.length;
final int [][] stats_all = new int [numThreads][stats_new.length];
final AtomicInteger ai_numThread = new AtomicInteger(0);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
int numThread = ai_numThread.getAndIncrement(); // unique number of thread to write to rslt_diffs[numThread]
for (int nsTile = ai.getAndIncrement(); nsTile < nsTiles; nsTile = ai.getAndIncrement()) if (planes[nsTile] != null){
......@@ -2371,7 +2388,7 @@ public class TileSurface {
if (dl > 2){
System.out.println("assignPlanesTiles(): nsTile = " + nsTile);
}
for (int np = 0; np < planes[nsTile].length; np++) if (planes[nsTile][np] != null){
boolean [][] meas_sel = planes[nsTile][np].getMeasSelection();
// is it needed or is tileLayers already initialized?
......@@ -2407,14 +2424,14 @@ public class TileSurface {
(superTileSize * sty + dy)+")" +
" dx:y="+dx+":"+dy+" nTile="+nTile+" nSurfTile="+nSurfTile+" ns="+ns);
}
if (ns < 0) {
System.out.println("assignPlanesTiles(): BUG? Could not find a surface with parent supertile "+
nsTile+":"+np+" for image tile = "+nTile+" ("+ (superTileSize * stx + dx)+"/"+
(superTileSize * sty + dy)+")");
}
}
if ((ns >= 0) && (force || (tileLayers[ml][nTile] == 0))) {
boolean bad_edge = noEdge;
if (bad_edge) {
......@@ -2441,7 +2458,7 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
for (int nt = 0; nt < numThreads; nt ++){
for (int i = 0 ; i < stats_new.length; i++ ){
......@@ -2450,7 +2467,7 @@ public class TileSurface {
}
return stats_new;
}
/**
* Assign tiles to a disparity surface if there is only one surface at all
* @param tileLayers per measured layer, per tile: assigned index plus1, 0 - empty, or negative - prohibited
......@@ -2467,7 +2484,7 @@ public class TileSurface {
final int dbg_X,
final int dbg_Y)
{
int [] stats_new = new int [NUM_STATS];
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final int numThreads = threads.length;
......@@ -2480,6 +2497,7 @@ public class TileSurface {
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
int numThread = ai_numThread.getAndIncrement(); // unique number of thread to write to rslt_diffs[numThread]
for (int nTile = ai.getAndIncrement(); nTile < tileLayers[fml].length; nTile = ai.getAndIncrement()) {
......@@ -2515,7 +2533,7 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
}
for (int nt = 0; nt < numThreads; nt ++){
......@@ -2525,7 +2543,7 @@ public class TileSurface {
}
return stats_new;
}
/**
* Grow assigned tiles while strength is below maxStrength OR normalized (dispNorm) disparity error is below
* maxDiff AND new tile is unassigned. Combines all measured layers, assumes same cell was not assigned to
......@@ -2550,7 +2568,7 @@ public class TileSurface {
* @param dbg_Y debug tile Y coordinate
* @return statistics array
*/
public int [] growWeakAssigned(
final int [][] tileLayers,
final int [][] conflicts,
......@@ -2568,7 +2586,7 @@ public class TileSurface {
// final int tiles = stilesX * superTileSize * stilesY * superTileSize;
final boolean en_strong = (maxDiffFar != null) && (maxDiffNear != null); // both should be specified
final boolean en_continue = minStrengthContinue != null;
final int img_tiles = imageTilesX * imageTilesY;
// final TileNeibs tnSurface = new TileNeibs(stilesX * superTileSize, stilesY * superTileSize);
final TileNeibs tnImage = new TileNeibs(imageTilesX, imageTilesY);
......@@ -2660,13 +2678,13 @@ public class TileSurface {
good_disparity = (surf_disp_diff <= maxDiffNear[ml]) && (surf_disp_diff >= -maxDiffFar[ml]);
}
// Strong tiles can only be near the end of expansion - should not go back to weak after strong
// if started from weak, can add any weak or with disparity
// if started from weak, can add any weak or with disparity
if (is_weak_start) {
if (!is_weak_new && !good_disparity){
is_good_tile = false;
break;
}
// if started from not weak - disparity should match and the tile should be "really strong" ( > minStrengthContinue)
// if started from not weak - disparity should match and the tile should be "really strong" ( > minStrengthContinue)
}else {
if (!good_disparity || !en_continue || (strength < minStrengthContinue[ml])) {
is_good_tile = false;
......@@ -2677,7 +2695,7 @@ public class TileSurface {
if (is_good_tile) {
// here - OK to add a new tile
// is it a conflict?
if (flat_assign[nTile1] > 0) { // yes, a conflict
if (flat_assign[nTile1] > 0) { // yes, a conflict
conflict[1] |= 1 << dir;
} else { // new empty cell - add it
flat_assign[nTile1] = ns1 + 1;
......@@ -2722,9 +2740,9 @@ public class TileSurface {
}
return stats;
}
public void printStats(int []stats)
{
boolean nothing = true;
......@@ -2745,7 +2763,7 @@ public class TileSurface {
}
System.out.println();
}
public boolean makesSensToTry(int [] stats)
{
return ((stats[NEW_ASSIGNED] > 0) && (stats[NOT_UNIQUE] > 0));
......@@ -2754,7 +2772,7 @@ public class TileSurface {
{
return stats[NEW_ASSIGNED];
}
public void showAssignment(
String title,
final double [][][] dispStrength)
......@@ -2793,22 +2811,22 @@ public class TileSurface {
img_data[ng * ml + layer_index][nTile] = tileLayers[ml][nTile];
}
}
}
}
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays();
sdfa_instance.showArrays(img_data, imageTilesX, imageTilesY, true, title, titles);
}
/**
* Unassign tiles that have too few connected other tiles (or total weight of the cluster is too small)
* This is a single-threaded method
* @param tileLayers integer array of per measurement layer, per tile assigned surface indices (modified)
* @param minSize minimal tiles in the cluster
* @param minStrength minimal total strength of the cluster
* @param minStrength minimal total strength of the cluster
* @param dispStrength per measurement layer, combined disparity and strength array ([num_ml][2][])
* @param debugLevel debug level
* @param dbg_X debug tile X coordinate
* @param dbg_Y debug tile Y coordinate
* @return {number of tiles, number of clusters} removed
* @return {number of tiles, number of clusters} removed
*/
public int [] removeSmallClusters(
final int [][] tileLayers,
......@@ -2832,7 +2850,7 @@ public class TileSurface {
for (int nTile0 = 0; nTile0 < tileLayers[ml].length; nTile0++) if ((tileLayers[ml][nTile0] > 0) && !wave_conf[ml][nTile0]){
ArrayList<Point> wave_list = new ArrayList<Point>();
double sum_weight = 0.0;
int tailp = 0; // do not remove elements from the list while building the cluster, just advance tail pointer
int tailp = 0; // do not remove elements from the list while building the cluster, just advance tail pointer
Point p = new Point(nTile0, ml);
sum_weight += dispStrength[p.y][1][p.x];
wave_conf[p.y][p.x] = true;
......@@ -2863,15 +2881,15 @@ public class TileSurface {
Point pt = wave_list.remove(0);
tileLayers[pt.y][pt.x] = 0;
wave_conf [pt.y][pt.x] = false; // not necessary
stats_new[REMOVED_TILES]++;
}
stats_new[REMOVED_CLUSTERS]++;
} else { // it is a strong cluster, nothing to do here (it is already marked in wave_conf[][]
}
}
}
}
return stats_new;
}
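// Illustrative sketch (not part of this commit): a connected cluster collected by the
// wavefront above is kept only if it is both large enough and strong enough; the exact
// comparison operators are an assumption, the per-tile strengths come from dispStrength.
static boolean keepClusterSketch(int numTiles, double sumStrength, int minSize, double minStrength) {
	return (numTiles >= minSize) && (sumStrength >= minStrength);
}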
......@@ -2888,9 +2906,9 @@ public class TileSurface {
* @param debugLevel debug level
* @param dbg_X debug tile X coordinate
* @param dbg_Y debug tile Y coordinate
* @return {number of tiles, number of clusters} removed
* @return {number of tiles, number of clusters} removed
*/
public int [] assignFromFarthest(
final int [][] tileLayers,
final boolean noEdge,
......@@ -2916,17 +2934,18 @@ public class TileSurface {
final AtomicInteger ai = new AtomicInteger(0);
final TileNeibs tnImage = new TileNeibs(imageTilesX, imageTilesY);
final TileNeibs tnSurface = new TileNeibs(stilesX * superTileSize, stilesY * superTileSize);
for (int ml = 0; ml < tileLayers.length; ml++) if (tileLayers[ml] != null){
final int fml = ml;
ai_numThread.set(0);
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
int numThread = ai_numThread.getAndIncrement(); // unique number of thread to write to rslt_diffs[numThread]
for (int nTile = ai.getAndIncrement(); nTile < tileLayers_src[fml].length; nTile = ai.getAndIncrement()) {
//nTile is in image, not surface coordinates
//nTile is in image, not surface coordinates
int dbg_tileX = nTile % imageTilesX;
int dbg_tileY = nTile / imageTilesX;
int dl = ((debugLevel > -1) && (dbg_tileX == dbg_X ) && (dbg_tileY == dbg_Y ))?3:0;
......@@ -2964,7 +2983,7 @@ public class TileSurface {
break;
}
}
if (!(disp >= min_disp) && !bad_surface) {
best_nSurf = nSurf;
min_disp = disp;
......@@ -2973,7 +2992,7 @@ public class TileSurface {
}
}
} else {
neib_exists = includeImpossible;
neib_exists = includeImpossible;
}
if (neib_exists){
numNeibs++;
......@@ -2988,7 +3007,7 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
}
for (int nt = 0; nt < numThreads; nt ++){
......@@ -2998,8 +3017,8 @@ public class TileSurface {
}
return stats_new;
}
/**
* Assign tiles to a certain disparity surface if there is only one surface candidate
* @param tileLayers integer array of per measurement layer, per tile assigned surface indices (modified)
......@@ -3007,7 +3026,7 @@ public class TileSurface {
* @param useCenter only assign outside of 8x8 center if no suitable alternative
* @param maxDiff maximal (normalized) disparity difference
* @param minDiffOther minimal disparity difference to closest 2-nd place candidate
* @param minStrength minimal processed (floor subtracted) correlation strength of the candidate
* @param minStrength minimal processed (floor subtracted) correlation strength of the candidate
* @param maxStrength maximal processed (floor subtracted) correlation strength of the candidate
* @param minSurfStrength minimal surface strength at the tile location
* @param moveDirs +1 - allow moving the tile closer to the camera (increase disparity), +2 - allow moving away
......@@ -3028,7 +3047,7 @@ public class TileSurface {
* @param dbg_Y debug tile Y coordinate
* @return statistics array
*/
public int [] assignTilesToSurfaces(
final int [][] tileLayers,
final boolean noEdge,
......@@ -3052,7 +3071,7 @@ public class TileSurface {
final int dbg_X,
final int dbg_Y)
{
final int [][] tileLayers_src = tileLayers.clone();
for (int i = 0; i < tileLayers_src.length; i++){
if (tileLayers_src[i] != null){
......@@ -3069,7 +3088,7 @@ public class TileSurface {
final boolean en_higher = (moveDirs & 2) != 0;
final double radius = sigma * nSigma;
final double rsigma2 = 1.0 / ( 2.0 * sigma * sigma);
final int iradius = (int) Math.round(radius + 0.001);
final int iradius = (int) Math.round(radius + 0.001);
final int field_size = 2 * iradius + 1;
final int center_index = iradius * (field_size + 1);
final double cost_start = 1.0;
......@@ -3084,22 +3103,23 @@ public class TileSurface {
field_size,
field_size - 1,
-1,
-field_size - 1};
-field_size - 1};
for (int ml = 0; ml < tileLayers.length; ml++) if (tileLayers[ml] != null){
final int fml = ml;
ai_numThread.set(0);
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
int numThread = ai_numThread.getAndIncrement(); // unique number of thread to write to rslt_diffs[numThread]
for (int nTile = ai.getAndIncrement(); nTile < tileLayers_src[fml].length; nTile = ai.getAndIncrement()) {
//nTile is in image, not surface coordinates
int dbg_tileX = nTile % imageTilesX;
int dbg_tileY = nTile / imageTilesX;
int dl = ((debugLevel > -1) && (dbg_tileX == dbg_X ) && (dbg_tileY == dbg_Y ))?3:0;
if (tileLayers_src[fml][nTile] == 0){ // unassigned only
if (dispStrength[fml][1][nTile] < minStrength){
stats_all[numThread][TOO_WEAK] ++;
......@@ -3193,15 +3213,15 @@ public class TileSurface {
if (dl > 0) {
System.out.print("assignTilesToSurfaces(): nTile="+nTile+", candidates=");
for (int ii = 0; ii < candidates.length; ii++){
System.out.print(" "+candidates[ii]);
System.out.print(" "+candidates[ii]);
}
System.out.println();
}
double [][][] distances = new double [num_fit_other][field_size * field_size ][];
// for each local index get surface tile index
int [] surfIndices = new int [field_size * field_size];
int [] imageIndices = new int [field_size * field_size];
int stx0 = (nTile % imageTilesX) - iradius; //
int sty0 = (nTile / imageTilesX) - iradius;
for (int iy = 0; iy < field_size; iy++){
for (int ix = 0; ix < field_size; ix++){
......@@ -3280,7 +3300,7 @@ public class TileSurface {
}
}
}
}
}
if (dl > 0) {
for (int cand = 0; cand < distances.length; cand ++){
int num_dist_layers = 0;
......@@ -3306,8 +3326,8 @@ public class TileSurface {
}
}
}
// pulls belong to surface pairs, not to individual surfaces (the difference matters where the surfaces cross)
double [][] surface_pulls = new double [num_fit_other][num_fit_other];
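// each contributing tile adds exp(-r*r*rsigma2) * strength to surface_pulls[is1][is2], with r = distances[is1][lindx][nsurf] - cost_start (see below)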
// now calculate the advantage of each close_enough surface over each of the other candidates (as a ratio)
......@@ -3347,7 +3367,7 @@ public class TileSurface {
if (good_pair){
double r = distances[is1][lindx][nsurf] - cost_start;
// pull to is1 when in pair with is2
surface_pulls[is1][is2] += Math.exp(- r * r * rsigma2) * strength ;
}
}
}
......@@ -3356,7 +3376,7 @@ public class TileSurface {
}
}
}
double [][] advantages = new double [num_fit_other][num_fit_other];
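// advantages[is1][is2] is the fitness ratio of candidate is1 to is2; a candidate wins only if this ratio is at least minAdvantage against every other close-enough candidate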
for (int is1 = 0; is1 < num_fit_other; is1++){
for (int is2 = is1 + 1; is2 < num_fit_other; is2++){
......@@ -3377,7 +3397,7 @@ public class TileSurface {
}
advantages[is1][is2] = ad1/ad2;
advantages[is2][is1] = ad2/ad1;
if (surfStrPow != 0.0){ // consider surface strength also
double str1 = tileData[nSurfTile][candidates[is1]].getStrength();
double str2 = tileData[nSurfTile][candidates[is2]].getStrength(); // strength of the second candidate (is2)
if ((str1 > 0.0) && (str2 > 0.0)){
......@@ -3385,13 +3405,13 @@ public class TileSurface {
advantages[is2][is1] = 1.0/advantages[is1][is2];
} else if (str1 > 0.0) {
advantages[is1][is2] = 2.0 * minAdvantage; // sure will win
advantages[is2][is1] = (minAdvantage > 0.0) ? (1.0/advantages[is1][is2]) : 0.0;
//minAdvantage
} else if (str2 > 0.0) {
advantages[is2][is1] = 2.0 * minAdvantage; // sure will win
advantages[is1][is2] = (minAdvantage > 0.0) ? (1.0/advantages[is2][is1]) : 0.0;
} else { // both zero - do nothing about surface strengths
}
}
}
......@@ -3402,7 +3422,7 @@ public class TileSurface {
fit = -1;
for (int is1 = 0; is1 < num_fit_other; is1++){
if (close_enough[is1]){ //
boolean is_a_winner = true;
for (int is2 = is1 + 1; is2 < num_fit_other; is2++){
if (advantages[is1][is2] < minAdvantage){
if (dl > 0) {
......@@ -3414,7 +3434,7 @@ public class TileSurface {
}
break;
}
}
}
if (is_a_winner){
fit = is1;
if (dl > 0) {
......@@ -3437,7 +3457,7 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
}
for (int nt = 0; nt < numThreads; nt ++){
......@@ -3453,7 +3473,7 @@ public class TileSurface {
return this.tileLayers != null;
}
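// merge per-layer assignments from src into dst: copy non-zero src entries where dst is unassigned, or everywhere when overwrite is set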
public int [][] combineTileLayers(
final boolean overwrite,
final int [][] dst,
......@@ -3467,15 +3487,15 @@ public class TileSurface {
for (int i = 0; i < src[ml].length; i++) if ((src[ml][i] != 0) && (overwrite || (dst[ml][i] == 0))){
dst[ml][i] = src[ml][i];
}
}
}
}
return dst;
}
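// allocate per-layer assignment arrays from the tile selection: 0 - unassigned (tile selected), -1 - prohibited (tile not selected)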
public int [][] newTileLayers(
final boolean [][] tileSel
){
......@@ -3484,14 +3504,14 @@ public class TileSurface {
if (tileSel[ml] != null){
tileLayers[ml] = new int [tileSel[ml].length];
for (int i = 0; i < tileSel[ml].length; i++){
tileLayers[ml][i] = tileSel[ml][i] ? 0: -1; // 0 - unassigned, -1 - prohibited
}
}
}
return tileLayers;
}
public int [] InitTilesAssignment(
final boolean force,
final double [][][] dispStrength,
......@@ -3516,7 +3536,7 @@ public class TileSurface {
final int [][] tileLayers,
final boolean [][] tileSel,
final int debugLevel)
{
int []stats = getTilesAssignStats(tileLayers);
if (debugLevel >= -1) {
System.out.println("sortTilesToSurfaces(): using "+stats[STAT_NUM_ML] +" measurement layers"+
......@@ -3527,8 +3547,8 @@ public class TileSurface {
}
return stats;
}
public boolean [][] extractSelection(
final int debugLevel,
final int dbg_X,
......@@ -3552,9 +3572,9 @@ public class TileSurface {
}
}
return assigned_tiles;
}
}
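// label connected components of the selection with 1-based cluster numbers (0 - not in any cluster)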
public int [][] enumerateClusters(
final boolean [][] selection,
final int debugLevel,
......@@ -3589,7 +3609,7 @@ public class TileSurface {
}
}
}
num_cluster ++;
}
}
num_cluster --;
......@@ -3598,7 +3618,7 @@ public class TileSurface {
}
return enum_clust;
}
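// the number of clusters equals the highest cluster number present (clusters are numbered starting from 1)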
public int getNumClusters(
int [][] clusters)
{
......@@ -3611,9 +3631,9 @@ public class TileSurface {
}
}
return max_number;
}
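// collect per-cluster statistics (tile count, number of layers, overlaps, conflicts) and sort the clusters by tile count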
public int [][] clusterStats(
int [][] clusters,
final int debugLevel,
......@@ -3671,7 +3691,7 @@ public class TileSurface {
}
}
}
}
}
}
// now sort clusters by the number of tiles in each
ArrayList<Point> sort_list = new ArrayList<Point>(max_number);
......@@ -3704,7 +3724,7 @@ public class TileSurface {
System.out.print(i+": #"+ cluster_stats[i][CLUST_NUM_INDEX]);
System.out.print( " tiles: "+ cluster_stats[i][CLUST_NUM_TILES]);
System.out.print( " layers: "+ cluster_stats[i][CLUST_NUM_LAYERS]);
System.out.print( " overlaps: "+ cluster_stats[i][CLUST_NUM_OVERLAPS]);
System.out.print( " multi-overlaps: "+ cluster_stats[i][CLUST_NUM_MULTI]);
System.out.print( " coflicts A: "+ cluster_stats[i][CLUST_NUM_CONFLICTS_A]);
......@@ -3743,7 +3763,7 @@ public class TileSurface {
for (int i = 0; i < cluster_stats.length; i ++){
rev_clust[cluster_stats[i][CLUST_NUM_INDEX] - 1] = i;
}
String [] titles = new String [num_slices];
int [] slice_first = new int [num_slices];
int nslice = 0;
......@@ -3753,7 +3773,7 @@ public class TileSurface {
titles[nslice++] = "cluster-"+i+((cluster_stats[i][CLUST_NUM_LAYERS]>1)?("_"+j):"");
}
}
for (int nTile = 0; nTile < num_tiles; nTile++) if (clusters[nTile] != null){
int nSurfTile = getSurfaceTileIndex(nTile);
boolean [] rend = new boolean [clusters[nTile].length];
......@@ -3781,12 +3801,12 @@ public class TileSurface {
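// grow a cluster (window-local [tile][layer] selection) by one tile in each of the 8 directions, following the per-tile surface neighbor links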
public boolean [][] growClusterOnce( // used from inside threads
final boolean [][] cluster_local_pure,
final int [] window,
final int [] img_indices,
final int [] surf_indices,
final int debugLevel,
final int dbg_X,
final int dbg_Y)
{
int num_tiles = img_indices.length;
int width = window[2];
......@@ -3815,10 +3835,10 @@ public class TileSurface {
for (int nl0 = 0; nl0 < cluster_local_pure[neTile0].length; nl0++){
if (cluster_local_pure[neTile0][nl0]){ // source should be single-layer, but ...
int nSurfTile0 = surf_indices[neTile0];
int [] neibs = tileData[nSurfTile0][nl0].getNeighbors();
for (int dir = 0; dir < tnWindow.dirs; dir++) {
int nl1 = neibs[dir];
if (nl1 < 0){
if (debugLevel >-1) {
System.out.println("growClusterOnce(): Expected 8 neighbors for tile nSurfTile0="+
......@@ -3857,10 +3877,10 @@ public class TileSurface {
}
System.out.println("growClusterOnce(): new number of tiles = "+dbg_ntiles2+" (was "+dbg_ntiles1+")");
}
return grown_cluster;
}
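// build a SurfaceData instance for each group of merged clusters: its bounding box, the combined tile selection of the group, and that selection grown by one tile for the border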
public void setSurfaceData(
final int [][] cluster_groups,
final int [][] clusters_pure, // each tile/layer belongs to a single pure cluster
......@@ -3877,19 +3897,20 @@ public class TileSurface {
final int [] tilesWH = {imageTilesX, imageTilesY};
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int iGClust = ai.getAndIncrement(); iGClust < sdata.length; iGClust = ai.getAndIncrement()) {
int dl = ((debugLevel > -1) && (iGClust == dbg_X)) ? 1:0;
int [] window = getClusterBBox(
cluster_groups[iGClust], // final int [] nClust, // 1-based
1, // final int border,
clusters_pure); // final int [][] clusters)
int [][] tile_indices = getClusterBBoxIndices(
window, // final int [] window,
0); // border); // final int border) // maybe 0, actual value just saves time
int [] img_tile_indices = tile_indices[0];
int [] surf_tile_indices = tile_indices[1];
boolean [][] combined = new boolean [img_tile_indices.length][];
if (dl > 0){
......@@ -3907,7 +3928,7 @@ public class TileSurface {
cluster_groups[iGClust][indxClust], // final int numToSplit, // 1-based
clusters_pure, // final int [][] clusters,
window, // final int [] window,
img_tile_indices, // final int [] bbox_indices,
debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y)
......@@ -3931,14 +3952,14 @@ public class TileSurface {
pure_sel[neTile] = true;
break;
}
}
}
}
}
boolean [][] grown_combined = growClusterOnce( // used from inside threads
combined, // final boolean [][] cluster_local_pure,
window, // final int [] window,
img_tile_indices, // final int [] bbox_indices,
surf_tile_indices, // final int [] surf_indices,
0, // dl, // debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y)
......@@ -3954,7 +3975,7 @@ public class TileSurface {
}
break;
}
}
}
}
}
sdata[iGClust] = new SurfaceData(
......@@ -3980,7 +4001,7 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
ArrayList<SurfaceData> sdata_list = new ArrayList<SurfaceData>();
for (int i = 0; i < sdata.length; i++){
......@@ -4007,7 +4028,7 @@ public class TileSurface {
System.out.println("setSurfaceData(): DONE");
}
}
public SurfaceData getSurfaceData(int indx){
return this.surfaceData[indx];
}
......@@ -4015,7 +4036,7 @@ public class TileSurface {
public int getSurfaceDataLength(){
return this.surfaceData.length;
}
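// within each grown cluster, greedily combine the original sub-clusters that touch each other but do not conflict (overlap on the same tile/layer), largest first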
public int [][] mergeNoConflict(
final int [][] matchedGrown,
final int [][] clusters_grown,
......@@ -4029,11 +4050,12 @@ public class TileSurface {
}
final int num_grown = matchedGrown.length;
final int [][][] pre_merged_subs = new int [num_grown][][];
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int iGClust = ai.getAndIncrement(); iGClust < num_grown; iGClust = ai.getAndIncrement()) {
int num_subs = matchedGrown[iGClust].length;
......@@ -4048,7 +4070,7 @@ public class TileSurface {
if (dl > 3){
System.out.println("mergeNoConflict(): nGClust: "+nGClust+" num_subs= "+num_subs);
}
// create a window for the grown cluster; it will include all of its sub-clusters
int [] window = getClusterBBox(
nGClust, // 1-based
......@@ -4058,7 +4080,7 @@ public class TileSurface {
int [][] tile_indices = getClusterBBoxIndices(
window, // final int [] window,
0); // border); // final int border) // maybe 0, actual value just saves time
int [] img_tile_indices = tile_indices[0];
int [] surf_tile_indices = tile_indices[1];
int num_tiles = img_tile_indices.length;
int [] clust_sizes = new int [num_subs];
......@@ -4069,16 +4091,16 @@ public class TileSurface {
matchedGrown[iGClust][nSub], // final int numToSplit, // 1-based
clusters_pure, // final int [][] clusters,
window, // final int [] window,
img_tile_indices, // final int [] bbox_indices,
debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y)
subs_grown[nSub] = growClusterOnce( // used from inside threads
subs_pure[nSub], // final boolean [][] cluster_local_pure,
window, // final int [] window,
img_tile_indices, // final int [] bbox_indices,
surf_tile_indices, // final int [] surf_indices,
0, // dl, // debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y)
......@@ -4086,7 +4108,7 @@ public class TileSurface {
// now build sets of clusters
// a) connected to current (and having higher number)
// b) conflicting with current (and having higher number)
ArrayList<HashSet<Integer>> touching_list = new ArrayList<HashSet<Integer>>();
ArrayList<HashSet<Integer>> conflict_list = new ArrayList<HashSet<Integer>>();
for (int nSub = 0; nSub < num_subs; nSub++){
touching_list.add(new HashSet<Integer>());
......@@ -4116,7 +4138,7 @@ public class TileSurface {
}
}
}
ArrayList<HashSet<Integer>> groups_list = new ArrayList<HashSet<Integer>>();
HashSet<Integer> clusters_left = new HashSet<Integer>();
for (int nSub = 0; nSub < num_subs; nSub++){
......@@ -4127,7 +4149,7 @@ public class TileSurface {
HashSet<Integer> candidates = new HashSet<Integer>();
HashSet<Integer> conflicts = new HashSet<Integer>();
// HashSet<Integer> touching = new HashSet<Integer>();
// start with the largest of the remaining clusters
Integer best_sub = -1;
for (Integer sc:clusters_left){
......@@ -4142,7 +4164,7 @@ public class TileSurface {
conflicts.addAll(conflict_list.get(best_sub)); // keep track of all accumulated conflicts
candidates.addAll(touching_list.get(best_sub)); // add all clusters that are touching the current selection
candidates.removeAll(conflicts); // remove all conflicts
while (!candidates.isEmpty()) { // add more clusters if possible
// Find the largest one
best_sub = -1;
......@@ -4173,7 +4195,7 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
// "flatten" pre_merged_subs
int num_new_groups = 0;
......@@ -4190,7 +4212,7 @@ public class TileSurface {
merged_subs[indx++] = pre_merged_subs[ng][nc];
}
}
if (debugLevel > 0) {
for (int i = 0; i < num_new_groups; i++){
System.out.print("mergeNoConflict(): "+ (i+1)+" [");
......@@ -4201,12 +4223,12 @@ public class TileSurface {
System.out.println(" ]");
}
}
return merged_subs;
}
/**
* Grow each of the clusters (encoded as positive cluster numbers per tile, per layer) by 1 tile in each
* of the 8 directions. As the grown clusters may now overlap, the result is encoded as a boolean array [cluster][tile][layer]
......@@ -4230,6 +4252,7 @@ public class TileSurface {
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int iClust = ai.getAndIncrement(); iClust < num_clusters; iClust = ai.getAndIncrement()) {
int nClust = iClust + 1; // 1-based
......@@ -4241,10 +4264,10 @@ public class TileSurface {
}
clusters[iClust][nTile0][nl0] = true;
int nSurfTile0 = getSurfaceTileIndex(nTile0);
int [] neibs = tileData[nSurfTile0][nl0].getNeighbors();
for (int dir = 0; dir < tnImage.dirs; dir++) {
int nl1 = neibs[dir];
if (nl1 < 0){
if (debugLevel >-1) {
System.out.println("growEachCluster(): Expected 8 neighbors for tile nSurfTile0="+
......@@ -4269,14 +4292,14 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return clusters;
}
/**
* Match grown/merged clusters to the ones they were made of
* @param clusters_grown per-tile, per layer array of positive merged cluster numbers (0 - empty)
......@@ -4299,6 +4322,7 @@ public class TileSurface {
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int iparent = ai.getAndIncrement(); iparent < num_grown; iparent = ai.getAndIncrement()) {
int num_parent = iparent + 1; // 1-based
......@@ -4322,7 +4346,7 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
if (debugLevel > 0) {
for (int i = 0; i < num_grown; i++){
......@@ -4336,8 +4360,8 @@ public class TileSurface {
}
return grown_subs;
}
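// bounding box {x0, y0, width, height} in image tiles of the given 1-based cluster, expanded by 'border' (x0/y0 may go negative)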
public int [] getClusterBBox(
final int nClust, // 1-based
final int border,
......@@ -4351,7 +4375,7 @@ public class TileSurface {
for (int nl = 0; nl < clusters[nTile].length; nl++){
if (clusters[nTile][nl] == nClust){
int tX = nTile % imageTilesX;
int tY = nTile / imageTilesX;
if (tX < x_min) x_min = tX;
if (tX > x_max) x_max = tX;
......@@ -4362,11 +4386,11 @@ public class TileSurface {
}
int x0 = x_min - border; // may be negative
int y0 = y_min - border; // may be negative
int width = x_max +1 + border - x0;
int height = y_max +1 + border - y0;
int [] bbox = {x0, y0, width, height};
return bbox;
}
}
public int [] getClusterBBox(
final int [] nClust, // 1-based
final int border,
......@@ -4394,15 +4418,15 @@ public class TileSurface {
}
int x0 = x_min - border; // may be negative
int y0 = y_min - border; // may be negative
int width = x_max +1 + border - x0;
int height = y_max +1 + border - y0;
int [] bbox = {x0, y0, width, height};
return bbox;
}
}
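// for each tile of the bbox window return its image-tile index ([0]) and its surface-tile index ([1]), or -1 for positions outside the image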
public int [][] getClusterBBoxIndices(
final int [] window,
final int border) // maybe 0, actual value just saves time
......@@ -4414,8 +4438,8 @@ public class TileSurface {
int [][] bbox_indices = new int [2][width*height]; // [0] - image index, [1] - surface index
int wb = width - border;
int hb = height - border;
for (int neTile = 0; neTile < bbox_indices[0].length; neTile++){
bbox_indices[0][neTile] = -1;
bbox_indices[1][neTile] = -1;
......@@ -4427,19 +4451,19 @@ public class TileSurface {
if ((tx >= 0) && (ty >= 0) && (tx < imageTilesX) && (ty < imageTilesY)){
bbox_indices[0][neTile] = ty * imageTilesX + tx;
bbox_indices[1][neTile] = ty * stilesX *superTileSize + tx;
}
}
}
}
return bbox_indices;
}
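// copy the tiles/layers belonging to a single 1-based cluster into a window-local boolean [tile][layer] selection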
public boolean [][] extractCluster(
final int numToSplit, // 1-based
final int [][] clusters,
final int [] window,
final int [] bbox_indices,
final int debugLevel,
final int dbg_X,
final int dbg_Y)
......@@ -4448,7 +4472,7 @@ public class TileSurface {
if (numToSplit == 74) { //177){
System.out.println("extractCluster() numToSplit="+numToSplit);
}
for (int neTile = 0; neTile < extracted_cluster.length; neTile++){
int nTile = bbox_indices[neTile];
if (nTile >= 0){
......@@ -4463,16 +4487,16 @@ public class TileSurface {
if (has_cluster){
extracted_cluster[neTile] = new boolean[clusters[nTile].length];
for (int nl = 0; nl< clusters[nTile].length; nl++){
extracted_cluster[neTile][nl] = (clusters[nTile][nl] == numToSplit);
}
}
}
}
}
return extracted_cluster;
}
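// split each cluster into connected sub-clusters (separating conflicting parts) and renumber all resulting clusters consecutively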
public int [][] spitConflictClusters(
final int [][] clusters,
......@@ -4484,7 +4508,7 @@ public class TileSurface {
if (debugLevel > -1){
System.out.println("spitConflictClusters(): number of original clusters = " + num_clusters);
}
// final int [][] split_windows = new int [num_clusters][]; // x,y,w,h for the cluster that is split
final int [][][] split_indices = new int [num_clusters][2][]; // for each tile in the window - number of the corresponding image tile or -1
final int [][][] split_clusters = new int [num_clusters][][]; // for each tile in the window, a stack of layers, each number of a subcluster or 0
final int [] split_number = new int [num_clusters]; // number of sublusters the parent was split to
......@@ -4492,6 +4516,7 @@ public class TileSurface {
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int numToSplit = ai.getAndIncrement(); numToSplit < num_clusters; numToSplit = ai.getAndIncrement()) {
if ((numToSplit + 1) == 74) { //177){
......@@ -4511,7 +4536,7 @@ public class TileSurface {
numToSplit + 1, // final int numToSplit, // 1-based
clusters, // final int [][] clusters,
window, // final int [] window,
split_indices[numToSplit][0], // final int [] bbox_indices,
debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y)
......@@ -4520,18 +4545,18 @@ public class TileSurface {
numToSplit + 1, // final int numToSplit, // 1-based
clusters, // final int [][] clusters,
window, // final int [] window,
split_indices[numToSplit][0], // final int [] bbox_indices,
split_indices[numToSplit][1], // final int [] bbox_surf_indices,
extracted_cluster, // final boolean [][] extracted_cluster,
debugLevel, // final int debugLevel,
dbg_X, // final int dbg_X,
dbg_Y); // final int dbg_Y)
split_number[numToSplit] = getNumClusters(split_clusters[numToSplit]);
}
}
};
}
}
ImageDtt.startAndJoin(threads);
final int [] new_cluster_index = new int [num_clusters];
for (int i = 0; i < num_clusters; i++) {
......@@ -4544,17 +4569,18 @@ public class TileSurface {
}
final int [][] new_clusters = new int [clusters.length][];
for (int i = 0; i < new_clusters.length; i++) if (clusters[i] != null) new_clusters[i] = new int [clusters[i].length];
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int numToSplit = ai.getAndIncrement(); numToSplit < num_clusters; numToSplit = ai.getAndIncrement()) {
int numToSplit1 = numToSplit+1;
if (numToSplit1 == 74) { //177){
System.out.println("extractCluster() numToSplit1="+numToSplit1);
}
for (int neTile = 0; neTile < split_indices[numToSplit][0].length; neTile++) {
int nTile = split_indices[numToSplit][0][neTile] ;
if ((nTile >= 0) && (clusters[nTile] != null)){
......@@ -4568,16 +4594,16 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
if (debugLevel > -1){
System.out.println("spitConflictClusters(): DONE");
}
return new_clusters;
}
/**
* Split an extracted cluster into connected sub-clusters, following the per-tile surface
* neighbor links (used to separate clusters that contain conflicts)
* @param numToSplit
* @param clusters
* @param window
......@@ -4619,14 +4645,14 @@ public class TileSurface {
}
}
}
System.out.println("splitCluster() numToSplit="+numToSplit+" num tiles = "+ num_tiles);
}
int num_cluster = 1;
while (true) {
int neTile0 = 0;
int nl0 = 0;
label_loop : {
......@@ -4655,7 +4681,7 @@ public class TileSurface {
int neTile = p.x;
int nl = p.y;
int nSurfTile = bbox_surf_indices[neTile]; // WRONG - fixed
int [] neibs = tileData[nSurfTile][nl].getNeighbors();
for (int dir = 0; dir < tnCluster.numNeibs(); dir++) {
int neTile1 = tnCluster.getNeibIndex(neTile,dir);
if ((neTile1 >= 0) && (extracted_cluster[neTile1] != null)) { // it should have original tile there, not yet assigned to a new
......@@ -4703,7 +4729,7 @@ public class TileSurface {
}
}
}
}
if (numToSplit == 74) { //177){
System.out.println("splitCluster() numToSplit="+numToSplit+" num_cluster="+num_cluster+" num tiles="+dbg_size);
......@@ -4724,12 +4750,12 @@ public class TileSurface {
}
}
}
System.out.println("splitCluster() return numToSplit="+numToSplit + " num tiles = "+ num_tiles+ " num tiles1 = "+ num_tiles1);
}
return split_cluster;
}
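// grow the selection by one step, following the per-tile surface neighbor links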
public boolean [][] growSelectionOnce(
// int grow,
final boolean [][] sel_in,
......@@ -4746,12 +4772,13 @@ public class TileSurface {
// make sure even empty tiles have surface selection arrays defined
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < sel_in.length; nTile = ai.getAndIncrement()) {
boolean not_empty = false;
boolean [] stack = null;
if (sel_in[nTile] != null) {
stack = sel_in[nTile].clone();
}
int nSurfTile = getSurfaceTileIndex(nTile);
if (tileData[nSurfTile] != null) {
......@@ -4773,7 +4800,7 @@ public class TileSurface {
}
}
}
}
}
}
if (not_empty){
selection[nTile] = stack;
......@@ -4781,13 +4808,13 @@ public class TileSurface {
}
}
};
}
}
ImageDtt.startAndJoin(threads);
return selection;
}
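// grow the selection by 'grow' steps in the left/right and up/down directions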
public boolean [][] growSelection(
int grow,
final boolean [][] sel_in,
......@@ -4799,7 +4826,7 @@ public class TileSurface {
final int DIR_LEFT = 6;
final int DIR_UP = 0;
final int DIR_DOWN = 4;
final int [] DIRS = {DIR_LEFT, DIR_RIGHT, DIR_UP, DIR_DOWN};
final boolean [][] selection = sel_in.clone();
final boolean [][] selection_tmp = sel_in.clone();
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
......@@ -4810,6 +4837,7 @@ public class TileSurface {
// make sure even empty tiles have surface selection arrays defined
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < sel_in.length; nTile = ai.getAndIncrement()) {
int nSurfTile = getSurfaceTileIndex(nTile);
......@@ -4820,11 +4848,11 @@ public class TileSurface {
selection[nTile] = new boolean[tileData[nSurfTile].length];
}
// selection_tmp[nTile] = new boolean[tileData[nSurfTile].length];
}
}
}
}
};
}
}
ImageDtt.startAndJoin(threads);
for (; grow > 0; grow -=2){
for (int dri = 0; dri < DIRS.length; dri++ ){
......@@ -4833,6 +4861,7 @@ public class TileSurface {
// extend to the right
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < sel_in.length; nTile = ai.getAndIncrement()) {
if (selection[nTile] != null) {
......@@ -4862,6 +4891,7 @@ public class TileSurface {
// extend to the left
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < sel_in.length; nTile = ai.getAndIncrement()) {
if (selection_tmp[nTile] != null){
......@@ -4881,7 +4911,7 @@ public class TileSurface {
}
return selection;
}
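// pipeline: extract the assigned tiles, enumerate clusters, grow them, merge the grown sub-clusters that do not conflict, then build the surface data (setSurfaceData)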
public void mergeAndGrow( // TODO: add result
final boolean [][] sel_in,
final int debugLevel,
......@@ -4893,7 +4923,7 @@ public class TileSurface {
debugLevel,
dbg_X,
dbg_Y);
int [][] clusters_pure = enumerateClusters(
assigned_sel, //final boolean [][] selection,
debugLevel,
......@@ -4922,7 +4952,7 @@ public class TileSurface {
debugLevel,
dbg_X,
dbg_Y);
int [][] merged_no_conflict = mergeNoConflict(
grown_sub_clusters,
clusters_grown,
......@@ -4937,11 +4967,11 @@ public class TileSurface {
0, // final int debugLevel,
dbg_X,
dbg_Y);
System.out.println("mergeAndGrow() done");
}
}