Commit 6cbf1da0 authored by Andrey Filippov

implemented common offsets for the tile clusters

parent 73772bcb
......@@ -23,6 +23,7 @@ public class CLTParameters {
public int clt_window = 1; // currently only 3 types of windows - 0 (none), 1 and 2
public double shift_x = 0.0;
public double shift_y = 0.0;
public int tileStep = 4; // process tileStep x tileStep cluster of tiles when adjusting lazy eye parameters
public int iclt_mask = 15; // which transforms to combine
public int tileX = 258; // number of kernel tile (0..163)
public int tileY = 133; // number of kernel tile (0..122)
......@@ -826,6 +827,7 @@ public class CLTParameters {
properties.setProperty(prefix+"clt_window", this.clt_window+"");
properties.setProperty(prefix+"shift_x", this.shift_x+"");
properties.setProperty(prefix+"shift_y", this.shift_y+"");
properties.setProperty(prefix+"tileStep", this.tileStep+"");
properties.setProperty(prefix+"iclt_mask", this.iclt_mask+"");
properties.setProperty(prefix+"tileX", this.tileX+"");
properties.setProperty(prefix+"tileY", this.tileY+"");
......@@ -1540,6 +1542,8 @@ public class CLTParameters {
if (properties.getProperty(prefix+"clt_window")!=null) this.clt_window=Integer.parseInt(properties.getProperty(prefix+"clt_window"));
if (properties.getProperty(prefix+"shift_x")!=null) this.shift_x=Double.parseDouble(properties.getProperty(prefix+"shift_x"));
if (properties.getProperty(prefix+"shift_y")!=null) this.shift_y=Double.parseDouble(properties.getProperty(prefix+"shift_y"));
if (properties.getProperty(prefix+"tileStep")!=null) this.tileStep=Integer.parseInt(properties.getProperty(prefix+"tileStep"));
if (properties.getProperty(prefix+"iclt_mask")!=null) this.iclt_mask=Integer.parseInt(properties.getProperty(prefix+"iclt_mask"));
if (properties.getProperty(prefix+"tileX")!=null) this.tileX=Integer.parseInt(properties.getProperty(prefix+"tileX"));
if (properties.getProperty(prefix+"tileY")!=null) this.tileY=Integer.parseInt(properties.getProperty(prefix+"tileY"));
......@@ -1553,8 +1557,6 @@ public class CLTParameters {
if (properties.getProperty(prefix+"scale_strength_main")!=null) this.scale_strength_main=Double.parseDouble(properties.getProperty(prefix+"scale_strength_main"));
if (properties.getProperty(prefix+"scale_strength_aux")!=null) this.scale_strength_aux=Double.parseDouble(properties.getProperty(prefix+"scale_strength_aux"));
if (properties.getProperty(prefix+"norm_kern")!=null) this.norm_kern=Boolean.parseBoolean(properties.getProperty(prefix+"norm_kern"));
if (properties.getProperty(prefix+"gain_equalize")!=null) this.gain_equalize=Boolean.parseBoolean(properties.getProperty(prefix+"gain_equalize"));
if (properties.getProperty(prefix+"colors_equalize")!=null) this.colors_equalize=Boolean.parseBoolean(properties.getProperty(prefix+"colors_equalize"));
......@@ -2274,6 +2276,9 @@ public class CLTParameters {
gd.addNumericField("Lapped transform window type (0- rectangular, 1 - sinus)", this.clt_window, 0);
gd.addNumericField("shift_x", this.shift_x, 4);
gd.addNumericField("shift_y", this.shift_y, 4);
gd.addNumericField("Lazy eye cluster size", this.tileStep, 0, 6, "tiles",
"Process tileStep x tileStep cluster of tiles when adjusting lazy eye parameters");
gd.addNumericField("Bit mask - which of 4 transforms to combine after iclt", this.iclt_mask, 0);
gd.addNumericField("Tile X to extract (0..163)", this.tileX, 0);
gd.addNumericField("Tile Y to extract (0..122)", this.tileY, 0);
......@@ -3125,6 +3130,8 @@ public class CLTParameters {
this.clt_window= (int) gd.getNextNumber();
this.shift_x = gd.getNextNumber();
this.shift_y = gd.getNextNumber();
this.tileStep= (int) gd.getNextNumber();
this.iclt_mask= (int) gd.getNextNumber();
this.tileX= (int) gd.getNextNumber();
this.tileY= (int) gd.getNextNumber();
......
......@@ -721,6 +721,7 @@ private Panel panel1,
addButton("Create AUX CLT kernels", panelLWIR, color_process_aux);
addButton("Select source sets", panelLWIR, color_configure);
addButton("Configure color", panelLWIR, color_configure);
addButton("CORR TEST", panelLWIR, color_conf_process);
addButton("CLT 4 images", panelLWIR, color_conf_process);
addButton("CLT 3D", panelLWIR, color_process);
addButton("CLT planes", panelLWIR, color_conf_process);
......@@ -4312,7 +4313,10 @@ private Panel panel1,
return;
} else if (label.equals("CLT 4 images") || label.equals("CLT apply fine corr") || label.equals("CLT infinity corr")) {
} else if (label.equals("CLT 4 images") ||
label.equals("CLT apply fine corr") ||
label.equals("CLT infinity corr") ||
label.equals("CORR TEST")) {
boolean apply_corr = label.equals("CLT apply fine corr");
boolean infinity_corr = label.equals("CLT infinity corr");
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
......@@ -4371,18 +4375,35 @@ private Panel panel1,
int num_infinity_corr = infinity_corr? CLT_PARAMETERS.inf_repeat : 1;
if ( num_infinity_corr < 1) num_infinity_corr = 1;
for (int i_infinity_corr = 0; i_infinity_corr < num_infinity_corr; i_infinity_corr++) {
QUAD_CLT.processCLTQuadCorrs(
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CHANNEL_GAINS_PARAMETERS, //CorrectionColorProc.ColorGainsParameters channelGainParameters,
RGB_PARAMETERS, //EyesisCorrectionParameters.RGBParameters rgbParameters,
apply_corr,
infinity_corr, // calculate and apply geometry correction at infinity
THREADS_MAX, //final int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, //final boolean updateStatus,
DEBUG_LEVEL); //final int debugLevel);
if (label.equals("CORR TEST")) {
QUAD_CLT.processCLTQuadCorrsTest(
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CHANNEL_GAINS_PARAMETERS, //CorrectionColorProc.ColorGainsParameters channelGainParameters,
RGB_PARAMETERS, //EyesisCorrectionParameters.RGBParameters rgbParameters,
apply_corr,
infinity_corr, // calculate and apply geometry correction at infinity
THREADS_MAX, //final int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, //final boolean updateStatus,
DEBUG_LEVEL); //final int debugLevel);
} else {
QUAD_CLT.processCLTQuadCorrs(
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CHANNEL_GAINS_PARAMETERS, //CorrectionColorProc.ColorGainsParameters channelGainParameters,
RGB_PARAMETERS, //EyesisCorrectionParameters.RGBParameters rgbParameters,
apply_corr,
infinity_corr, // calculate and apply geometry correction at infinity
THREADS_MAX, //final int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, //final boolean updateStatus,
DEBUG_LEVEL); //final int debugLevel);
}
}
if (configPath!=null) {
saveTimestampedProperties( // save config again
configPath, // full path or null
......
......@@ -79,12 +79,19 @@ public class Corr2dLMA {
final static int B_INDEX = 2; // 2*B*(x-x0)*(y-y0)
final static int CMA_INDEX = 3; // C*(y-y0)^2, encode C-A
final static int G0_INDEX = 4; // scale of correlation pair,
final static int DDISP_INDEX = G0_INDEX + NUM_PAIRS; // disparity offset per camera (at least 1 should be disabled)
final static int NDISP_INDEX = DDISP_INDEX + NUM_CAMS; // disparity offset per camera - none should be disable
final static int NUM_ALL_PARS = NDISP_INDEX+ NUM_CAMS; // maximal number of parameters
final static int TILE_PARAMS = G0_INDEX + NUM_PAIRS; // number of tile-individual parameters
// final static int DDISP_INDEX = G0_INDEX + NUM_PAIRS; // disparity offset per camera (at least 1 should be disabled)
// final static int NDISP_INDEX = DDISP_INDEX + NUM_CAMS; // disparity offset per camera - none should be disable
// final static int NUM_ALL_PARS = NDISP_INDEX+ NUM_CAMS; // maximal number of parameters
private int DDISP_INDEX; // = G0_INDEX + NUM_PAIRS; // disparity offset per camera (at least 1 should be disabled)
private int NDISP_INDEX; // = DDISP_INDEX + NUM_CAMS; // disparity offset per camera - none should be disabled
private int NUM_ALL_PARS; // = NDISP_INDEX+ NUM_CAMS; // maximal number of parameters
final int [] USED_CAMS_MAP = new int[NUM_CAMS]; // for each camera index return used index ???
final int [][] USED_PAIRS_MAP = new int[NUM_CAMS][NUM_CAMS]; // for each camera index return used index ??
// final int [][] USED_PAIRS_MAP = new int[NUM_CAMS][NUM_CAMS]; // for each camera index return used index ??
private int [][][] USED_PAIRS_MAP; // for each camera index return used index ??
final static String [] PAR_NAMES = {"DISP","A","B","C-A"};
final static String PAR_NAME_SCALE = "SCALE";
......@@ -96,7 +103,7 @@ public class Corr2dLMA {
double [] vector;
double [] scales = {1.0, 2.0, 4.0};
ArrayList<Sample> samples = new ArrayList<Sample>();
double [] pair_weights = null; // per pair weights (sum == 1.0)
//// double [] pair_weights = null; // per pair weights (sum == 1.0) Not really needed?
double [] weights; // normalized so sum is 1.0 for all - samples and extra regularization terms
double pure_weight; // weight of samples only
double [] values;
......@@ -111,17 +118,21 @@ public class Corr2dLMA {
private final int transform_size;
private final double [][] corr_wnd;
private boolean [] used_cameras;
private final Matrix [] m_disp = new Matrix[NUM_CAMS];
private int ncam = 0; // number of used cameras
private int npairs=0; // number of used pairs
// private final Matrix [] m_disp = new Matrix[NUM_CAMS];
private Matrix [][] m_disp;
private int ncam; // number of used cameras
private int [] npairs; // number of used pairs per tile
private int last_cam; // index of the last camera (special treatment for disparity correction)
// private boolean second_last; // there is a pair where the second camera is the last one (false: first in a pair is the last one)
private final Matrix [][] m_pairs = new Matrix[NUM_CAMS][NUM_CAMS];
private final Matrix [][] m_pairs_last = new Matrix[NUM_CAMS][NUM_CAMS];
// private final Matrix [][] m_pairs = new Matrix[NUM_CAMS][NUM_CAMS];
private Matrix [][][] m_pairs;
// private final Matrix [][] m_pairs_last = new Matrix[NUM_CAMS][NUM_CAMS];
private final int [][] pindx = new int [NUM_CAMS][NUM_CAMS];
private int numTiles = 1;
public class Sample{ // USED in lwir
int tile; // tile in a cluster
int fcam; // first camera index
int scam; // second camera index
int ix; // x coordinate in 2D correlation (0.. 2*transform_size-2, center: (transform_size-1)
......@@ -129,6 +140,7 @@ public class Corr2dLMA {
double v; // correlation value at that point
double w; // weight
Sample (
int tile,
int fcam, // first camera index
int scam, // second camera index
int x, // x coordinate on the common scale (corresponding to the largest baseline), along the disparity axis
......@@ -136,6 +148,7 @@ public class Corr2dLMA {
double v, // correlation value at that point
double w)
{
this.tile = tile;
this.fcam = fcam;
this.scam = scam;
this.ix = x;
......@@ -145,64 +158,87 @@ public class Corr2dLMA {
}
}
public Corr2dLMA (
int ts, // null - use default table
double [][] corr_wnd // may be null
) {
int numTiles,
int ts, // null - use default table
double [][] corr_wnd // may be null
) {
for (int f = 0; f < NUM_CAMS; f++) {
pindx[f][f]=-1;
for (int s = f+1; s < NUM_CAMS; s++) {
pindx[f][s] = getPairIndex(f,s);
pindx[s][f] = pindx[f][s];
}
}
this.numTiles = numTiles;
DDISP_INDEX = this.numTiles * TILE_PARAMS;
NDISP_INDEX = DDISP_INDEX + NUM_CAMS; // disparity offset per camera - none should be disabled
NUM_ALL_PARS = NDISP_INDEX+ NUM_CAMS; // maximal number of parameters
boolean sq = false;
this.transform_size = ts;
if (corr_wnd!=null) {
if (corr_wnd != null) {
this.corr_wnd = corr_wnd;
return;
}
this.corr_wnd = new double[2 * transform_size - 1][2 * transform_size - 1];
int tsm1 = transform_size - 1; // 7
int dtsm1 = 2 * transform_size - 1; // 15
this.corr_wnd[tsm1][tsm1] = 1.0;
for (int i = 1; i < transform_size; i++) {
this.corr_wnd[tsm1 + i][tsm1 ] = Math.cos(Math.PI*i/(2 * transform_size));
this.corr_wnd[tsm1 - i][tsm1 ] = Math.cos(Math.PI*i/(2 * transform_size));
this.corr_wnd[tsm1 ][tsm1 + i] = Math.cos(Math.PI*i/(2 * transform_size));
this.corr_wnd[tsm1 ][tsm1 - i] = Math.cos(Math.PI*i/(2 * transform_size));
}
for (int i = 1; i < transform_size; i++) {
for (int j = 1; j < transform_size; j++) {
double d = this.corr_wnd[tsm1 + i][tsm1] * this.corr_wnd[tsm1 + j][tsm1];
this.corr_wnd[tsm1 + i][tsm1 + j] = d;
this.corr_wnd[tsm1 + i][tsm1 - j] = d;
this.corr_wnd[tsm1 - i][tsm1 + j] = d;
this.corr_wnd[tsm1 - i][tsm1 - j] = d;
}
}
if (sq) {
for (int i = 0; i < dtsm1; i++) {
for (int j = 0; j < dtsm1; j++) {
this.corr_wnd[i][j] *=this.corr_wnd[i][j];
} else {
this.corr_wnd = new double[2 * transform_size - 1][2 * transform_size - 1];
int tsm1 = transform_size - 1; // 7
int dtsm1 = 2 * transform_size - 1; // 15
this.corr_wnd[tsm1][tsm1] = 1.0;
for (int i = 1; i < transform_size; i++) {
this.corr_wnd[tsm1 + i][tsm1 ] = Math.cos(Math.PI*i/(2 * transform_size));
this.corr_wnd[tsm1 - i][tsm1 ] = Math.cos(Math.PI*i/(2 * transform_size));
this.corr_wnd[tsm1 ][tsm1 + i] = Math.cos(Math.PI*i/(2 * transform_size));
this.corr_wnd[tsm1 ][tsm1 - i] = Math.cos(Math.PI*i/(2 * transform_size));
}
for (int i = 1; i < transform_size; i++) {
for (int j = 1; j < transform_size; j++) {
double d = this.corr_wnd[tsm1 + i][tsm1] * this.corr_wnd[tsm1 + j][tsm1];
this.corr_wnd[tsm1 + i][tsm1 + j] = d;
this.corr_wnd[tsm1 + i][tsm1 - j] = d;
this.corr_wnd[tsm1 - i][tsm1 + j] = d;
this.corr_wnd[tsm1 - i][tsm1 - j] = d;
}
}
if (sq) {
for (int i = 0; i < dtsm1; i++) {
for (int j = 0; j < dtsm1; j++) {
this.corr_wnd[i][j] *=this.corr_wnd[i][j];
}
}
}
}
}
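A sketch (not in the commit) of the parameter-vector layout the constructor above sets up: numTiles blocks of TILE_PARAMS per-tile parameters (DISP, A, B, C-A and one scale per pair), followed by the per-camera lazy-eye offsets DDISP and NDISP that are common to all tiles of the cluster. Illustrative index helpers implied by the constants above:
int dispIndex (int nTile)           { return DISP_INDEX  + nTile * TILE_PARAMS; }
int scaleIndex(int nTile, int pair) { return G0_INDEX    + pair + nTile * TILE_PARAMS; }
int ddispIndex(int cam)             { return DDISP_INDEX + cam; } // DDISP_INDEX == numTiles * TILE_PARAMS
int ndispIndex(int cam)             { return NDISP_INDEX + cam; } // NDISP_INDEX == DDISP_INDEX + NUM_CAMS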
public double[][] getCorrWnd() {
return this.corr_wnd;
}
public void addSample( // x = 0, y=0 - center
int tile,
int fcam, // first camera index
int scam, // second camera index
int x, // x coordinate on the common scale (corresponding to the largest baseline), along the disparity axis
int y, // y coordinate (0 - disparity axis)
double v, // correlation value at that point
double w){ // sample weight
if ((w > 0) && !Double.isNaN(v)) samples.add(new Sample(fcam,scam,x,y,v,w));
if ((w > 0) && !Double.isNaN(v)) samples.add(new Sample(tile,fcam,scam,x,y,v,w));
}
public double [][] dbgGetSamples(int mode){
public double [][][] dbgGetSamples(int mode){
int [][] comb_map = getCombMap();
int numPairs = comb_map[0][0];
comb_map[0][0] = -1;
int size = 2* transform_size -1;
int size2 = size*size;
double [][] rslt = new double [npairs][size2];
for (int np = 0; np < npairs; np++) {
for (int i = 0; i < size2; i++) {
rslt[np][i] = Double.NaN;
double [][][] rslt = new double [numTiles][numPairs][size2];
for (int nTile = 0; nTile < numTiles; nTile++) {
for (int np = 0; np < numPairs; np++) {
for (int i = 0; i < size2; i++) {
rslt[nTile][np][i] = Double.NaN;
}
}
}
double [] fx = null;
......@@ -213,24 +249,61 @@ public class Corr2dLMA {
for (int ns = 0; ns < samples.size(); ns++) {
Sample s = samples.get(ns);
double d = Double.NaN;
if (mode == 0) d = s.v;
if (mode == 0) d = s.v;
else if (mode == 1) d = s.w;
else if (mode == 2) d = fx[ns];
int np = USED_PAIRS_MAP[s.fcam][s.scam];
rslt[np][s.iy*size + s.ix] = d;
// int np = USED_PAIRS_MAP[0][s.fcam][s.scam]; ////////////////////
int np = comb_map[s.fcam][s.scam]; ////////////////////
rslt[s.tile][np][s.iy*size + s.ix] = d;
}
return rslt;
}
/*
public String [] dbgGetSliceTiles(int ntile) {
String [] srslt = new String [npairs[ntile]];
for (int f = 0; f < NUM_CAMS; f++) for (int s = 0; s < NUM_CAMS; s++) {
if (USED_PAIRS_MAP[ntile][f][s] >= 0) {
srslt[USED_PAIRS_MAP[ntile][f][s]] = ""+f+"->"+s;
}
}
return srslt;
}
*/
public String [] dbgGetSliceTiles() {
String [] srslt = new String [npairs];
int [][] comb_map = getCombMap();
int np = comb_map[0][0];
comb_map[0][0] = -1;
String [] srslt = new String [np];
for (int f = 0; f < NUM_CAMS; f++) for (int s = 0; s < NUM_CAMS; s++) {
if (USED_PAIRS_MAP[f][s] >= 0) {
srslt[USED_PAIRS_MAP[f][s]] = ""+f+"->"+s;
if (comb_map[f][s] >= 0) {
srslt[comb_map[f][s]] = ""+f+"->"+s;
}
}
return srslt;
}
public int [][] getCombMap(){
boolean [][] comb_pairs = new boolean[NUM_CAMS][NUM_CAMS];
for (int t = 0; t < numTiles; t++) {
for (int f = 0; f < NUM_CAMS; f++) for (int s = 0; s < NUM_CAMS; s++) {
comb_pairs[f][s] |= USED_PAIRS_MAP[t][f][s] >= 0;
}
}
int np = 0;
int [][] comb_map = new int [NUM_CAMS][NUM_CAMS];
for (int f = 0; f < NUM_CAMS; f++) for (int s = 0; s < NUM_CAMS; s++) {
if (comb_pairs[f][s]) comb_map[f][s] = np++;
else comb_map[f][s] = -1;
}
comb_map[0][0] = np;
return comb_map;
}
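A short usage sketch (not in the commit) matching how dbgGetSamples() and dbgGetSliceTiles() above consume getCombMap(): the pair count is returned in element [0][0] and must be read out before the map is indexed by camera pair (fcam/scam are placeholder camera indices):
int [][] comb_map = getCombMap();   // pair map combined over all tiles of the cluster
int numPairs = comb_map[0][0];      // number of distinct used pairs
comb_map[0][0] = -1;                // restore: [0][0] is not a valid pair entry
int slice = comb_map[fcam][scam];   // >= 0 for a used pair, -1 otherwise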
public int getPairIndex(int f, int s) {
if (f > s) {
......@@ -242,45 +315,57 @@ public class Corr2dLMA {
}
public void setMatrices(double [][] am_disp) {
m_disp = new Matrix[1][NUM_CAMS];
for (int n = 0; n < NUM_CAMS; n++) {
double [][] am = {
{am_disp[n][0],am_disp[n][1]},
{am_disp[n][2],am_disp[n][3]}};
m_disp[n] = new Matrix(am);
m_disp[0][n] = new Matrix(am);
}
}
public void setMatrices(double [][][] am_disp) {
m_disp = new Matrix[am_disp.length][NUM_CAMS];
for (int nt = 0; nt < numTiles; nt++) {
for (int n = 0; n < NUM_CAMS; n++) {
double [][] am = {
{am_disp[nt][n][0], am_disp[nt][n][1]},
{am_disp[nt][n][2], am_disp[nt][n][3]}};
m_disp[nt][n] = new Matrix(am);
}
}
}
public void initVector( // USED in lwir
boolean adjust_width, // adjust width of the maximum - lma_adjust_wm
boolean adjust_scales, // adjust 2D correlation scales - lma_adjust_ag
boolean adjust_ellipse, // allow non-circular correlation maximums lma_adjust_wy
boolean adjust_lazyeye_par, // adjust disparity corrections parallel to disparities lma_adjust_wxy
boolean adjust_lazyeye_ortho, // adjust disparity corrections orthogonal to disparities lma_adjust_ly1
double disp0, // initial value of disparity
double [][] disp_str, // initial value of disparity
// double disp0, // initial value of disparity
double half_width, // A=1/(half_width)^2 lma_half_width
double cost_lazyeye_par, // cost for each of the non-zero disparity corrections lma_cost_wy
double cost_lazyeye_odtho // cost for each of the non-zero ortho disparity corrections lma_cost_wxy
) {
// int [][] pindx = new int [NUM_CAMS][NUM_CAMS];
for (int f = 0; f < NUM_CAMS; f++) {
pindx[f][f]=-1;
for (int s = f+1; s < NUM_CAMS; s++) {
pindx[f][s] = getPairIndex(f,s);
pindx[s][f] = pindx[f][s];
}
}
USED_PAIRS_MAP = new int [numTiles][NUM_CAMS][NUM_CAMS];
used_cameras = new boolean[NUM_CAMS];
boolean [] used_pairs = new boolean[NUM_PAIRS];
boolean [][] used_pairs = new boolean[numTiles][NUM_PAIRS];
// 0-weight values and NaN-s should be filtered on input!
last_cam = -1;
for (int f = 0; f < NUM_CAMS; f++) for (int s = 0; s < NUM_CAMS; s++) USED_PAIRS_MAP[f][s] = -1;
boolean [][] used_pairs_dir = new boolean [NUM_CAMS][NUM_CAMS];
for (int t = 0; t < numTiles; t++) for (int f = 0; f < NUM_CAMS; f++) for (int s = 0; s < NUM_CAMS; s++) {
USED_PAIRS_MAP[t][f][s] = -1;
}
boolean [][][] used_pairs_dir = new boolean [numTiles][NUM_CAMS][NUM_CAMS];
for (Sample s:samples) { // ignore zero-weight samples
used_cameras[s.fcam]=true;
used_cameras[s.scam]=true;
used_pairs[pindx[s.fcam][s.scam]]=true; // throws < 0 - wrong pair, f==s
used_pairs_dir[s.fcam][s.scam] = true;
used_pairs[s.tile][pindx[s.fcam][s.scam]]=true; // throws < 0 - wrong pair, f==s
used_pairs_dir[s.tile][s.fcam][s.scam] = true;
}
ncam = 0;
npairs =new int [numTiles];
for (int i = 0; i < NUM_CAMS; i++) {
USED_CAMS_MAP[i] = ncam;
if (used_cameras[i]) {
......@@ -288,35 +373,39 @@ public class Corr2dLMA {
ncam++;
}
}
int [] upmam = new int[NUM_PAIRS];
for (int i = 0; i < NUM_PAIRS; i++) {
upmam[i] = npairs;
if (used_pairs[i]) npairs++;
}
for (int f = 0; f < NUM_CAMS; f++) {
// USED_PAIRS_MAP[f][f] = -1;
for (int s = f+1; s < NUM_CAMS; s++) {
int npair = upmam[pindx[f][s]];
if (used_pairs_dir[f][s]) USED_PAIRS_MAP[f][s] = npair; // either or, can not be f,s and s,f pairs
else if (used_pairs_dir[s][f]) USED_PAIRS_MAP[s][f] = npair;
for (int nTile = 0; nTile < numTiles; nTile++) {
int [] upmam = new int[NUM_PAIRS];
for (int i = 0; i < NUM_PAIRS; i++) {
upmam[i] = npairs[nTile];
if (used_pairs[nTile][i]) npairs[nTile]++;
}
for (int f = 0; f < NUM_CAMS; f++) {
for (int s = f+1; s < NUM_CAMS; s++) {
int npair = upmam[pindx[f][s]];
if (used_pairs_dir[nTile][f][s]) USED_PAIRS_MAP[nTile][f][s] = npair; // either or, can not be f,s and s,f pairs
else if (used_pairs_dir[nTile][s][f]) USED_PAIRS_MAP[nTile][s][f] = npair;
}
}
}
this.all_pars = new double[NUM_ALL_PARS];
this.all_pars[DISP_INDEX] = disp0;
this.all_pars[A_INDEX] = 1.0/(half_width * half_width);
this.all_pars[B_INDEX] = 0.0;
this.all_pars[CMA_INDEX] = 0.0; // C-A
this.all_pars = new double[NUM_ALL_PARS];
this.par_mask = new boolean[NUM_ALL_PARS];
this.par_mask[DISP_INDEX] = true;
this.par_mask[A_INDEX] = adjust_width;
this.par_mask[B_INDEX] = adjust_ellipse;
this.par_mask[CMA_INDEX] = adjust_ellipse;
for (int i = 0; i <NUM_PAIRS; i++) {
this.par_mask[G0_INDEX + i] = used_pairs[i] & adjust_scales;
this.all_pars[G0_INDEX + i] = Double.NaN; // will be assigned later for used - should be for all !
// per-tile parameters
for (int nTile = 0; nTile < numTiles; nTile++) {
this.all_pars[DISP_INDEX + nTile*TILE_PARAMS] = disp_str[nTile][0]; // disp0;
this.all_pars[A_INDEX + nTile*TILE_PARAMS] = 1.0/(half_width * half_width);
this.all_pars[B_INDEX + nTile*TILE_PARAMS] = 0.0;
this.all_pars[CMA_INDEX + nTile*TILE_PARAMS] = 0.0; // C-A
this.par_mask[DISP_INDEX + nTile*TILE_PARAMS] = true;
this.par_mask[A_INDEX + nTile*TILE_PARAMS] = adjust_width;
this.par_mask[B_INDEX + nTile*TILE_PARAMS] = adjust_ellipse;
this.par_mask[CMA_INDEX + nTile*TILE_PARAMS] = adjust_ellipse;
for (int i = 0; i <NUM_PAIRS; i++) {
this.par_mask[G0_INDEX + i + nTile*TILE_PARAMS] = used_pairs[nTile][i] & adjust_scales;
this.all_pars[G0_INDEX + i + nTile*TILE_PARAMS] = Double.NaN; // will be assigned later for used - should be for all !
}
}
// common for all tiles parameters
for (int i = 0; i <NUM_CAMS; i++) {
this.all_pars[DDISP_INDEX + i] = 0.0; // C-A
this.par_mask[DDISP_INDEX + i] = used_cameras[i] & adjust_lazyeye_par & (i != last_cam);
......@@ -328,27 +417,25 @@ public class Corr2dLMA {
weights = new double [np + 2 * NUM_CAMS]; // npairs];
values = new double [np + 2 * NUM_CAMS]; // npairs];
for (int i = 0; i < NUM_CAMS; i++) {
weights[np + i] = (used_cameras[i] & adjust_lazyeye_par)? cost_lazyeye_par : 0.0; // ddisp - including last_camera
weights[np + NUM_CAMS + i] = (used_cameras[i] & adjust_lazyeye_ortho)? cost_lazyeye_odtho : 0.0; // ndisp
weights[np + i] = (used_cameras[i] & adjust_lazyeye_par)? (cost_lazyeye_par * numTiles) : 0.0; // ddisp - including last_camera
weights[np + NUM_CAMS + i] = (used_cameras[i] & adjust_lazyeye_ortho)? (cost_lazyeye_odtho * numTiles) : 0.0; // ndisp
values [np + i] = 0.0;
values [np + NUM_CAMS + i] = 0.0;
}
double sw = 0;
this.pair_weights = new double[NUM_PAIRS];
//// this.pair_weights = new double[NUM_PAIRS];
for (int i = 0; i < np; i++) {
Sample s = samples.get(i);
weights[i] = s.w;
values[i] = s.v;
sw += weights[i];
int indx = pindx[s.fcam][s.scam];
pair_weights[indx] += s.w;
indx += G0_INDEX;
int indx = G0_INDEX + pindx[s.fcam][s.scam] + s.tile * TILE_PARAMS;
double d = s.v;
if (this.corr_wnd !=null) {
d /= this.corr_wnd[s.iy][s.ix];
}
if (!(d <= this.all_pars[indx])) this.all_pars[indx] = d; // to include Double.isNan()
if (!(d <= this.all_pars[indx])) this.all_pars[indx] = d; // to include Double.isNaN()
}
pure_weight = sw;
for (int i = 0; i < 2 * NUM_CAMS; i++) { // weight of the regularization terms (twice number of cameras, some may be disabled by a mask)
......@@ -359,6 +446,7 @@ public class Corr2dLMA {
for (int i = 0; i < weights.length; i++) weights[i] *= kw;
pure_weight *= kw; // it is now fraction (0..1.0), and weights are normalized
}
/*****
double spw = 0;
for (int i = 0; i < NUM_PAIRS; i++) {
spw += pair_weights[i];
......@@ -369,6 +457,7 @@ public class Corr2dLMA {
pair_weights[i]*=rspw;
}
}
*/
par_map = new int [par_mask.length];
int par_indx = 0;
for (int i = 0; i < par_mask.length; i++) {
......@@ -379,13 +468,18 @@ public class Corr2dLMA {
toVector();
}
public void initMatrices() { // should be called after initVector and after setMatrices
for (int f = 0; f < NUM_CAMS; f++) for (int s = 0; s < NUM_CAMS; s++) {
m_pairs[f][s] = null;
m_pairs_last[f][s] = null;
if (USED_PAIRS_MAP[f][s] >= 0) {
m_pairs[f][s] = m_disp[f].minus(m_disp[s]);
}
m_pairs = new Matrix[USED_PAIRS_MAP.length][NUM_CAMS][NUM_CAMS];
for (int nTile = 0; nTile < USED_PAIRS_MAP.length; nTile++) {
for (int f = 0; f < NUM_CAMS; f++) for (int s = 0; s < NUM_CAMS; s++) {
m_pairs[nTile][f][s] = null;
// m_pairs_last[f][s] = null;
if (USED_PAIRS_MAP[nTile][f][s] >= 0) {
m_pairs[nTile][f][s] = m_disp[nTile][f].minus(m_disp[nTile][s]);
}
/*
if (f == last_cam) {
m_pairs_last[f][s] = m_disp[s].uminus();
for (int i = 0; i < NUM_CAMS; i++) if (used_cameras[i] && (i != last_cam) ){
......@@ -396,6 +490,8 @@ public class Corr2dLMA {
for (int i = 0; i < NUM_CAMS; i++) if (used_cameras[i] && (i != last_cam) ){
m_pairs_last[f][s].plusEquals(m_disp[i]);
}
}
*/
}
}
}
......@@ -405,53 +501,46 @@ public class Corr2dLMA {
double [][] jt) { // should be either [vector.length][samples.size()] or null - then only fx is calculated
if (vector == null) return null;
double [] av = fromVector(vector);
// restoration of the last camera is moved to fromVector()
/*
// restore ddisp("x") offset for the last camera
// prepare parameters common for each camera/camera pair before calculating fx and derivatives
av[DDISP_INDEX + last_cam] = 0.0;
for (int i = 0; i < NUM_CAMS; i++) {
if (used_cameras[i] & (i != last_cam)) {
av[DDISP_INDEX + last_cam] -= av[DDISP_INDEX + i];
}
}
*/
Matrix [] xcam_ycam = new Matrix[NUM_CAMS];
for (int i = 0; i < NUM_CAMS; i++) if (used_cameras[i]) {
double [] add_dnd = {av[DISP_INDEX]+ av[DDISP_INDEX + i], av[NDISP_INDEX + i]};
xcam_ycam[i] = m_disp[i].times(new Matrix(add_dnd,2));
}
double [][][] xp_yp = new double[NUM_CAMS][NUM_CAMS][];
Matrix [][] xcam_ycam = new Matrix[numTiles][NUM_CAMS];
double [][][][] xp_yp = new double[numTiles][NUM_CAMS][NUM_CAMS][];
double [] axc_yc = {transform_size - 1.0, transform_size-1.0};
Matrix xc_yc = new Matrix(axc_yc, 2);
for (int f = 0; f < NUM_CAMS; f++) if (used_cameras[f]) {
for (int s = 0; s < NUM_CAMS; s++) if (used_cameras[s]) {
xp_yp[f][s] =xcam_ycam[f].minus(xcam_ycam[s]).plus(xc_yc).getColumnPackedCopy();
double [] AT = new double [numTiles]; // av[A_INDEX];
double [] BT = new double [numTiles]; // av[B_INDEX];
double [] CT = new double [numTiles]; // A + av[CMA_INDEX];
for (int nTile = 0; nTile < numTiles; nTile++) {
for (int i = 0; i < NUM_CAMS; i++) if (used_cameras[i]) {
double [] add_dnd = {av[DISP_INDEX+ nTile * TILE_PARAMS]+ av[DDISP_INDEX + i], av[NDISP_INDEX + i]};
xcam_ycam[nTile][i] = m_disp[nTile][i].times(new Matrix(add_dnd,2));
}
for (int f = 0; f < NUM_CAMS; f++) if (used_cameras[f]) {
for (int s = 0; s < NUM_CAMS; s++) if (used_cameras[s]) {
xp_yp[nTile][f][s] =xcam_ycam[nTile][f].minus(xcam_ycam[nTile][s]).plus(xc_yc).getColumnPackedCopy();
}
}
AT[nTile] = av[A_INDEX + nTile * TILE_PARAMS];
BT[nTile] = av[B_INDEX + nTile * TILE_PARAMS];
CT[nTile] = AT[nTile] + av[CMA_INDEX + nTile * TILE_PARAMS];
}
//USED_PAIRS_MAP
int num_samples = samples.size();
double [] fx= new double [num_samples + 2 * NUM_CAMS];
// double sqrt2 = Math.sqrt(2.0);
double A = av[A_INDEX];
double B = av[B_INDEX];
double C = A + av[CMA_INDEX];
// double A = av[A_INDEX];
// double B = av[B_INDEX];
// double C = A + av[CMA_INDEX];
//corr_wnd
for (int ns = 0; ns < num_samples; ns++) {
// if (ns == 18) {
// System.out.println("ns == 18");
// }
Sample s = samples.get(ns);
int pair = pindx[s.fcam][s.scam];
double Gp = av[G0_INDEX + pair];
int pair = pindx[s.fcam][s.scam]; // all pairs, not just used?
double A = AT[s.tile];
double B = BT[s.tile];
double C = CT[s.tile];
double Gp = av[G0_INDEX + pair + s.tile * TILE_PARAMS];
double Wp = corr_wnd[s.ix][s.iy];
double WGp = Wp * Gp;
double xmxp = s.ix - xp_yp[s.fcam][s.scam][0];
double ymyp = s.iy - xp_yp[s.fcam][s.scam][1];
double xmxp = s.ix - xp_yp[s.tile][s.fcam][s.scam][0];
double ymyp = s.iy - xp_yp[s.tile][s.fcam][s.scam][1];
double xmxp2 = xmxp * xmxp;
double ymyp2 = ymyp * ymyp;
double xmxp_ymyp = xmxp * ymyp;
......@@ -460,103 +549,66 @@ public class Corr2dLMA {
if (Double.isNaN(fx[ns])) {
System.out.println("fx["+ns+"]="+fx[ns]);
}
// int np = 0;
if (s.tile > 0) {
System.out.print("");
}
if (jt != null) {
if (par_map[DISP_INDEX] >= 0) jt[par_map[DISP_INDEX]][ns] = 2 * WGp *
((A * xmxp + B * ymyp) * m_pairs[s.fcam][s.scam].get(0, 0)+
(B * xmxp + C * ymyp) * m_pairs[s.fcam][s.scam].get(1, 0));
if (par_map[A_INDEX] >= 0) jt[par_map[A_INDEX]][ns] = -WGp*(xmxp2 + ymyp2);
if (par_map[B_INDEX] >= 0) jt[par_map[B_INDEX]][ns] = -WGp* 2 * xmxp_ymyp;
if (par_map[CMA_INDEX] >= 0) jt[par_map[CMA_INDEX]][ns] = -WGp* ymyp2;
for (int p = 0; p < npairs; p++) { // par_mask[G0_INDEX + p] as all pairs either used, or not - then npairs == 0
if (par_map[G0_INDEX + p] >= 0) jt[par_map[G0_INDEX + p]][ns] = (p== pair)? d : 0.0; // (par_mask[G0_INDEX + pair])? d;
if (par_map[DISP_INDEX + s.tile*TILE_PARAMS] >= 0) jt[par_map[DISP_INDEX + s.tile*TILE_PARAMS]][ns] = 2 * WGp *
((A * xmxp + B * ymyp) * m_pairs[s.tile][s.fcam][s.scam].get(0, 0)+
(B * xmxp + C * ymyp) * m_pairs[s.tile][s.fcam][s.scam].get(1, 0));
if (par_map[A_INDEX + s.tile*TILE_PARAMS] >= 0) jt[par_map[A_INDEX + s.tile*TILE_PARAMS]][ns] = -WGp*(xmxp2 + ymyp2);
if (par_map[B_INDEX + s.tile*TILE_PARAMS] >= 0) jt[par_map[B_INDEX + s.tile*TILE_PARAMS]][ns] = -WGp* 2 * xmxp_ymyp;
if (par_map[CMA_INDEX + s.tile*TILE_PARAMS] >= 0) jt[par_map[CMA_INDEX + s.tile*TILE_PARAMS]][ns] = -WGp* ymyp2;
for (int p = 0; p < npairs[s.tile]; p++) { // par_mask[G0_INDEX + p] as all pairs either used, or not - then npairs == 0
if (par_map[G0_INDEX + p + s.tile*TILE_PARAMS] >= 0) jt[par_map[G0_INDEX + p + s.tile*TILE_PARAMS]][ns] = (p== pair)? d : 0.0; // (par_mask[G0_INDEX + pair])? d;
}
// process ddisp (last camera not used, is equal to minus sum of others to make a sum == 0)
// Need to fix for missing parameters
// for (int f = 0; f < ncam - 1; f++) if (par_map[DDISP_INDEX + f] >= 0) { // -1 for the last_cam
// jt[np + USED_CAMS_MAP[f]][ns] = 0.0;
for (int f = 0; f < NUM_CAMS; f++) if (par_map[DDISP_INDEX + f] >= 0) { // -1 for the last_cam
jt[par_map[DDISP_INDEX + f]][ns] = 0.0;
}
if (par_map[DDISP_INDEX + s.fcam] >= 0){ // par_map[DDISP_INDEX + last_cam] always <0
jt[par_map[DDISP_INDEX + s.fcam]][ns] += 2 * WGp *
((A * xmxp + B * ymyp) * m_disp[s.fcam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.fcam].get(1, 0));
((A * xmxp + B * ymyp) * m_disp[s.tile][s.fcam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][s.fcam].get(1, 0));
} else if (s.fcam == last_cam) {
for (int c = 0; c < NUM_CAMS; c++) if ((c != last_cam) && (par_map[DDISP_INDEX + c] >=0)) {
jt[par_map[DDISP_INDEX + c]][ns] -= 2 * WGp *
((A * xmxp + B * ymyp) * m_disp[s.fcam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.fcam].get(1, 0));
( (A * xmxp + B * ymyp) * m_disp[s.tile][s.fcam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][s.fcam].get(1, 0));
}
}
if (par_map[DDISP_INDEX + s.scam]>= 0){ // par_map[DDISP_INDEX + last_cam] always <0
jt[par_map[DDISP_INDEX + s.scam]][ns] -= 2 * WGp *
((A * xmxp + B * ymyp) * m_disp[s.scam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.scam].get(1, 0));
((A * xmxp + B * ymyp) * m_disp[s.tile][s.scam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][s.scam].get(1, 0));
} else if (s.scam == last_cam) {
for (int c = 0; c < NUM_CAMS; c++) if ((c != last_cam) && (par_map[DDISP_INDEX + c] >= 0)) {
jt[par_map[DDISP_INDEX + c]][ns] += 2 * WGp *
((A * xmxp + B * ymyp) * m_disp[s.scam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.scam].get(1, 0));
((A * xmxp + B * ymyp) * m_disp[s.tile][s.scam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.tile][s.scam].get(1, 0));
}
}
/*
if (((par_map[DDISP_INDEX + s.fcam] >= 0) || (s.fcam == last_cam)) &&
(((par_map[DDISP_INDEX + s.scam]>= 0) || (s.scam == last_cam)))) {
if (s.fcam != last_cam) {
if (par_map[DDISP_INDEX + s.fcam] >= 0) jt[par_map[DDISP_INDEX + s.fcam]][ns] += 2 * WGp *
((A * xmxp + B * ymyp) * m_disp[s.fcam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.fcam].get(1, 0));
} else { // last camera - use all others with minus sign
for (int c = 0; c < NUM_CAMS; c++) if ((c != last_cam) && (par_map[DDISP_INDEX + c] >=0)) {
jt[par_map[DDISP_INDEX + c]][ns] -= 2 * WGp *
((A * xmxp + B * ymyp) * m_disp[s.fcam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.fcam].get(1, 0));
}
}
if (s.scam != last_cam) {
if (par_map[DDISP_INDEX + s.scam] >= 0) jt[par_map[DDISP_INDEX + s.scam]][ns] -= 2 * WGp *
((A * xmxp + B * ymyp) * m_disp[s.scam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.scam].get(1, 0));
} else {
for (int c = 0; c < NUM_CAMS; c++) if ((c != last_cam) && (par_map[DDISP_INDEX + c] >= 0)) {
jt[par_map[DDISP_INDEX + c]][ns] += 2 * WGp *
((A * xmxp + B * ymyp) * m_disp[s.scam].get(0, 0)+
(B * xmxp + C * ymyp) * m_disp[s.scam].get(1, 0));
}
}
}
*/
// np += ncam -1;// -1 for the last_cam
// process ndisp
for (int f = 0; f < ncam; f++) if (par_map[NDISP_INDEX + f] >= 0) {
jt[par_map[NDISP_INDEX + f]][ns] = 0.0;
}
if (par_map[NDISP_INDEX + s.fcam] >=0){
jt[par_map[NDISP_INDEX + s.fcam]][ns] += 2 * WGp *
((A * xmxp + B * ymyp) * m_disp[s.fcam].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.fcam].get(1, 1));
( (A * xmxp + B * ymyp) * m_disp[s.tile][s.fcam].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.tile][s.fcam].get(1, 1));
}
if (par_map[NDISP_INDEX + s.scam] >= 0) {
jt[par_map[NDISP_INDEX + s.scam]][ns] -= 2 * WGp *
((A * xmxp + B * ymyp) * m_disp[s.scam].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.scam].get(1, 1));
( (A * xmxp + B * ymyp) * m_disp[s.tile][s.scam].get(0, 1)+
(B * xmxp + C * ymyp) * m_disp[s.tile][s.scam].get(1, 1));
}
// np += ncam;
}
}
// int np = 0;
for (int n = 0; n < NUM_CAMS; n++) { // av[DDISP_INDEX +last_cam] is already populated
fx[num_samples + n] = av[DDISP_INDEX + n];
fx[num_samples + NUM_CAMS + n] = av[NDISP_INDEX + n];
......@@ -564,11 +616,6 @@ public class Corr2dLMA {
// and derivatives
if (jt != null) {
// if (par_mask[DISP_INDEX]) np++;
// if (par_mask[A_INDEX]) np++;
// if (par_mask[B_INDEX]) np++;
// if (par_mask[CMA_INDEX]) np++;
// np+= npairs; // now it points to the ddisp block
for (int i = 0; i < NUM_CAMS; i++) {
if ((i != last_cam) && (par_map[DDISP_INDEX + i] >= 0)) {
for (int j = 0; j < NUM_CAMS; j++) { // j - column
......@@ -577,7 +624,6 @@ public class Corr2dLMA {
jt[par_map[DDISP_INDEX + i]][num_samples + last_cam] = -1.0;
}
}
// np now points at the first ndisp
for (int i = 0; i < NUM_CAMS; i++) {
if (par_map[NDISP_INDEX + i] >= 0) {
for (int j = 0; j < NUM_CAMS; j++) { // j - column
......@@ -593,10 +639,18 @@ public class Corr2dLMA {
public void printParams() { // not used in lwir
for (int np = 0; np < all_pars.length; np++) {
String parname;
if (np < G0_INDEX) parname = PAR_NAMES[np];
else if (np < DDISP_INDEX) parname = PAR_NAME_SCALE;
else if (np < NDISP_INDEX) parname = PAR_NAME_CORRDISP;
else parname = PAR_NAME_CORRNDISP;
// if (np < G0_INDEX) parname = PAR_NAMES[np];
// else if (np < DDISP_INDEX) parname = PAR_NAME_SCALE;
// else if (np < NDISP_INDEX) parname = PAR_NAME_CORRDISP;
// else parname = PAR_NAME_CORRNDISP;
if (np >= NDISP_INDEX) parname = PAR_NAME_CORRNDISP + (np - NDISP_INDEX);
else if (np >= DDISP_INDEX) parname = PAR_NAME_CORRDISP + (np - DDISP_INDEX);
else {
int ntile = np / TILE_PARAMS;
int anpr = np % TILE_PARAMS;
if (anpr < G0_INDEX) parname = PAR_NAMES[anpr]+"-"+ntile;
else parname = PAR_NAME_SCALE +"-"+ntile + ":"+ (anpr - G0_INDEX);
}
System.out.println(String.format("%2d%1s %22s %f",
np,
......@@ -618,7 +672,7 @@ public class Corr2dLMA {
double fx_pos = fx[i];
if (i < samples.size()) {
s = samples.get(i);
System.out.println(String.format("%3d: x=%2d y=%2d v=%9.6f fx=%9.6f w=%9.7f fcam=%1d scam=%1d", i, s.ix, s.iy, s.v, fx_pos, s.w, s.fcam, s.scam));
System.out.println(String.format("%3d: x=%2d y=%2d v=%9.6f fx=%9.6f w=%9.7f fcam=%1d scam=%1d tile=%d", i, s.ix, s.iy, s.v, fx_pos, s.w, s.fcam, s.scam, s.tile));
}
else {
System.out.println(String.format("%3d: %2s %2s v=%9.6f fx=%9.6f w=%9.7f", i, "-", "-", this.values[i], fx_pos, this.weights[i]));
......@@ -627,7 +681,7 @@ public class Corr2dLMA {
} else {
int ns =0;
for (Sample s:samples){
System.out.println(String.format("%3d: x=%2d y=%2d v=%9.6f w=%9.7f fcam=%1d scam=%1d", ns++, s.ix, s.iy, s.v, s.w, s.fcam, s.scam));
System.out.println(String.format("%3d: x=%2d y=%2d v=%9.6f w=%9.7f fcam=%1d scam=%1d tile=%d", ns++, s.ix, s.iy, s.v, s.w, s.fcam, s.scam, s.tile));
}
}
}
......@@ -643,28 +697,37 @@ public class Corr2dLMA {
return all_pars;
}
public double [] getDisparityStrength() { // USED in lwir
if (pair_weights == null) return null;
public double [] getDisparityStrength(int nTile) { // USED in lwir
//// if (pair_weights == null) return null;
double disparity = -all_pars[DISP_INDEX];
double sum_amp = 0.0;
for (int i = 0; i < NUM_PAIRS; i++) {
sum_amp += pair_weights[i] * all_pars[G0_INDEX + i]; // group_weights is normalized
//// sum_amp += pair_weights[i] * all_pars[G0_INDEX + i]; // group_weights is normalized
sum_amp += all_pars[G0_INDEX + i + TILE_PARAMS * nTile]; // group_weights is normalized
}
// protect from weird fitting results
double max_amp = 0.0;
for (Sample s: samples) if (s.v > max_amp) max_amp = s.v;
for (Sample s: samples) if ((s.v > max_amp) && (s.tile == nTile)) max_amp = s.v;
if (sum_amp > 1.25 * max_amp) sum_amp = max_amp;
double [] ds = {disparity, sum_amp};
return ds;
}
public double [] getDisparityStrengthABC() {// width = 1/sqrt(all_pars[A_INDEX])
double [] ds = getDisparityStrength();
public double [] getDisparityStrengthABC(int nTile) {// width = 1/sqrt(all_pars[A_INDEX])
double [] ds = getDisparityStrength(nTile);
if (ds == null) return null;
double [] dsw = {ds[0], ds[1], all_pars[A_INDEX], all_pars[B_INDEX],all_pars[CMA_INDEX]}; // asymmetry
double [] dsw = {ds[0], ds[1], all_pars[A_INDEX + TILE_PARAMS * nTile], all_pars[B_INDEX+ TILE_PARAMS * nTile],all_pars[CMA_INDEX+ TILE_PARAMS * nTile]}; // asymmetry
return dsw;
}
public double [] getLazyEye() {
double [] rslt = new double [2 * NUM_CAMS];
for (int i = 0; i < rslt.length; i++) {
rslt[i] = all_pars[DDISP_INDEX + i];
}
return rslt;
}
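A minimal readout sketch (not part of the commit), assuming a fitted instance lma after runLma(); numTiles here stands for the cluster size used to construct the instance:
for (int nTile = 0; nTile < numTiles; nTile++) {
    double [] ds  = lma.getDisparityStrength(nTile);     // {disparity, strength} for this tile
    double [] dsw = lma.getDisparityStrengthABC(nTile);   // adds A, B, C-A (maximum shape)
}
double [] lazy_eye = lma.getLazyEye(); // 2*NUM_CAMS values: DDISP per camera, then NDISP per camera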
public void toVector() { // USED in lwir
int np = 0;
for (int i = 0; i < par_mask.length; i++) if (par_mask[i]) np++;
......@@ -718,32 +781,36 @@ public class Corr2dLMA {
double [][] jt_delta = new double [num_pars][num_points];
double [] fx = getFxJt( vector,jt);
getFxJt(delta, vector,jt_delta);
System.out.println("Test of jt-jt_delta difference, delta = "+delta+ " ");
System.out.print(String.format(" %3s: %10s ", "#", "fx"));
System.out.println("Test of jt-jt_delta difference, delta = "+delta+ ":");
System.out.print(String.format("Til P %3s: %10s ", "#", "fx"));
for (int anp = 0; anp< all_pars.length; anp++) if(par_mask[anp]){
String parname;
if (anp < G0_INDEX) parname = PAR_NAMES[anp];
else if (anp < DDISP_INDEX) parname = PAR_NAME_SCALE + (anp - G0_INDEX);
else if (anp < NDISP_INDEX) parname = PAR_NAME_CORRDISP + (anp - DDISP_INDEX);
else parname = PAR_NAME_CORRNDISP + (anp - NDISP_INDEX);
if (anp >= NDISP_INDEX) parname = PAR_NAME_CORRNDISP + (anp - NDISP_INDEX);
else if (anp >= DDISP_INDEX) parname = PAR_NAME_CORRDISP + (anp - DDISP_INDEX);
else {
int ntile = anp / TILE_PARAMS;
int anpr = anp % TILE_PARAMS;
if (anpr < G0_INDEX) parname = PAR_NAMES[anpr]+"-"+ntile;
else parname = PAR_NAME_SCALE +"-"+ntile + ":"+ (anpr - G0_INDEX);
}
System.out.print(String.format("| %16s ", parname));
}
System.out.println();
int npair0 = -1;
for (int i = 0; i < num_points; i++) {
if (i < samples.size()) {
int npair = USED_PAIRS_MAP[samples.get(i).fcam][samples.get(i).scam];
int npair = USED_PAIRS_MAP[samples.get(i).tile][samples.get(i).fcam][samples.get(i).scam];
if (npair !=npair0) {
if (npair0 >=0) System.out.println();
npair0 = npair;
}
System.out.print(String.format("%1d %3d: %10.7f ", npair, i, fx[i]));
System.out.print(String.format("%3d %1d %3d: %10.7f ",samples.get(i).tile, npair, i, fx[i]));
} else {
System.out.print(String.format(" %3d: %10.7f ", i, fx[i]));
System.out.print(String.format(" - - %3d: %10.7f ", i, fx[i]));
}
for (int np = 0; np < num_pars; np++) {
System.out.print(String.format("|%8.5f %8.5f ", jt_delta[np][i], 1000*(jt[np][i] - jt_delta[np][i])));
// System.out.print(String.format("|%8.5f %8.5f ", jt_delta[np][i], 1000*(jt[np][i] - jt_delta[np][i])));
System.out.print(String.format("|%8.5f %8.5f ", jt_delta[np][i], 1.0 * (jt[np][i] - jt_delta[np][i])));
double adiff = Math.abs(jt[np][i] - jt_delta[np][i]);
if (adiff > max_diff[np]) {
max_diff[np] = adiff;
......@@ -751,7 +818,7 @@ public class Corr2dLMA {
}
System.out.println();
}
System.out.print(String.format(" %15s ", "Maximal diff:"));
System.out.print(String.format(" %15s ", "Maximal diff:"));
for (int np = 0; np < num_pars; np++) {
System.out.print(String.format("|%8s %8.5f ", "1/1000×", 1000*max_diff[np]));
}
......@@ -856,6 +923,7 @@ public class Corr2dLMA {
int debug_level)
{
boolean [] rslt = {false,false};
this.last_rms = null;
int iter = 0;
for (iter = 0; iter < num_iter; iter++) {
rslt = lmaStep(
......
......@@ -1752,15 +1752,369 @@ public class Correlation2d {
return rslt;
}
public Corr2dLMA corrLMA2( // USED in lwir
double [][] repackCluster(
double [][][] data,
int clust_width){
int clust_height = data.length/clust_width;
int corr_size = 2 * transform_size - 1;
int nslices = 0;
for (int i = 0; i < data.length; i++) {
if (data[i] != null) {
nslices = data[i].length;
break;
}
}
int out_width = (corr_size+1)*clust_width;
int out_height = (corr_size+1)*clust_height;
double [][] mosaic = new double [nslices][];
for (int ns = 0; ns < nslices; ns++) {
boolean slice_exists = false;
for (int i = 0; i < data.length; i++){
if ((data[i] != null) && (data[i][ns] != null)) {
slice_exists = true;
break;
}
}
if (slice_exists) {
mosaic[ns] = new double [out_height*out_width];
for (int cy = 0; cy < clust_height; cy++) {
for (int cx = 0; cx < clust_width; cx++) {
int nclust = cy * clust_width + cx;
int indx = cy * (corr_size+1)*out_width + cx * (corr_size+1);
if ((data[nclust] == null) || (data[nclust][ns] == null)) {
for (int i = 0; i < corr_size; i ++) {
for (int j = 0; j < corr_size; j ++) {
mosaic[ns][indx + j] = Double.NaN;
}
indx += out_width;
}
} else {
for (int i = 0; i < corr_size; i ++) {
for (int j = 0; j < corr_size; j ++) {
mosaic[ns][indx + j] = data[nclust][ns][i * corr_size + j];
}
indx += out_width;
}
}
}
}
}
}
return mosaic;
}
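A usage sketch (not in the commit) mirroring the debug display in corrLMA2() below: repackCluster() tiles each pair's per-tile correlation into one mosaic slice, with a one-pixel gap between tiles (corrs, clust_width, tileX, tileY are the surrounding method's variables):
int corr_size    = 2 * transform_size - 1;
int clust_height = corrs.length / clust_width;
double [][] mosaic = repackCluster(corrs, clust_width);       // one slice per correlation pair
(new ShowDoubleFloatArrays()).showArrays(
        mosaic,
        (corr_size + 1) * clust_width,                        // mosaic width
        (corr_size + 1) * clust_height,                       // mosaic height
        true,
        "corr_cluster_x" + tileX + "_y" + tileY);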
public Corr2dLMA corrLMA2( // multi-tile
ImageDttParameters imgdtt_params,
int clust_width,
double [][] corr_wnd, // correlation window to save on re-calculation of the window
double [] corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
double [] corr_wnd_inv_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
double [][][] corrs, // per tile, per pair, 2 correlation in line-scan order
double [][][] disp_dist, // per tile, per camera disparity matrix as a 1d (linescan order)
int pair_mask, // which pairs to process
boolean run_poly_instead, // true - run LMA, false - run 2d polynomial approximation
// double sigma, // low-pass sigma to find maximum (and convex too
double[][] xcenter_str, // preliminary center x in pixels for largest baseline
double vasw_pwr, // value as weight to this power,
int debug_level,
int tileX, // just for debug output
int tileY
)
{
// corrs are organized as PAIRS, some are null if not used
// for each enabled and available pair find a maximum, filter convex and create sample list
boolean debug_graphic = (debug_level > -1);
boolean debug_second_all = true;
int clust_height = corrs.length/clust_width;
int ntiles = corrs.length;
DoubleGaussianBlur gb = null;
if (imgdtt_params.lma_sigma > 0) gb = new DoubleGaussianBlur();
int center = transform_size - 1;
int corr_size = 2 * transform_size - 1;
Corr2dLMA lma = new Corr2dLMA(corrs.length,transform_size, corr_wnd);
double [][][] dbg_corr = debug_graphic ? new double [corrs.length][][] : null;
double [][][] dbg_weights = debug_graphic ? new double [corrs.length][][] : null;
int dbg_out_width = (corr_size + 1) * clust_width;
int dbg_out_height = (corr_size + 1) * clust_height;
if (debug_graphic) {
double [][] dbg_corrs = repackCluster(
corrs,
clust_width);
(new ShowDoubleFloatArrays()).showArrays(
dbg_corrs,
dbg_out_width,
dbg_out_height,
true,
"corr_pairs-"+"_x"+tileX+"_y"+tileY);
}
int numpairs = 0;
for (int ntile = 0; ntile < ntiles; ntile++) if (corrs[ntile] != null){
double[][] corr = new double[corrs[ntile].length][];
double [][] filtWeight = new double [corrs[ntile].length][];
for (int npair = 0; npair < corrs[ntile].length; npair++) if ((corrs[ntile][npair] != null) && (((pair_mask >> npair) & 1) !=0)){
corr[npair] = corrs[ntile][npair].clone();
if (corr_wnd_inv_limited != null) {
for (int i = 0; i < corr.length; i++) {
corr[npair][i] *= corr_wnd_inv_limited[i];
}
}
if (imgdtt_params.lma_sigma > 0) {
gb.blurDouble(corr[npair], corr_size, corr_size, imgdtt_params.lma_sigma, imgdtt_params.lma_sigma, 0.01);
}
int imx = imgdtt_params.lma_soft_marg * (corr_size + 1);
for (int iy = imgdtt_params.lma_soft_marg; iy < (corr_size - imgdtt_params.lma_soft_marg); iy++) {
for (int ix = imgdtt_params.lma_soft_marg; ix < (corr_size - imgdtt_params.lma_soft_marg); ix++) {
int indx = iy * corr_size + ix;
if (corr[npair][indx] > corr[npair][imx]) imx = indx;
}
}
// filter convex
int ix0 = (imx % corr_size) - center; // signed, around center to match filterConvex
int iy0 = (imx / corr_size) - center; // signed, around center to match filterConvex
filtWeight[npair] = filterConvex(
corr[npair], // double [] corr_data,
imgdtt_params.cnvx_hwnd_size, // int hwin,
ix0, // int x0,
iy0, // int y0,
imgdtt_params.cnvx_add3x3, // boolean add3x3,
imgdtt_params.cnvx_weight, // double nc_cost,
(debug_level > 2)); // boolean debug);
// remove too small clusters, or all collinear
if (!checkCluster(
filtWeight[npair], // double [] weights,
corr_size, // int size,
imgdtt_params.cnvx_min_samples, // int min_samples,
imgdtt_params.cnvx_non_coll // boolean non_coll
)) {
filtWeight[npair] = null;
} else {
numpairs++;
if (dbg_corr != null) {
if (dbg_corr[ntile] == null) {
dbg_corr[ntile] = new double [corrs[ntile].length][];
}
dbg_corr [ntile][npair] = corr[npair];
}
if (dbg_weights != null) {
if (dbg_weights[ntile] == null) {
dbg_weights[ntile] = new double [corrs[ntile].length][];
}
dbg_weights[ntile][npair] = filtWeight[npair];
}
}
// Normalize weight for each pair to compensate for different number of convex samples?
}
// numpairs
if (numpairs >= imgdtt_params.cnvx_min_pairs) {
for (int npair = 0; npair < corrs[ntile].length; npair++) if (filtWeight[npair] != null){
int fcam = PAIRS[npair][0];
int scam = PAIRS[npair][1];
for (int i = 1; i < filtWeight[npair].length; i++) if (filtWeight[npair][i] > 0.0) {
int ix = i % corr_size; // >=0
int iy = i / corr_size; // >=0
double v = corrs[ntile][npair][i]; // not blurred
double w = filtWeight[npair][i];
if (vasw_pwr != 0) {
w *= Math.pow(Math.abs(v), vasw_pwr);
}
lma.addSample( // x = 0, y=0 - center
ntile, // tile
fcam, // int fcam, // first camera index
scam, // int scam, // second camera index
ix, // int x, // x coordinate on the common scale (corresponding to the largest baseline), along the disparity axis
iy, // int y, // y coordinate (0 - disparity axis)
v, // double v, // correlation value at that point
w); //double w){ // sample weight
}
}
}
}
if (debug_graphic) {
double [][] dbg_corrs = repackCluster(
dbg_corr,
clust_width);
(new ShowDoubleFloatArrays()).showArrays(
dbg_corrs,
dbg_out_width,
dbg_out_height,
true,
"corr_blurred"+"_x"+tileX+"_y"+tileY);
double [][] dbg_w = repackCluster(
dbg_weights,
clust_width);
(new ShowDoubleFloatArrays()).showArrays(
dbg_w,
dbg_out_width,
dbg_out_height,
true,
"corr_weights"+"_x"+tileX+"_y"+tileY);
}
lma.initVector( // USED in lwir
imgdtt_params.lma_adjust_wm, // boolean adjust_width, // adjust width of the maximum - lma_adjust_wm
imgdtt_params.lma_adjust_ag, // boolean adjust_scales, // adjust 2D correlation scales - lma_adjust_ag
imgdtt_params.lma_adjust_wy, // boolean adjust_ellipse, // allow non-circular correlation maximums lma_adjust_wy
imgdtt_params.lma_adjust_wxy, // boolean adjust_lazyeye_par, // adjust disparity corrections parallel to disparities lma_adjust_wxy
imgdtt_params.lma_adjust_ly1, // boolean adjust_lazyeye_ortho, // adjust disparity corrections orthogonal to disparities lma_adjust_ly1
xcenter_str, // double [][] disp_str, // initial value of disparity/strength/?
imgdtt_params.lma_half_width, // double half_width, // A=1/(half_widh)^2 lma_half_width
imgdtt_params.lma_cost_wy, // double cost_lazyeye_par, // cost for each of the non-zero disparity corrections lma_cost_wy
imgdtt_params.lma_cost_wxy // double cost_lazyeye_odtho // cost for each of the non-zero ortho disparity corrections lma_cost_wxy
);
lma.setMatrices(disp_dist);
lma.initMatrices(); // should be called after initVector and after setMatrices
boolean lmaSuccess = false;
if (debug_level > 1) {
System.out.println("Input data:");
lma.printInputDataFx(false);
lma.printParams();
}
lmaSuccess = lma.runLma(
imgdtt_params.lma_lambda_initial, // double lambda, // 0.1
imgdtt_params.lma_lambda_scale_good, // double lambda_scale_good,// 0.5
imgdtt_params.lma_lambda_scale_bad, // double lambda_scale_bad, // 8.0
imgdtt_params.lma_lambda_max, // double lambda_max, // 100
imgdtt_params.lma_rms_diff, // double rms_diff, // 0.001
imgdtt_params.lma_num_iter, // int num_iter, // 20
2); //4); // debug_level); // int debug_level) // > 3
lma.updateFromVector();
double [] rms = lma.getRMS();
if (debug_level > 0) {
System.out.println("LMA -> "+lmaSuccess+" RMS="+rms[0]+", pure RMS="+rms[1]);
lma.printParams();
}
if (debug_level > 1) {
System.out.println("Input data and approximation:");
lma.printInputDataFx(true);
}
// boolean debug_second_all = true;
if (debug_second_all) {
double [] all_pars_save = lma.all_pars.clone();
lma.initVector( // USED in lwir
imgdtt_params.lma_adjust_wm, // boolean adjust_width, // adjust width of the maximum - lma_adjust_wm
imgdtt_params.lma_adjust_ag, // boolean adjust_scales, // adjust 2D correlation scales - lma_adjust_ag
imgdtt_params.lma_adjust_wy, // boolean adjust_ellipse, // allow non-circular correlation maximums lma_adjust_wy
true, // imgdtt_params.lma_adjust_wxy, // boolean adjust_lazyeye_par, // adjust disparity corrections parallel to disparities lma_adjust_wxy
true, // imgdtt_params.lma_adjust_ly1, // boolean adjust_lazyeye_ortho, // adjust disparity corrections orthogonal to disparities lma_adjust_ly1
xcenter_str, // double [][] disp_str, // initial value of disparity/strength/?
imgdtt_params.lma_half_width, // double half_width, // A=1/(half_widh)^2 lma_half_width
imgdtt_params.lma_cost_wy, // double cost_lazyeye_par, // cost for each of the non-zero disparity corrections lma_cost_wy
imgdtt_params.lma_cost_wxy // double cost_lazyeye_odtho // cost for each of the non-zero ortho disparity corrections lma_cost_wxy
);
lma.all_pars = all_pars_save;
lma.toVector();
lma.setMatrices(disp_dist);
lma.initMatrices(); // should be called after initVector and after setMatrices
//boolean
lmaSuccess = false;
if (debug_level > 1) {
System.out.println("Input data:");
lma.printInputDataFx(false);
lma.printParams();
}
lmaSuccess = lma.runLma(
imgdtt_params.lma_lambda_initial, // double lambda, // 0.1
imgdtt_params.lma_lambda_scale_good, // double lambda_scale_good,// 0.5
imgdtt_params.lma_lambda_scale_bad, // double lambda_scale_bad, // 8.0
imgdtt_params.lma_lambda_max, // double lambda_max, // 100
imgdtt_params.lma_rms_diff, // double rms_diff, // 0.001
imgdtt_params.lma_num_iter, // int num_iter, // 20
2); //4); // debug_level); // int debug_level) // > 3
lma.updateFromVector();
//double []
rms = lma.getRMS();
if (debug_level > 0) {
System.out.println("LMA -> "+lmaSuccess+" RMS="+rms[0]+", pure RMS="+rms[1]);
lma.printParams();
}
if (debug_level > 1) {
System.out.println("Input data and approximation:");
lma.printInputDataFx(true);
}
}
if (debug_graphic && lmaSuccess) {
String [] sliceTitles = lma.dbgGetSliceTiles();
if (corrs.length == 1) {
(new ShowDoubleFloatArrays()).showArrays(
lma.dbgGetSamples(0)[0],
corr_size,
corr_size,
true,
"corr_values"+"_x"+tileX+"_y"+tileY, sliceTitles);
(new ShowDoubleFloatArrays()).showArrays(
lma.dbgGetSamples(1)[0],
corr_size,
corr_size,
true,
"corr_weights"+"_x"+tileX+"_y"+tileY, sliceTitles);
(new ShowDoubleFloatArrays()).showArrays(
lma.dbgGetSamples(2)[0],
corr_size,
corr_size,
true,
"corr_fx"+"_x"+tileX+"_y"+tileY, sliceTitles);
} else {
(new ShowDoubleFloatArrays()).showArrays(
repackCluster(lma.dbgGetSamples(0),clust_width),
dbg_out_width,
dbg_out_height,
true,
"corr_values"+"_x"+tileX+"_y"+tileY, sliceTitles);
(new ShowDoubleFloatArrays()).showArrays(
repackCluster(lma.dbgGetSamples(1),clust_width),
dbg_out_width,
dbg_out_height,
true,
"corr_weights"+"_x"+tileX+"_y"+tileY, sliceTitles);
(new ShowDoubleFloatArrays()).showArrays(
repackCluster(lma.dbgGetSamples(2),clust_width),
dbg_out_width,
dbg_out_height,
true,
"corr_fx"+"_x"+tileX+"_y"+tileY, sliceTitles);
}
}
return lmaSuccess? lma: null;
}
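A caller-side sketch (not part of the commit) of the new multi-tile entry point above; the variable names are assumptions, and clust_width would typically come from CLTParameters.tileStep:
Corr2dLMA lma = corr2d.corrLMA2(
        imgdtt_params,
        clust_width,           // tiles per cluster row (e.g. tileStep)
        corr_wnd,              // correlation window
        corr_wnd_limited,
        corr_wnd_inv_limited,
        corrs,                 // [tile in cluster][pair][corr_size*corr_size]
        disp_dist,             // [tile in cluster][camera][4]
        pair_mask,
        false,                 // run_poly_instead
        xcenter_str,           // [tile in cluster]{disparity, strength}
        vasw_pwr,
        debug_level, tileX, tileY);
if (lma != null) {
    double [] lazy_eye = lma.getLazyEye(); // common per-camera corrections for the whole cluster
}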
public Corr2dLMA corrLMA2( // single tile
ImageDttParameters imgdtt_params,
double [][] corr_wnd, // correlation window to save on re-calculation of the window
double [] corr_wnd_inv_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
double [][] corrs,
double [][] disp_dist, // per camera disparity matrix as a 1d (linescan order)
int pair_mask, // which pairs to process
boolean run_poly_instead, // true - run LMA, false - run 2d polynomial approximation
double sigma, // low-pass sigma to find maximum (and convex too
// double sigma, // low-pass sigma to find maximum (and convex too
double xcenter, // preliminary center x in pixels for largest baseline
double vasw_pwr, // value as weight to this power,
int debug_level,
......@@ -1772,10 +2126,10 @@ public class Correlation2d {
// for each enabled and available pair find a maximum, filter convex and create sample list
boolean debug_graphic = (debug_level > -1);
DoubleGaussianBlur gb = null;
if (sigma > 0) gb = new DoubleGaussianBlur();
if (imgdtt_params.lma_sigma > 0) gb = new DoubleGaussianBlur();
int center = transform_size - 1;
int corr_size = 2 * transform_size - 1;
Corr2dLMA lma = new Corr2dLMA(transform_size, corr_wnd);
Corr2dLMA lma = new Corr2dLMA(1,transform_size, corr_wnd);
double [][] dbg_corr = debug_graphic ? new double [corrs.length][] : null;
double [][] dbg_weights = debug_graphic ? new double [corrs.length][] : null;
......@@ -1792,16 +2146,24 @@ public class Correlation2d {
for (int npair = 0; npair < corrs.length; npair++) if ((corrs[npair] != null) && (((pair_mask >> npair) & 1) !=0)){
double[] corr = corrs[npair].clone();
if (corr_wnd_limited != null) {
if (corr_wnd_inv_limited != null) {
for (int i = 0; i < corr.length; i++) {
corr[i] /= corr_wnd_limited[i];
corr[i] *= corr_wnd_inv_limited[i];
}
}
if (sigma > 0) {
gb.blurDouble(corr, corr_size, corr_size, sigma, sigma, 0.01);
if (imgdtt_params.lma_sigma > 0) {
gb.blurDouble(corr, corr_size, corr_size, imgdtt_params.lma_sigma, imgdtt_params.lma_sigma, 0.01);
}
int imx = 0;
for (int i = 1; i < corr.length; i++) if (corr[i] > corr[imx]) imx = i;
int imx = imgdtt_params.lma_soft_marg * (corr_size + 1);
for (int iy = imgdtt_params.lma_soft_marg; iy < (corr_size - imgdtt_params.lma_soft_marg); iy++) {
for (int ix = imgdtt_params.lma_soft_marg; ix < (corr_size - imgdtt_params.lma_soft_marg); ix++) {
int indx = iy * corr_size + ix;
if (corr[indx] > corr[imx]) imx = indx;
}
}
// filter convex
int ix0 = (imx % corr_size) - center; // signed, around center to match filterConvex
int iy0 = (imx / corr_size) - center; // signed, around center to match filterConvex
......@@ -1829,12 +2191,13 @@ public class Correlation2d {
w *= Math.pow(Math.abs(v), vasw_pwr);
}
lma.addSample( // x = 0, y=0 - center
0, // tile
fcam, // int fcam, // first camera index
scam, // int scam, // second camera index
ix, // int x, // x coordinate on the common scale (corresponding to the largest baseline), along the disparity axis
iy, // int y, // y coordinate (0 - disparity axis)
v, // double v, // correlation value at that point
w); //double w){ // sample weight
ix, // int x, // x coordinate on the common scale (corresponding to the largest baseline), along the disparity axis
iy, // int y, // y coordinate (0 - disparity axis)
v, // double v, // correlation value at that point
w); //double w){ // sample weight
}
}
......@@ -1854,6 +2217,7 @@ public class Correlation2d {
"corr_weights"+"_x"+tileX+"_y"+tileY);
}
double [][] disp_str = {{xcenter, 1.0}}; // temporary
lma.initVector( // USED in lwir
imgdtt_params.lma_adjust_wm, // boolean adjust_width, // adjust width of the maximum - lma_adjust_wm
......@@ -1862,7 +2226,7 @@ public class Correlation2d {
imgdtt_params.lma_adjust_wxy, // boolean adjust_lazyeye_par, // adjust disparity corrections parallel to disparities lma_adjust_wxy
imgdtt_params.lma_adjust_ly1, // boolean adjust_lazyeye_ortho, // adjust disparity corrections orthogonal to disparities lma_adjust_ly1
xcenter, // double disp0, // initial value of disparity
disp_str, // xcenter, // double disp0, // initial value of disparity
imgdtt_params.lma_half_width, // double half_width, // A=1/(half_widh)^2 lma_half_width
imgdtt_params.lma_cost_wy, // double cost_lazyeye_par, // cost for each of the non-zero disparity corrections lma_cost_wy
imgdtt_params.lma_cost_wxy // double cost_lazyeye_ortho // cost for each of the non-zero ortho disparity corrections lma_cost_wxy
......@@ -1899,31 +2263,44 @@ public class Correlation2d {
}
if (debug_graphic && lmaSuccess) {
String [] sliceTitles = lma.dbgGetSliceTiles();
if (corrs.length == 1) { // only for single-tile cluster
(new ShowDoubleFloatArrays()).showArrays(
lma.dbgGetSamples(0)[0],
corr_size,
corr_size,
true,
"corr_values"+"_x"+tileX+"_y"+tileY, sliceTitles);
(new ShowDoubleFloatArrays()).showArrays(
lma.dbgGetSamples(1)[0],
corr_size,
corr_size,
true,
"corr_weights"+"_x"+tileX+"_y"+tileY, sliceTitles);
(new ShowDoubleFloatArrays()).showArrays(
lma.dbgGetSamples(2)[0],
corr_size,
corr_size,
true,
"corr_fx"+"_x"+tileX+"_y"+tileY, sliceTitles);
}
}
(new ShowDoubleFloatArrays()).showArrays(
lma.dbgGetSamples(0),
corr_size,
corr_size,
true,
"corr_values"+"_x"+tileX+"_y"+tileY, sliceTitles);
return lmaSuccess? lma: null;
}
(new ShowDoubleFloatArrays()).showArrays(
lma.dbgGetSamples(1),
corr_size,
corr_size,
true,
"corr_weights"+"_x"+tileX+"_y"+tileY, sliceTitles);
/*
double [][] dbg_w = repackCluster(
dbg_weights,
clust_width);
(new ShowDoubleFloatArrays()).showArrays(
lma.dbgGetSamples(2),
corr_size,
corr_size,
dbg_w,
dbg_out_width,
dbg_out_height,
true,
"corr_fx"+"_x"+tileX+"_y"+tileY, sliceTitles);
}
"corr_weights"+"_x"+tileX+"_y"+tileY);
return lmaSuccess? lma: null;
}
*/
public Correlations2dLMA corrLMA( // USED in lwir
ImageDttParameters imgdtt_params,
......@@ -2338,6 +2715,45 @@ public class Correlation2d {
return lmaSuccess? lma: null;
}
/**
* Verify that a 2D correlation pair has enough enabled samples and that they are not all collinear (if requested)
* @param weights 2D correlation weights (selected if > 0) in line-scan order
* @param size size of the square side (15 for transform size 8)
* @param min_samples minimal required number of non-zero samples
* @param non_coll require non-collinear samples
* @return true if the data matches requirements
*/
public boolean checkCluster(
double [] weights,
int size,
int min_samples,
boolean non_coll
) {
int num_samples = 0;
int [][] first2 = new int [2][2];
boolean noncolinOK = !non_coll;
for (int iy = 0; iy < size; iy++) {
for (int ix = 0; ix < size; ix++) {
if (weights[iy * size + ix] > 0) {
if (num_samples == 0) {
first2[0][0] = ix; // x0
first2[0][1] = iy; // y0
} else if (num_samples == 1) {
first2[1][0] = ix - first2[0][0]; // dx1
first2[1][1] = iy - first2[0][1]; // dy1
} else {
noncolinOK |= ((ix-first2[0][0])*first2[1][1] != (iy-first2[0][1])*first2[1][0]);
}
num_samples++;
if (noncolinOK && (num_samples >= min_samples)) {
return true;
}
}
}
}
return false;
}
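/**
 * Illustrative sketch added by the editor (not part of this commit): one possible way
 * to combine checkCluster() above with the new cnvx_min_pairs parameter - count the
 * correlation pairs of a tile that pass the per-pair check and require a minimal
 * number of good pairs. The method name and the exact integration point are assumptions.
 */
public boolean checkTilePairs( // hypothetical helper, for illustration only
double [][] pair_weights, // per-pair 2D correlation weights, null - pair disabled
int size, // 2 * transform_size - 1 (15 for transform size 8)
int min_samples, // imgdtt_params.cnvx_min_samples
boolean non_coll, // imgdtt_params.cnvx_non_coll
int min_pairs) // imgdtt_params.cnvx_min_pairs
{
int num_good_pairs = 0;
for (int npair = 0; npair < pair_weights.length; npair++) {
if ((pair_weights[npair] != null) &&
checkCluster(pair_weights[npair], size, min_samples, non_coll)) {
num_good_pairs++;
}
}
return num_good_pairs >= min_pairs;
}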
/**
* Create a mask of usable points, allowing only the first non-bi-convex points away from the center
* @param corr_data correlation data, packed in linescan order
......
......@@ -1512,6 +1512,640 @@ public class ImageDtt {
}
}
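/**
 * Illustrative sketch added by the editor (not part of this commit): the raised-cosine
 * window built below in clt_aberrations_quad_corr_min() to select the center correlation
 * strip. ortho_height is the full width of the non-zero elements, ortho_eff_height is the
 * effective height (ratio of the weighted column sum to the center value). Extracting it
 * into a helper is an assumption - in the method the loop is inlined.
 */
public static double [] centerStripWindow( // hypothetical helper, for illustration only
int corr_size, // 2 * transform_size - 1
int ortho_height, // imgdtt_params.ortho_height
double ortho_eff_height) // imgdtt_params.ortho_eff_height
{
int wcenter = (corr_size - 1) / 2; // == transform_size - 1
double [] ortho_weights = new double [corr_size]; // zero outside the strip
for (int i = 0; i < corr_size; i++) {
if ((i >= wcenter - ortho_height / 2) && (i <= wcenter + ortho_height / 2)) {
double dx = 1.0 * (i - wcenter) / (ortho_height / 2 + 1);
ortho_weights[i] = 0.5 * (1.0 + Math.cos(Math.PI * dx)) / ortho_eff_height;
}
}
return ortho_weights;
}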
// removing macro and FPGA modes
public double [][][][][][] clt_aberrations_quad_corr_min( // USED in LWIR
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include most others
// final int macro_scale, // to correlate tile data instead of the pixel data: 1 - pixels, 8 - tiles
final int [][] tile_op, // [tilesY][tilesX] - what to do - 0 - nothing for this tile
final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
final double [][][] image_data, // first index - number of image in a quad
final boolean [][] saturation_imp, // (near) saturated pixels or null
// correlation results - final and partial
// final double [][][][] clt_corr_combo, // [type][tilesY][tilesX][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
// [type][tilesY][tilesX] should be set by caller
// types: 0 - selected correlation (product+offset), 1 - sum
// final double [][][][][] clt_corr_partial,// [tilesY][tilesX][quad][color][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
// [tilesY][tilesX] should be set by caller
// When clt_mismatch is non-zero, no far objects extraction will be attempted
// final double [][] clt_mismatch, // [12][tilesY * tilesX] // ***** transpose unapplied ***** ?. null - do not calculate
// values in the "main" directions have disparity (*_CM) subtracted, in the perpendicular - as is
final double [][] disparity_map, // [8][tilesY][tilesX], only [6][] is needed on input or null - do not calculate
// last 2 - contrast, avg ("geometric average")
// final double [][][][] texture_tiles, // [tilesY][tilesX]["RGBA".length()][]; null - will skip images combining
final int width,
final double corr_fat_zero, // add to denominator to modify phase correlation (same units as data1, data2). <0 - pure sum
// final boolean corr_sym,
// final double corr_offset,
final double corr_red,
final double corr_blue,
final double corr_sigma,
// final boolean corr_normalize, // normalize correlation results by rms
final double min_corr, // 0.02; // minimal correlation value to consider valid
// final double max_corr_sigma, // 1.2; // weights of points around global max to find fractional
// final double max_corr_radius, // 3.9;
// final boolean max_corr_double, //"Double pass when masking center of mass to reduce preference for integer values
// final int corr_mode, // Correlation mode: 0 - integer max, 1 - center of mass, 2 - polynomial
// final double min_shot, // 10.0; // Do not adjust for shot noise if lower than
// final double scale_shot, // 3.0; // scale when dividing by sqrt ( <0 - disable correction)
// final double diff_sigma, // 5.0;//RMS difference from average to reduce weights (~ 1.0 - 1/255 full scale image)
// final double diff_threshold, // 5.0; // RMS difference from average to discard channel (~ 1.0 - 1/255 full scale image)
// final boolean diff_gauss, // true; // when averaging images, use Gaussian around average as weight (false - sharp all/nothing)
// final double min_agree, // 3.0; // minimal number of channels to agree on a point (real number to work with fuzzy averages)
// final boolean dust_remove, // Do not reduce average weight when only one image differs much from the average
// final boolean keep_weights, // Add port weights to RGBA stack (debug feature)
final GeometryCorrection geometryCorrection,
final GeometryCorrection geometryCorrection_main, // if not null correct this camera (aux) to the coordinates of the main
final double [][][][][][] clt_kernels, // [channel_in_quad][color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
final int kernel_step,
final int transform_size,
final int window_type,
final double [][] shiftXY, // [port]{shiftX,shiftY}
final double disparity_corr, // disparity at infinity
// final double [][][] fine_corr, // quadratic coefficients for fine correction (or null)
// final double corr_magic_scale, // still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
final double shiftX, // shift image horizontally (positive - right) - just for testing
final double shiftY, // shift image vertically (positive - down)
final int tileStep, // process tileStep x tileStep cluster of tiles when adjusting lazy eye parameters
final int debug_tileX,
final int debug_tileY,
// final boolean no_fract_shift,
// final boolean no_deconvolution,
final int threadsMax, // maximal number of threads to launch
final int globalDebugLevel)
{
final boolean debug_distort= true;
// final double [][] debug_offsets = null;
final double [][] debug_offsets = imgdtt_params.lma_dbg_offset;
// final double [][] debug_offsets = {{ 0.5, 0.0},{ -0.5, 0.0},{-0.5, 0.0},{ 0.5, 0.0}}; // add to calculated CenterXY for evaluating new LMA
// final double [][] debug_offsets = {{ 1.0, 0.0},{ -1.0, 0.0},{-1.0, 0.0},{ 1.0, 0.0}}; // add to calculated CenterXY for evaluating new LMA
// final double [][] debug_offsets = {{ 0.0, 1.0},{ 0.0, -1.0},{ 0.0, -1.0},{ 0.0, 1.0}}; // add to calculated CenterXY for evaluating new LMA
final int quad = 4; // number of subcameras
final int numcol = 3; // number of colors // keep the same, just do not use [0] and [1], [2] - green
// final int numColors = image_data[0].length;
final int height=image_data[0][0].length/width;
final int tilesX=width/transform_size;
final int tilesY=height/transform_size;
final int clustersX= (tilesX + tileStep - 1) / tileStep;
final int clustersY= (tilesY + tileStep - 1) / tileStep;
// final int nTilesInChn=tilesX*tilesY;
final int nClustersInChn=clustersX * clustersY;
final int clustSize = tileStep*tileStep;
///tileStep
final double [][][][][][] clt_data = new double[quad][numcol][tilesY][tilesX][][];
final Thread[] threads = newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final double [] col_weights= new double [numcol]; // colors are RBG
final double [][] dbg_distort = debug_distort? (new double [4*quad][tilesX*tilesY]) : null;
// keep for now for mono, find out what they mean for macro mode
if (isMonochrome()) {
col_weights[2] = 1.0;// green color/mono
col_weights[0] = 0;
col_weights[1] = 0;
} else {
col_weights[2] = 1.0/(1.0 + corr_red + corr_blue); // green color
col_weights[0] = corr_red * col_weights[2];
col_weights[1] = corr_blue * col_weights[2];
}
final int corr_size = transform_size * 2 -1;
if ((globalDebugLevel > -10) && (disparity_corr != 0.0)){
System.out.println(String.format("Using manual infinity disparity correction of %8.5f pixels",disparity_corr));
}
final double [] enh_ortho_scale = new double [corr_size];
for (int i = 0; i < corr_size; i++){
if ((i < (transform_size - imgdtt_params.getEnhOrthoWidth(isAux()))) || (i > (transform_size - 2 + imgdtt_params.getEnhOrthoWidth(isAux())))) {
enh_ortho_scale[i] = 1.0;
} else {
enh_ortho_scale[i] = imgdtt_params.getEnhOrthoScale(isAux());
}
if (i == (transform_size-1)) enh_ortho_scale[i] = 0.0 ; // hardwired 0 in the center
enh_ortho_scale[i] *= Math.sin(Math.PI*(i+1.0)/(2*transform_size));
}
if (globalDebugLevel > 1){
System.out.println("getEnhOrthoWidth(isAux())="+ imgdtt_params.getEnhOrthoWidth(isAux())+" getEnhOrthoScale(isAux())="+ imgdtt_params.getEnhOrthoScale(isAux()));
for (int i = 0; i < corr_size; i++){
System.out.println(" enh_ortho_scale["+i+"]="+ enh_ortho_scale[i]);
}
}
// Create window to select center correlation strip using
// ortho_height - full width of non-zero elements
// ortho_eff_height - effective height (ratio of the weighted column sum to the center value)
int wcenter = transform_size - 1;
final double [] ortho_weights = new double [corr_size]; // [15]
for (int i = 0; i < corr_size; i++){
if ((i >= wcenter - imgdtt_params.ortho_height/2) && (i <= wcenter + imgdtt_params.ortho_height/2)) {
double dx = 1.0*(i-wcenter)/(imgdtt_params.ortho_height/2 + 1);
ortho_weights[i] = 0.5*(1.0+Math.cos(Math.PI*dx))/imgdtt_params.ortho_eff_height;
}
}
if (globalDebugLevel > 0){
System.out.println("ortho_height="+ imgdtt_params.ortho_height+" ortho_eff_height="+ imgdtt_params.ortho_eff_height);
for (int i = 0; i < corr_size; i++){
System.out.println(" ortho_weights["+i+"]="+ ortho_weights[i]);
}
}
if (globalDebugLevel > 0) {
System.out.println("clt_aberrations_quad_corr(): width="+width+" height="+height+" transform_size="+transform_size+
" debug_tileX="+debug_tileX+" debug_tileY="+debug_tileY+" globalDebugLevel="+globalDebugLevel);
}
final int transform_len = transform_size * transform_size;
final double [] filter_direct= new double[transform_len];
if (corr_sigma == 0) {
filter_direct[0] = 1.0;
for (int i= 1; i<filter_direct.length;i++) filter_direct[i] =0;
} else {
for (int i = 0; i < transform_size; i++){
for (int j = 0; j < transform_size; j++){
filter_direct[i*transform_size+j] = Math.exp(-(i*i+j*j)/(2*corr_sigma)); // FIXME: should be sigma*sigma !
}
}
}
// normalize
double sum = 0;
for (int i = 0; i < transform_size; i++){
for (int j = 0; j < transform_size; j++){
double d = filter_direct[i*transform_size+j];
d*=Math.cos(Math.PI*i/(2*transform_size))*Math.cos(Math.PI*j/(2*transform_size));
if (i > 0) d*= 2.0;
if (j > 0) d*= 2.0;
sum +=d;
}
}
for (int i = 0; i<filter_direct.length; i++){
filter_direct[i] /= sum;
}
DttRad2 dtt = new DttRad2(transform_size);
final double [] filter= dtt.dttt_iiie(filter_direct);
for (int i=0; i < filter.length;i++) filter[i] *= 2*transform_size;
// prepare disparity maps and weights
if (globalDebugLevel > 0){
// System.out.println("max_corr_radius= "+max_corr_radius);
System.out.println("corr_fat_zero= "+corr_fat_zero);
System.out.println("disparity_array[0][0]= "+disparity_array[0][0]);
}
// add optional initialization of debug layers here
if (disparity_map != null){
for (int i = 0; i<disparity_map.length;i++){
if (i < OVEREXPOSED) {
disparity_map[i] = new double [tilesY*tilesX];
} else if (i == OVEREXPOSED) {
if (saturation_imp!= null) {
disparity_map[i] = new double [tilesY*tilesX];
}
}
}
}
dtt.set_window(window_type);
final double [] lt_window = dtt.getWin2d(); // [256]
final double [] lt_window2 = new double [lt_window.length]; // squared
for (int i = 0; i < lt_window.length; i++) lt_window2[i] = lt_window[i] * lt_window[i];
if (globalDebugLevel > 1) {
ShowDoubleFloatArrays sdfa_instance = new ShowDoubleFloatArrays(); // just for debugging?
sdfa_instance.showArrays(lt_window, 2*transform_size, 2*transform_size, "lt_window");
}
Matrix [] corr_rots_aux = null;
if (geometryCorrection_main != null) {
corr_rots_aux = geometryCorrection.getCorrVector().getRotMatrices(geometryCorrection.getRotMatrix(true));
}
final boolean use_main = corr_rots_aux != null;
final Matrix [] corr_rots = use_main ? corr_rots_aux : geometryCorrection.getCorrVector().getRotMatrices(); // get array of per-sensor rotation matrices
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
DttRad2 dtt = new DttRad2(transform_size);
dtt.set_window(window_type);
int tileY,tileX, clustX, clustY, cTile, tIndex; // , chn;
// showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
double centerX; // center of aberration-corrected (common model) tile, X
double centerY; //
double [][] fract_shiftsXY = new double[quad][];
double [][] corr_wnd = (new Corr2dLMA(1, transform_size, null)).getCorrWnd();
double [] corr_wnd_inv_limited = null;
if (imgdtt_params.lma_min_wnd <= 1.0) {
corr_wnd_inv_limited = new double [corr_wnd.length * corr_wnd[0].length];
for (int i = imgdtt_params.lma_hard_marg; i < (corr_wnd.length - imgdtt_params.lma_hard_marg); i++) {
for (int j = imgdtt_params.lma_hard_marg; j < (corr_wnd.length - imgdtt_params.lma_hard_marg); j++) {
corr_wnd_inv_limited[i * (corr_wnd.length) + j] = 1.0/Math.max(Math.pow(corr_wnd[i][j],imgdtt_params.lma_wnd_pwr), imgdtt_params.lma_min_wnd);
}
}
}
Correlation2d corr2d = new Correlation2d(
imgdtt_params, // ImageDttParameters imgdtt_params,
transform_size, // int transform_size,
2.0, // double wndx_scale, // (wndy scale is always 1.0)
isMonochrome(), // boolean monochrome,
(globalDebugLevel > -1)); // boolean debug)
corr2d.createOrtoNotch(
imgdtt_params.getEnhOrthoWidth(isAux()), // double getEnhOrthoWidth(isAux()),
imgdtt_params.getEnhOrthoScale(isAux()), //double getEnhOrthoScale(isAux()),
(imgdtt_params.lma_debug_level > 1)); // boolean debug);
// for (int nTile = ai.getAndIncrement(); nTile < nTilesInChn; nTile = ai.getAndIncrement()) {
for (int nCluster = ai.getAndIncrement(); nCluster < nClustersInChn; nCluster = ai.getAndIncrement()) {
clustY = nCluster / clustersX;
clustX = nCluster % clustersX;
double [][][] centersXY = new double [clustSize][][];
double [][][] disp_dist = new double[clustSize][quad][]; // used to correct 3D correlations
double [][][] corrs = new double [clustSize][][];
double [][] corr_stat = new double [clustSize][];
double [] strength = new double [clustSize];
double [] disparity = new double [clustSize];
boolean debugCluster = (clustX == (debug_tileX / tileStep)) && (clustY == (debug_tileY / tileStep));
int clust_lma_debug_level = debugCluster? imgdtt_params.lma_debug_level : -5;
for (int cTileY = 0; cTileY < tileStep; cTileY++) {
tileY = clustY * tileStep + cTileY ;
if (tileY < tilesY) {
for (int cTileX = 0; cTileX < tileStep; cTileX++) {
tileX = clustX * tileStep + cTileX ;
if (tileX < tilesX) {
// tileY = nTile /tilesX;
// tileX = nTile % tilesX;
cTile = cTileY * tileStep + cTileX;
tIndex = tileY * tilesX + tileX;
int nTile = tileY * tilesX + tileX; // how is it different from tIndex?
if (tile_op[tileY][tileX] == 0) continue; // nothing to do for this tile
boolean debugTile =(tileX == debug_tileX) && (tileY == debug_tileY);
final int [] overexp_all = (saturation_imp != null) ? ( new int [2]): null;
// Moved from inside chn loop
centerX = tileX * transform_size + transform_size/2 - shiftX;
centerY = tileY * transform_size + transform_size/2 - shiftY;
// TODO: move port coordinates out of color channel loop
// double [][] centersXY;
// double [][] disp_dist = new double[quad][]; // used to correct 3D correlations
if ((disparity_array == null) || (disparity_array[tileY] == null) || (Double.isNaN(disparity_array[tileY][tileX]))) {
System.out.println("Bug with disparity_array !!!");
continue; // nothing to do for this tile
}
if (use_main) { // this is AUX camera that uses main coordinates // not used in lwir
centersXY[cTile] = geometryCorrection.getPortsCoordinatesAndDerivatives(
geometryCorrection_main, // GeometryCorrection gc_main,
true, // boolean use_rig_offsets,
corr_rots, // Matrix [] rots,
null, // Matrix [][] deriv_rots,
null, // double [][] pXYderiv, // if not null, should be double[8][]
disp_dist[cTile], // used to correct 3D correlations
centerX,
centerY,
disparity_array[tileY][tileX] + disparity_corr); // _aux); // + disparity_corr);
} else { // used in lwir
centersXY[cTile] = geometryCorrection.getPortsCoordinatesAndDerivatives(
geometryCorrection, // GeometryCorrection gc_main,
false, // boolean use_rig_offsets,
corr_rots, // Matrix [] rots,
null, // Matrix [][] deriv_rots,
null, // double [][] pXYderiv, // if not null, should be double[8][]
disp_dist[cTile], // used to correct 3D correlations
centerX,
centerY,
disparity_array[tileY][tileX] + disparity_corr);
}
if (((globalDebugLevel > 0) || debug_distort) && debugTile) {
for (int i = 0; i < quad; i++) {
System.out.println("clt_aberrations_quad_corr(): tileX="+tileX+", tileY="+tileY+
" centerX="+centerX+" centerY="+centerY+" disparity="+disparity_array[tileY][tileX]+
" centersXY["+cTile+"]["+i+"][0]="+centersXY[0][i][0]+" centersXY["+cTile+"]["+i+"][1]="+centersXY[cTile][i][1]);
}
}
if (debug_distort && debugCluster && (debug_offsets != null)) {
double [][] debug_offsets_xy = new double [debug_offsets.length][2];
for (int i = 0; i < debug_offsets.length; i++) {
debug_offsets_xy[i][0] = disp_dist[cTile][i][0] * debug_offsets[i][0] + disp_dist[cTile][i][1] * debug_offsets[i][1];
debug_offsets_xy[i][1] = disp_dist[cTile][i][2] * debug_offsets[i][0] + disp_dist[cTile][i][3] * debug_offsets[i][1];
}
for (int i = 0; i < quad; i++) {
System.out.println(String.format("%d: {%8.3f, %8.3f}",i,debug_offsets_xy[i][0],debug_offsets_xy[i][1]));
}
for (int i = 0; i < debug_offsets.length; i++) {
centersXY[cTile][i][0] += debug_offsets_xy[i][0];
centersXY[cTile][i][1] += debug_offsets_xy[i][1];
}
for (int i = 0; i < quad; i++) {
System.out.println("Corrected clt_aberrations_quad_corr(): tileX="+tileX+", tileY="+tileY+
" centerX="+centerX+" centerY="+centerY+" disparity="+disparity_array[tileY][tileX]+
" centersXY["+cTile+"]["+i+"][0]="+centersXY[cTile][i][0]+" centersXY["+cTile+"]["+i+"][1]="+centersXY[cTile][i][1]);
}
}
if ((globalDebugLevel > -1) && (tileX == debug_tileX) && (tileY == debug_tileY)) { // before correction
System.out.print(disparity_array[tileY][tileX]+"\t"+
centersXY[cTile][0][0]+"\t"+centersXY[cTile][0][1]+"\t"+
centersXY[cTile][1][0]+"\t"+centersXY[cTile][1][1]+"\t"+
centersXY[cTile][2][0]+"\t"+centersXY[cTile][2][1]+"\t"+
centersXY[cTile][3][0]+"\t"+centersXY[cTile][3][1]+"\t");
}
for (int ip = 0; ip < centersXY[cTile].length; ip++){
centersXY[cTile][ip][0] -= shiftXY[ip][0];
centersXY[cTile][ip][1] -= shiftXY[ip][1];
}
// save disparity distortions for visualization:
for (int cam = 0; cam <quad; cam++) {
dbg_distort[cam * 4 + 0 ][nTile] = disp_dist[cTile][cam][0];
dbg_distort[cam * 4 + 1 ][nTile] = disp_dist[cTile][cam][1];
dbg_distort[cam * 4 + 2 ][nTile] = disp_dist[cTile][cam][2];
dbg_distort[cam * 4 + 3 ][nTile] = disp_dist[cTile][cam][3];
}
for (int ncol = 0; ncol <numcol; ncol++) {
if (!isMonochrome() || (ncol == MONO_CHN)) { // in monochrome mode skip all non-mono (green) channels // used in lwir (5 of 6 branches)
if ((globalDebugLevel > -1) && (tileX == debug_tileX) && (tileY == debug_tileY) && (ncol == 2)) {
System.out.println("\nUsing "+"PIXEL"+" mode, centerX="+centerX+", centerY="+centerY);
System.out.println(disparity_array[tileY][tileX]+"\t"+
centersXY[cTile][0][0]+"\t"+centersXY[cTile][0][1]+"\t"+
centersXY[cTile][1][0]+"\t"+centersXY[cTile][1][1]+"\t"+
centersXY[cTile][2][0]+"\t"+centersXY[cTile][2][1]+"\t"+
centersXY[cTile][3][0]+"\t"+centersXY[cTile][3][1]+"\t");
}
for (int i = 0; i < quad; i++) {
clt_data[i][ncol][tileY][tileX] = new double [4][];
// Extract image tiles and kernels, correct aberrations, return (but do not apply) fractional shifts
fract_shiftsXY[i] = extract_correct_tile( // return a pair of residual offsets
image_data[i],
width, // image width
((clt_kernels == null) ? null : clt_kernels[i]), // [color][tileY][tileX][band][pixel]
clt_data[i][ncol][tileY][tileX], //double [][] clt_tile, // should be double [4][];
kernel_step,
transform_size,
dtt,
ncol,
centersXY[cTile][i][0], // centerX, // center of aberration-corrected (common model) tile, X
centersXY[cTile][i][1], // centerY, //
((!FPGA_COMPARE_DATA && (globalDebugLevel > -1) && (tileX == debug_tileX) && (tileY == debug_tileY) && (ncol == 2) && (i==0)) ? (globalDebugLevel + 0) : 0), // external tile compare
false, // no_deconvolution,
false, // ); // transpose);
((saturation_imp != null) ? saturation_imp[i] : null), //final boolean [][] saturation_imp, // (near) saturated pixels or null
((saturation_imp != null) ? overexp_all: null)); // final double [] overexposed)
} // for (int i = 0; i < quad; i++)
if ((globalDebugLevel > -1) && (tileX == debug_tileX) && (tileY == debug_tileY) && (ncol == 2)) {
System.out.println();
}
if ((globalDebugLevel > 0) && (debug_tileX == tileX) && (debug_tileY == tileY) && (ncol == 2) && !FPGA_COMPARE_DATA) {
ShowDoubleFloatArrays sdfa_instance = new ShowDoubleFloatArrays(); // just for debugging?
String [] titles = {"CC0","SC0","CS0","SS0","CC1","SC1","CS1","SS1","CC2","SC2","CS2","SS2","CC3","SC3","CS3","SS3"};
double [][] dbg_tile = new double [16][];
for (int i = 0; i < 16; i++) dbg_tile[i]=clt_data[i>>2][ncol][tileY][tileX][i & 3];
sdfa_instance.showArrays(dbg_tile, transform_size, transform_size, true, "pre-shifted_x"+tileX+"_y"+tileY, titles);
}
if ((globalDebugLevel > 0) && (tileX >= debug_tileX - 2) && (tileX <= debug_tileX + 2) &&
(tileY >= debug_tileY - 2) && (tileY <= debug_tileY+2)) {
for (int i = 0; i < quad; i++) {
System.out.println("clt_aberrations_quad(): color="+ncol+", tileX="+tileX+", tileY="+tileY+
" fract_shiftsXY["+i+"][0]="+fract_shiftsXY[i][0]+" fract_shiftsXY["+i+"][1]="+fract_shiftsXY[i][1]);
}
}
// apply residual shift
for (int i = 0; i < quad; i++) {
fract_shift( // fractional shift in transform domain. Currently uses sin/cos - change to tables with 2π rotations
clt_data[i][ncol][tileY][tileX], // double [][] clt_tile,
transform_size,
fract_shiftsXY[i][0], // double shiftX,
fract_shiftsXY[i][1], // double shiftY,
// (globalDebugLevel > 0) && (tileX == debug_tileX) && (tileY == debug_tileY)); // external tile compare
((globalDebugLevel > 1) &&
((ncol==0) || isMonochrome()) &&
(tileX >= debug_tileX - 2) && (tileX <= debug_tileX + 2) &&
(tileY >= debug_tileY - 2) && (tileY <= debug_tileY+2)));
}
if ((globalDebugLevel > 0) && (debug_tileX == tileX) && (debug_tileY == tileY)) {
ShowDoubleFloatArrays sdfa_instance = new ShowDoubleFloatArrays(); // just for debugging?
String [] titles = {"CC0","SC0","CS0","SS0","CC1","SC1","CS1","SS1","CC2","SC2","CS2","SS2","CC3","SC3","CS3","SS3"};
double [][] dbg_tile = new double [16][];
for (int i = 0; i < 16; i++) dbg_tile[i]=clt_data[i>>2][ncol][tileY][tileX][i & 3];
sdfa_instance.showArrays(dbg_tile, transform_size, transform_size, true, "shifted_x"+tileX+"_y"+tileY+"-z", titles);
}
} else { // if (!isMonochrome() || (chn == MONO_CHN) || macro_mode) { // in monochrome mode skip all non-mono (green) channels
for (int i = 0; i < quad; i++) { // used in lwir
clt_data[i][ncol] = null; // erase unused clt_data
}
}
}// end of for (int chn = 0; chn <numcol; chn++)
// used in lwir
int tile_lma_debug_level = ((tileX == debug_tileX) && (tileY == debug_tileY))? imgdtt_params.lma_debug_level : -1;
// all color channels are done here
if (disparity_map != null){ // not null - calculate correlations
for (int i = 0; i < disparity_map.length; i++) {
if (disparity_map[i] != null) disparity_map[i][nTile] = (
(i == DISPARITY_STRENGTH_INDEX) ||
(i == DISPARITY_INDEX_HOR_STRENGTH) ||
(i == DISPARITY_INDEX_VERT_STRENGTH)) ? 0.0 : Double.NaN; // once and for all
}
// calculate overexposed fraction
if (saturation_imp != null){
disparity_map[OVEREXPOSED][nTile] = (1.0 * overexp_all[0]) / overexp_all[1];
}
// calculate all selected pairs correlations
int all_pairs = imgdtt_params.dbg_pair_mask; //TODO: use tile tasks
corrs[cTile] = corr2d.correlateCompositeFD( // now works with nulls for some clt_data colors
clt_data, // double [][][][][][] clt_data,
tileX, // int tileX,
tileY, // int tileY,
all_pairs, // int pairs_mask,
filter, // double [] lpf,
scale_strengths, // double scale_value, // scale correlation value
col_weights, // double [] col_weights,
corr_fat_zero); // double fat_zero)
// calculate interpolated "strips" to match different scales and orientations (ortho/diagonal) on the
// fine (0.5 pix) grid. Ortho for scale == 1 provides even/even samples (1/4 of all), diagonal ones -
// a checkerboard pattern
double [][] strips = corr2d.scaleRotateInterpoateCorrelations(
corrs[cTile], // double [][] correlations,
all_pairs, // int pairs_mask,
imgdtt_params.corr_strip_hight, //); // int hwidth);
(tile_lma_debug_level > 0) ? all_pairs:0); // debugMax);
// Combine strips for selected pairs. Now using only for all available pairs.
// Other combinations are used only if requested (clt_corr_partial != null)
double [] strip_combo = corr2d.combineInterpolatedCorrelations(
strips, // double [][] strips,
all_pairs, // int pairs_mask,
imgdtt_params.corr_offset, // double offset);
imgdtt_params.twice_diagonal); // boolean twice_diagonal)
// calculate CM maximums for all mixed channels
// First get integer correlation center, relative to the center
int [] ixy = corr2d.getMaxXYInt( // find integer pair or null if below threshold
strip_combo, // double [] data,
true, // boolean axis_only,
imgdtt_params.min_corr, // double minMax, // minimal value to consider (at integer location, not interpolated)
tile_lma_debug_level > 0); // boolean debug);
// double [] corr_stat = null;
// if integer argmax was strong enough, calculate CM argmax
// will not fill out DISPARITY_INDEX_INT+1, DISPARITY_INDEX_CM+1, DISPARITY_INDEX_POLY+1
// use clt_mismatch for that
// double strength = 0.0;
// double disparity = 0.0;
if (ixy != null) {
strength[cTile] = strip_combo[ixy[0]+transform_size-1]; // strength at integer max on axis
disparity_map[DISPARITY_INDEX_INT][tIndex] = -ixy[0];
disparity_map[DISPARITY_STRENGTH_INDEX][tIndex] = strength[cTile];
if (Double.isNaN(disparity_map[DISPARITY_STRENGTH_INDEX][tIndex])) {
System.out.println("BUG: 1. disparity_map[DISPARITY_STRENGTH_INDEX]["+tIndex+"] should not be NaN");
}
corr_stat[cTile] = corr2d.getMaxXCm( // get fractional center as a "center of mass" inside circle/square from the integer max
strip_combo, // double [] data, // [data_size * data_size]
ixy[0], // int ixcenter, // integer center x
// corr_wndy, // double [] window_y, // (half) window function in y-direction(perpendicular to disparity: for row0 ==1
// corr_wndx, // double [] window_x, // half of a window function in x (disparity) direction
(tile_lma_debug_level > 0)); // boolean debug);
}
// proceed only if CM correlation result is non-null // for compatibility with old code we need it to run regardless of the strength of the normal correlation
if (corr_stat[cTile] != null) {
disparity[cTile] = -corr_stat[cTile][0];
disparity_map[DISPARITY_INDEX_CM][tIndex] = disparity[cTile]; // disparity is negative X
if (tile_lma_debug_level > 0) {
System.out.println("Will run getMaxXSOrtho( ) for tileX="+tileX+", tileY="+tileY);
}
// debug new LMA correlations
if (debugTile) {
System.out.println("Will run new LMA for tileX="+tileX+", tileY="+tileY);
/*
Corr2dLMA lma2 = corr2d.corrLMA2(
imgdtt_params, // ImageDttParameters imgdtt_params,
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_inv_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs[cTile], // double [][] corrs,
disp_dist[cTile],
imgdtt_params.dbg_pair_mask, // int pair_mask, // which pairs to process
false, // boolean run_poly_instead, // true - run LMA, false - run 2d polynomial approximation
corr_stat[cTile][0], // double xcenter, // preliminary center x in pixels for largest baseline
imgdtt_params.ortho_vasw_pwr, // double vasw_pwr, // value as weight to this power,
tile_lma_debug_level+2, // int debug_level,
tileX, // int tileX, // just for debug output
tileY ); // int tileY
*/
}
} // end of if (corr_stat != null)
} // if (disparity_map != null){ // not null - calculate correlations
// only debug is left
}
}
}
}
if (debugCluster) {
System.out.println("Will run new LMA for clustX="+clustX+", clustY="+clustY);
Corr2dLMA lma2 = corr2d.corrLMA2(
imgdtt_params, // ImageDttParameters imgdtt_params,
tileStep, // int clust_width,
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_inv_limited, // corr_wnd_inv_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs, // [tIndex], // double [][] corrs,
disp_dist, // [tIndex],
imgdtt_params.dbg_pair_mask, // int pair_mask, // which pairs to process
false, // boolean run_poly_instead, // true - run LMA, false - run 2d polynomial approximation
corr_stat, // double[][] xcenter_str, // preliminary center x in pixels for largest baseline
imgdtt_params.ortho_vasw_pwr, // double vasw_pwr, // value as weight to this power,
clust_lma_debug_level + 1, // 2, // int debug_level,
clustX, // int tileX, // just for debug output
clustY ); // int tileY
}
/*
public Corr2dLMA corrLMA2( // multi-tile
ImageDttParameters imgdtt_params,
int clust_width,
double [][] corr_wnd, // correlation window to save on re-calculation of the window
double [] corr_wnd_inv_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
double [][][] corrs, // per tile, per pair, 2 correlation in line-scan order
double [][][] disp_dist, // per tile, per camera disparity matrix as a 1d (linescan order)
int pair_mask, // which pairs to process
boolean run_poly_instead, // true - run LMA, false - run 2d polynomial approximation
double xcenter, // preliminary center x in pixels for largest baseline
double vasw_pwr, // value as weight to this power,
int debug_level,
int tileX, // just for debug output
int tileY
)
*/
}
}
};
}
startAndJoin(threads);
// final double [][] dbg_distort = debug_distort? (new double [4*quad][tilesX*tilesY]) : null;
if ((dbg_distort != null) &&(globalDebugLevel >=0)) {
(new ShowDoubleFloatArrays()).showArrays(dbg_distort, tilesX, tilesY, true, "disparity_distortions"); // , dbg_titles);
}
/*
if (dbg_ports_coords != null) {
(new showDoubleFloatArrays()).showArrays(dbg_ports_coords, tilesX, tilesY, true, "ports_coordinates", dbg_titles);
}
*/
return clt_data;
}
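/**
 * Illustrative sketch added by the editor (not part of this commit): the tile-to-cluster
 * index arithmetic used by clt_aberrations_quad_corr_min() above for the tileStep x tileStep
 * lazy-eye clusters. For example, with tilesX = 324 and tileStep = 4,
 * clustersX = (324 + 3) / 4 = 81. The helper itself is hypothetical - in the method the
 * same math is applied in the opposite direction (cluster to tiles).
 */
public static int [] tileToCluster( // hypothetical helper, for illustration only
int tileX,
int tileY,
int tilesX,
int tileStep)
{
int clustersX = (tilesX + tileStep - 1) / tileStep; // ceiling division, as above
int clustX = tileX / tileStep;
int clustY = tileY / tileStep;
int nCluster = clustY * clustersX + clustX; // cluster index in scan order
int cTile = (tileY % tileStep) * tileStep + (tileX % tileStep); // index inside the cluster
return new int [] {nCluster, cTile};
}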
public double [][][][][][] clt_aberrations_quad_corr_new( // USED in LWIR
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include most others
......@@ -1797,16 +2431,14 @@ public class ImageDtt {
double [][][] tcorr_partial = null; // [quad][numcol+1][15*15]
double [][][][] tcorr_tpartial = null; // [quad][numcol+1][4][8*8]
double [] ports_rgb = null;
double [][] corr_wnd = (new Corr2dLMA(transform_size, null)).getCorrWnd();
double [] corr_wnd_limited = null;
double [][] corr_wnd = (new Corr2dLMA(1, transform_size, null)).getCorrWnd();
double [] corr_wnd_inv_limited = null;
if (imgdtt_params.lma_min_wnd <= 1.0) {
corr_wnd_limited = new double [corr_wnd.length * corr_wnd[0].length];
int indx = 0;
for (int i = 0; i < corr_wnd.length; i++) {
for (int j = 0; j < corr_wnd[i].length; j++) {
corr_wnd_limited[indx++] = Math.max(corr_wnd[i][j], imgdtt_params.lma_min_wnd);
corr_wnd_inv_limited = new double [corr_wnd.length * corr_wnd[0].length];
for (int i = imgdtt_params.lma_hard_marg; i < (corr_wnd.length - imgdtt_params.lma_hard_marg); i++) {
for (int j = imgdtt_params.lma_hard_marg; j < (corr_wnd.length - imgdtt_params.lma_hard_marg); j++) {
corr_wnd_inv_limited[i * (corr_wnd.length) + j] = 1.0/Math.max(Math.pow(corr_wnd[i][j],imgdtt_params.lma_wnd_pwr), imgdtt_params.lma_min_wnd);
}
}
}
......@@ -2343,12 +2975,11 @@ public class ImageDtt {
Corr2dLMA lma2 = corr2d.corrLMA2(
imgdtt_params, // ImageDttParameters imgdtt_params,
corr_wnd, // double [][] corr_wnd, // correlation window to save on re-calculation of the window
corr_wnd_limited, // corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corr_wnd_inv_limited, // corr_wnd_limited, // correlation window, limited not to be smaller than threshold - used for finding max/convex areas (or null)
corrs, // double [][] corrs,
disp_dist,
imgdtt_params.dbg_pair_mask, // int pair_mask, // which pairs to process
false, // boolean run_poly_instead, // true - run LMA, false - run 2d polynomial approximation
0.5, // double sigma, // low-pass sigma to find maximum (and convex too
corr_stat[0], // double xcenter, // preliminary center x in pixels for largest baseline
imgdtt_params.ortho_vasw_pwr, // double vasw_pwr, // value as weight to this power,
tile_lma_debug_level+2, // int debug_level,
......@@ -2938,6 +3569,9 @@ public class ImageDtt {
return clt_data;
}
public boolean dmExists(double [][] dm, int indx) { // not used in lwir or else
return (dm != null) && (dm.length > indx) && (dm[indx]!= null);
}
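/**
 * Illustrative sketch added by the editor (not part of this commit): the inverse limited
 * correlation window (corr_wnd_inv_limited) computed above. Each correlation sample is
 * later multiplied by 1/max(corr_wnd^lma_wnd_pwr, lma_min_wnd), with lma_hard_marg-wide
 * margins left at zero. Extracting it into a helper is an assumption - in the code the
 * loop is inlined in two places.
 */
public static double [] invLimitedWindow( // hypothetical helper, for illustration only
double [][] corr_wnd, // 2D correlation window, (2*transform_size-1) per side
double wnd_pwr, // imgdtt_params.lma_wnd_pwr
double min_wnd, // imgdtt_params.lma_min_wnd
int hard_marg) // imgdtt_params.lma_hard_marg
{
int size = corr_wnd.length;
double [] inv = new double [size * size]; // margins stay 0.0
for (int i = hard_marg; i < (size - hard_marg); i++) {
for (int j = hard_marg; j < (size - hard_marg); j++) {
inv[i * size + j] = 1.0 / Math.max(Math.pow(corr_wnd[i][j], wnd_pwr), min_wnd);
}
}
return inv;
}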
......
......@@ -78,6 +78,11 @@ public class ImageDttParameters {
public int cnvx_hwnd_size = 4; // half window size (both horizontal and vertical) to extract bi-convex cells
public double cnvx_weight = 0.5; // relative weight of non-convex (border) cell
public boolean cnvx_add3x3 = true; // always select 3x3 cells around integer maximum
public int cnvx_min_samples = 5; // minimal number of used samples on a 2D correlation pair
public boolean cnvx_non_coll = true; // require non-collinear samples in a valid correlation pair
public int cnvx_min_pairs = 5; // minimal number of 2D correlation pairs per tile
// pole window will be inverted
public int corr_strip_notch = 11; // number of rows to calculate for vertical poles
......@@ -102,8 +107,15 @@ public class ImageDttParameters {
// new LMA parameters
public double lma_min_wnd = 0.4; // divide values by the 2D correlation window if it is >= this value for finding maximums and convex areas
public double lma_wnd_pwr = 0.8; // Raise window for finding a maximum and a convex region to this power
public int lma_hard_marg = 1; // Zero out margins of this width before blurring
public int lma_soft_marg = 2; // Do not look for maximums inside margins of this width
public double lma_sigma = 0.7; // Blur correlation before finding maximum and convex region
// maybe try using sqrt (corr_wnd) ? or variable power?
public double lma_half_width = 2.0; //
public double lma_cost_wy = 0.003; // cost of parallel-to-disparity correction
public double lma_cost_wxy = 0.003; // cost of ortho-to-disparity correction
......@@ -117,6 +129,7 @@ public class ImageDttParameters {
public int lma_debug_level = 3; //
public boolean corr_var_cam = true; // New correlation mode compatible with 8 subcameras
public double cm_max_normalization = 0.55; // fraction of correlation maximum radius, being squared multiplied by maximum to have the same total mass
public double [][] lma_dbg_offset = new double [4][2]; //{{ 1.0, 0.0},{ -1.0, 0.0},{-1.0, 0.0},{ 1.0, 0.0}}; // new double [4][2];
public int getEnhOrthoWidth(boolean aux) {
return aux ? enhortho_width_aux : enhortho_width;
......@@ -217,6 +230,16 @@ public class ImageDttParameters {
gd.addCheckbox ("Always select 3x3 cells around integer maximum", this.cnvx_add3x3,
"Add 3x3 cells selection around the original argmax, regardless of bi-convex property");
gd.addNumericField("Min samples per pair", this.cnvx_min_samples, 0, 3, "pix",
"Minimal number of used samples on a 2D correlation pair");
gd.addCheckbox ("Always select 3x3 cells around integer maximum", this.cnvx_non_coll,
"Must have non-collinear samples in each correlation pair");
gd.addNumericField("Minimal correlation pairs per tile", this.cnvx_min_pairs, 0, 3, "",
"Minimal number 2D correlation pairs per tile");
gd.addMessage("Window for pole detection mode");
gd.addNumericField("Strip height for pole detection", this.corr_strip_notch, 0, 3, "half-pix",
"Number of rows to combine/interpolate correlation results. Rows are twice denser than pixels correponding to largest baseline disparity");
......@@ -259,6 +282,14 @@ public class ImageDttParameters {
gd.addNumericField("Minimal window value for normalization during max/convex", this.lma_min_wnd, 3, 6, "",
"divide values by the 2D correlation window if it is >= this value for finding maximums and convex areas");
gd.addNumericField("LMA window power", this.lma_wnd_pwr, 3, 6, "",
"Raise window for finding a maximum and a convex region to this power");
gd.addNumericField("LMA hard margin", this.lma_hard_marg, 0, 3, "",
"Zero out this width margins before blurring");
gd.addNumericField("LMA soft margins iterations", this.lma_soft_marg, 0, 3, "",
"Do not look for maximums inside this width margins");
gd.addNumericField("LMA blur sigma", this.lma_sigma, 3, 6, "",
"Blur correlation before finding maximum and convex region");
gd.addNumericField("Initial/expected half-width of the correlation maximum in both directions", this.lma_half_width, 3, 6, "pix",
"With LPF sigma = 0.9 it seems to be ~= 2.0. Used both as initial parameter and the fitted value difference from this may be penalized");
......@@ -287,7 +318,27 @@ public class ImageDttParameters {
gd.addNumericField("Normalization for the CM correlation strength", this.cm_max_normalization, 6, 8, "",
"Fraction of correlation maximum radius, being squared multiplied by maximum to have the same total mass. ~= 0.5, the lower the value, the higher strength reported by the CM");
// public double cm_max_normalization = 0.55; //
gd.addMessage("Cameras offsets in the disparity direction and orthogonal to disparity (debugging LMA)");
gd.addNumericField("LMA debug offset: camera0, parallel", this.lma_dbg_offset[0][0], 6, 8, "pix",
"Add camera offset in the direction of disparity (to/from center)");
gd.addNumericField("LMA debug offset: camera0, ortho", this.lma_dbg_offset[0][1], 6, 8, "pix",
"Add camera offset in the direction of disparity (to/from center)");
gd.addNumericField("LMA debug offset: camera1, parallel", this.lma_dbg_offset[1][0], 6, 8, "pix",
"Add camera offset in the direction of disparity (to/from center)");
gd.addNumericField("LMA debug offset: camera1, ortho", this.lma_dbg_offset[1][1], 6, 8, "pix",
"Add camera offset in the direction of disparity (to/from center)");
gd.addNumericField("LMA debug offset: camera2, parallel", this.lma_dbg_offset[2][0], 6, 8, "pix",
"Add camera offset in the direction of disparity (to/from center)");
gd.addNumericField("LMA debug offset: camera2, ortho", this.lma_dbg_offset[2][1], 6, 8, "pix",
"Add camera offset in the direction of disparity (to/from center)");
gd.addNumericField("LMA debug offset: camera3, parallel", this.lma_dbg_offset[3][0], 6, 8, "pix",
"Add camera offset in the direction of disparity (to/from center)");
gd.addNumericField("LMA debug offset: camera3, ortho", this.lma_dbg_offset[3][1], 6, 8, "pix",
"Add camera offset in the direction of disparity (to/from center)");
// public double cm_max_normalization = 0.55; //
}
public void dialogAnswers(GenericJTabbedDialog gd) {
......@@ -342,6 +393,10 @@ public class ImageDttParameters {
this.cnvx_weight = gd.getNextNumber();
this.cnvx_add3x3 = gd.getNextBoolean();
this.cnvx_min_samples= (int) gd.getNextNumber();
this.cnvx_non_coll = gd.getNextBoolean();
this.cnvx_min_pairs= (int) gd.getNextNumber();
this.corr_strip_notch= (int) gd.getNextNumber();
this.corr_notch_hwidth= gd.getNextNumber();
this.corr_notch_blur= gd.getNextNumber();
......@@ -365,6 +420,11 @@ public class ImageDttParameters {
this.lma_min_wnd = gd.getNextNumber();
this.lma_wnd_pwr = gd.getNextNumber();
this.lma_hard_marg= (int) gd.getNextNumber();
this.lma_soft_marg= (int) gd.getNextNumber();
this.lma_sigma = gd.getNextNumber();
this.lma_half_width = gd.getNextNumber();
this.lma_cost_wy = gd.getNextNumber();
this.lma_cost_wxy = gd.getNextNumber();
......@@ -380,6 +440,10 @@ public class ImageDttParameters {
this.corr_var_cam = gd.getNextBoolean();
this.cm_max_normalization= gd.getNextNumber();
for (int i = 0; i < 4; i++) for (int j=0; j < 2; j++) {
this.lma_dbg_offset[i][j]= gd.getNextNumber();
}
}
......@@ -433,6 +497,9 @@ public class ImageDttParameters {
properties.setProperty(prefix+"cnvx_weight", this.cnvx_weight +"");
properties.setProperty(prefix+"cnvx_add3x3", this.cnvx_add3x3 +"");
properties.setProperty(prefix+"cnvx_min_samples", this.cnvx_min_samples +"");
properties.setProperty(prefix+"cnvx_non_coll", this.cnvx_non_coll +"");
properties.setProperty(prefix+"cnvx_min_pairs", this.cnvx_min_pairs +"");
properties.setProperty(prefix+"corr_strip_notch", this.corr_strip_notch +"");
properties.setProperty(prefix+"corr_notch_hwidth", this.corr_notch_hwidth +"");
......@@ -458,6 +525,11 @@ public class ImageDttParameters {
properties.setProperty(prefix+"lma_min_wnd", this.lma_min_wnd +"");
properties.setProperty(prefix+"lma_wnd_pwr", this.lma_wnd_pwr +"");
properties.setProperty(prefix+"lma_hard_marg", this.lma_hard_marg +"");
properties.setProperty(prefix+"lma_soft_marg", this.lma_soft_marg +"");
properties.setProperty(prefix+"lma_sigma", this.lma_sigma +"");
properties.setProperty(prefix+"lma_half_width", this.lma_half_width +"");
properties.setProperty(prefix+"lma_cost_wy", this.lma_cost_wy +"");
properties.setProperty(prefix+"lma_cost_wxy", this.lma_cost_wxy +"");
......@@ -473,6 +545,10 @@ public class ImageDttParameters {
properties.setProperty(prefix+"corr_var_cam", this.corr_var_cam +"");
properties.setProperty(prefix+"cm_max_normalization", this.cm_max_normalization +"");
for (int i = 0; i < 4; i++) for (int j=0; j < 2; j++) {
properties.setProperty(prefix+"lma_dbg_offset_"+i+"_"+j, this.lma_dbg_offset[i][j] +"");
}
}
......@@ -528,7 +604,9 @@ public class ImageDttParameters {
if (properties.getProperty(prefix+"cnvx_weight")!=null) this.cnvx_weight=Double.parseDouble(properties.getProperty(prefix+"cnvx_weight"));
if (properties.getProperty(prefix+"cnvx_add3x3")!=null) this.cnvx_add3x3=Boolean.parseBoolean(properties.getProperty(prefix+"cnvx_add3x3"));
if (properties.getProperty(prefix+"cnvx_min_samples")!=null) this.cnvx_min_samples=Integer.parseInt(properties.getProperty(prefix+"cnvx_min_samples"));
if (properties.getProperty(prefix+"cnvx_non_coll")!=null) this.cnvx_non_coll=Boolean.parseBoolean(properties.getProperty(prefix+"cnvx_non_coll"));
if (properties.getProperty(prefix+"cnvx_min_pairs")!=null) this.cnvx_min_pairs=Integer.parseInt(properties.getProperty(prefix+"cnvx_min_pairs"));
if (properties.getProperty(prefix+"corr_strip_notch")!=null) this.corr_strip_notch=Integer.parseInt(properties.getProperty(prefix+"corr_strip_notch"));
if (properties.getProperty(prefix+"corr_notch_hwidth")!=null) this.corr_notch_hwidth=Double.parseDouble(properties.getProperty(prefix+"corr_notch_hwidth"));
......@@ -556,6 +634,13 @@ public class ImageDttParameters {
if (properties.getProperty(prefix+"lma_min_wnd")!=null) this.lma_min_wnd=Double.parseDouble(properties.getProperty(prefix+"lma_min_wnd"));
if (properties.getProperty(prefix+"lma_wnd_pwr")!=null) this.lma_wnd_pwr=Double.parseDouble(properties.getProperty(prefix+"lma_wnd_pwr"));
if (properties.getProperty(prefix+"lma_hard_marg")!=null) this.lma_hard_marg=Integer.parseInt(properties.getProperty(prefix+"lma_hard_marg"));
if (properties.getProperty(prefix+"lma_soft_marg")!=null) this.lma_soft_marg=Integer.parseInt(properties.getProperty(prefix+"lma_soft_marg"));
if (properties.getProperty(prefix+"lma_sigma")!=null) this.lma_sigma=Double.parseDouble(properties.getProperty(prefix+"lma_sigma"));
if (properties.getProperty(prefix+"lma_half_width")!=null) this.lma_half_width=Double.parseDouble(properties.getProperty(prefix+"lma_half_width"));
if (properties.getProperty(prefix+"lma_cost_wy")!=null) this.lma_cost_wy=Double.parseDouble(properties.getProperty(prefix+"lma_cost_wy"));
if (properties.getProperty(prefix+"lma_cost_wxy")!=null) this.lma_cost_wxy=Double.parseDouble(properties.getProperty(prefix+"lma_cost_wxy"));
......@@ -571,6 +656,14 @@ public class ImageDttParameters {
if (properties.getProperty(prefix+"corr_var_cam")!=null) this.corr_var_cam=Boolean.parseBoolean(properties.getProperty(prefix+"corr_var_cam"));
if (properties.getProperty(prefix+"cm_max_normalization")!=null) this.cm_max_normalization=Double.parseDouble(properties.getProperty(prefix+"cm_max_normalization"));
for (int i = 0; i < 4; i++) for (int j=0; j < 2; j++) {
if (properties.getProperty(prefix+"lma_dbg_offset_"+i+"_"+j)!=null) this.lma_dbg_offset[i][j]=Double.parseDouble(properties.getProperty(prefix+"lma_dbg_offset_"+i+"_"+j));
}
}
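/**
 * Illustrative sketch added by the editor (not part of this commit): how a lma_dbg_offset
 * pair (parallel, ortho to disparity) read above is converted to a pixel X/Y offset,
 * following the per-camera disp_dist use in ImageDtt.clt_aberrations_quad_corr_min().
 * The helper and the element interpretation are assumptions - in the code the two
 * multiply-accumulate lines are inlined.
 */
public static double [] dbgOffsetToXY( // hypothetical helper, for illustration only
double [] disp_dist, // one camera row of disp_dist: elements 0..3 as used in the debug loop
double [] dbg_offset) // {parallel, ortho} offset in pixels
{
return new double [] {
disp_dist[0] * dbg_offset[0] + disp_dist[1] * dbg_offset[1], // X
disp_dist[2] * dbg_offset[0] + disp_dist[3] * dbg_offset[1]}; // Y
}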
@Override
......@@ -625,6 +718,10 @@ public class ImageDttParameters {
idp.cnvx_weight= this.cnvx_weight;
idp.cnvx_add3x3= this.cnvx_add3x3;
idp.cnvx_min_samples= this.cnvx_min_samples;
idp.cnvx_non_coll= this.cnvx_non_coll;
idp.cnvx_min_pairs= this.cnvx_min_pairs;
idp.corr_strip_notch= this.corr_strip_notch;
idp.corr_notch_hwidth= this.corr_notch_hwidth;
idp.corr_notch_blur= this.corr_notch_blur;
......@@ -648,6 +745,11 @@ public class ImageDttParameters {
idp.lma_min_wnd = this.lma_min_wnd;
idp.lma_wnd_pwr = this.lma_wnd_pwr;
idp.lma_hard_marg = this.lma_hard_marg;
idp.lma_soft_marg = this.lma_soft_marg;
idp.lma_sigma = this.lma_sigma;
idp.lma_half_width = this.lma_half_width;
idp.lma_cost_wy = this.lma_cost_wy;
idp.lma_cost_wxy = this.lma_cost_wxy;
......@@ -663,6 +765,10 @@ public class ImageDttParameters {
idp.corr_var_cam = this.corr_var_cam;
idp.cm_max_normalization= this.cm_max_normalization;
idp.lma_dbg_offset= new double [this.lma_dbg_offset.length][];
for (int i = 0; i < idp.lma_dbg_offset.length; i++) {
idp.lma_dbg_offset[i] = this.lma_dbg_offset[i].clone();
}
return idp;
}
......
......@@ -3599,6 +3599,86 @@ public class QuadCLT {
IJ.d2s(0.000000001*(System.nanoTime()-this.startTime),3)+" sec, --- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
}
public void processCLTQuadCorrsTest( // not used in lwir
CLTParameters clt_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
ColorProcParameters colorProcParameters,
CorrectionColorProc.ColorGainsParameters channelGainParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
// int convolveFFTSize, // 128 - fft size, kernel size should be size/2
final boolean apply_corr, // calculate and apply additional fine geometry correction
final boolean infinity_corr, // calculate and apply geometry correction at infinity
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel)
{
if (infinity_corr && (clt_parameters.z_correction != 0.0)){
System.out.println(
"****************************************\n"+
"* Resetting manual infinity correction *\n"+
"****************************************\n");
clt_parameters.z_correction = 0.0;
}
this.startTime=System.nanoTime();
String [] sourceFiles=correctionsParameters.getSourcePaths();
SetChannels [] set_channels=setChannels(debugLevel);
if ((set_channels == null) || (set_channels.length==0)) {
System.out.println("No files to process (of "+sourceFiles.length+")");
return;
}
// multiply each image by this and divide by individual (if not NaN)
double [] referenceExposures = null;
if (!colorProcParameters.lwir_islwir) {
referenceExposures=eyesisCorrections.calcReferenceExposures(debugLevel);
}
for (int nSet = 0; nSet < set_channels.length; nSet++){
int [] channelFiles = set_channels[nSet].fileNumber();
boolean [][] saturation_imp = (clt_parameters.sat_level > 0.0)? new boolean[channelFiles.length][] : null;
double [] scaleExposures = new double[channelFiles.length];
ImagePlus [] imp_srcs = conditionImageSet(
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
colorProcParameters,
sourceFiles, // String [] sourceFiles,
set_channels[nSet].name(), // String set_name,
referenceExposures, // double [] referenceExposures,
channelFiles, // int [] channelFiles,
scaleExposures, //output // double [] scaleExposures
saturation_imp, //output // boolean [][] saturation_imp,
debugLevel); // int debugLevel);
// once per quad here
processCLTQuadCorrTest( // returns ImagePlus, but it already should be saved/shown
imp_srcs, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
debayerParameters,
colorProcParameters,
channelGainParameters,
rgbParameters,
scaleExposures,
apply_corr, // calculate and apply additional fine geometry correction
infinity_corr, // calculate and apply geometry correction at infinity
threadsMax, // maximal number of threads to launch
updateStatus,
debugLevel);
//Runtime.getRuntime().gc();
if (debugLevel >-1) System.out.println("Processing set "+(nSet+1)+" (of "+set_channels.length+") finished at "+
IJ.d2s(0.000000001*(System.nanoTime()-this.startTime),3)+" sec, --- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
if (eyesisCorrections.stopRequested.get()>0) {
System.out.println("User requested stop");
System.out.println("Processing "+(nSet + 1)+" file sets (of "+set_channels.length+") finished at "+
IJ.d2s(0.000000001*(System.nanoTime()-this.startTime),3)+" sec, --- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
return;
}
}
System.out.println("processCLTQuadCorrs(): processing "+getTotalFiles(set_channels)+" files ("+set_channels.length+" file sets) finished at "+
IJ.d2s(0.000000001*(System.nanoTime()-this.startTime),3)+" sec, --- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
}
public void channelGainsEqualize( // USED in lwir
boolean gain_equalize,
......@@ -3876,7 +3956,6 @@ public class QuadCLT {
return offsets;
}
public ImagePlus [] processCLTQuadCorr( // USED in lwir
ImagePlus [] imp_quad, // should have properties "name"(base for saving results), "channel","path"
boolean [][] saturation_imp, // (near) saturated pixels or null
......@@ -4497,6 +4576,183 @@ public class QuadCLT {
return results;
}
public ImagePlus [] processCLTQuadCorrTest( // USED in lwir
ImagePlus [] imp_quad, // should have properties "name"(base for saving results), "channel","path"
boolean [][] saturation_imp, // (near) saturated pixels or null
CLTParameters clt_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
ColorProcParameters colorProcParameters,
CorrectionColorProc.ColorGainsParameters channelGainParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
// int convolveFFTSize, // 128 - fft size, kernel size should be size/2
double [] scaleExposures, // probably not needed here
final boolean apply_corr, // calculate and apply additional fine geometry correction
final boolean infinity_corr, // calculate and apply geometry correction at infinity
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel){
final boolean batch_mode = clt_parameters.batch_run; //disable any debug images
boolean advanced= this.correctionsParameters.zcorrect || this.correctionsParameters.equirectangular;
boolean toRGB= advanced? true: this.correctionsParameters.toRGB;
ShowDoubleFloatArrays sdfa_instance = new ShowDoubleFloatArrays(); // just for debugging?
// may use this.StartTime to report intermediate steps execution times
// String aux = isAux()?"-AUX":"";
String name=this.correctionsParameters.getModelName((String) imp_quad[0].getProperty("name"));
// int channel= Integer.parseInt((String) imp_src.getProperty("channel"));
String path= (String) imp_quad[0].getProperty("path");
ImagePlus [] results = new ImagePlus[imp_quad.length];
for (int i = 0; i < results.length; i++) {
results[i] = imp_quad[i];
results[i].setTitle(results[i].getTitle()+"RAW");
}
if (debugLevel>1) System.out.println("processing: "+path);
double [][][] double_stacks = new double [imp_quad.length][][];
for (int i = 0; i < double_stacks.length; i++){
double_stacks[i] = eyesisCorrections.bayerToDoubleStack(
imp_quad[i], // source Bayer image, linearized, 32-bit (float))
null, // no margins, no oversample
this.is_mono);
}
ImageDtt image_dtt = new ImageDtt(isMonochrome(),clt_parameters.getScaleStrength(isAux()));
for (int i = 0; i < double_stacks.length; i++){
if ( double_stacks[i].length > 2) {
for (int j =0 ; j < double_stacks[i][0].length; j++){
double_stacks[i][2][j]*=0.5; // Scale green by 0.5 to compensate for having twice as many pixels as R,B
}
} else {
for (int j =0 ; j < double_stacks[i][0].length; j++){
double_stacks[i][0][j]*=1.0; // Scale mono by 1/4 - to have the same overall "gain" as for bayer
}
}
}
setTiles (imp_quad[0], // set global tp.tilesX, tp.tilesY
clt_parameters,
threadsMax);
// temporary setting up tile task file (one integer per tile, bitmask
// for testing defined for a window, later the tiles to process will be calculated based on previous passes results
int [][] tile_op = tp.setSameTileOp(clt_parameters, clt_parameters.tile_task_op, debugLevel);
double [][] disparity_array = tp.setSameDisparity(clt_parameters.disparity); // 0.0); // [tp.tilesY][tp.tilesX] - individual per-tile expected disparity
//TODO: Add array of default disparity - use for combining images in force disparity mode (no correlation), when disparity is predicted from other tiles
double [][][][] clt_corr_combo = null;
double [][][][][] clt_corr_partial = null; // [tp.tilesY][tp.tilesX][pair][color][(2*transform_size-1)*(2*transform_size-1)]
double [][] clt_mismatch = null; // [3*4][tp.tilesY * tp.tilesX] // transpose unapplied
double [][][][] texture_tiles = null; // [tp.tilesY][tp.tilesX]["RGBA".length()][]; // tiles will be 16x16, 2 visualization modes: full 16 or overlapped
// undecided, so 2 modes of combining alpha - same as rgb, or use center tile only
final int tilesX = tp.getTilesX();
final int tilesY = tp.getTilesY();
if (clt_parameters.correlate){ // true
clt_corr_combo = new double [ImageDtt.TCORR_TITLES.length][tilesY][tilesX][];
texture_tiles = new double [tilesY][tilesX][][]; // ["RGBA".length()][];
for (int i = 0; i < tilesY; i++){
for (int j = 0; j < tilesX; j++){
for (int k = 0; k<clt_corr_combo.length; k++){
clt_corr_combo[k][i][j] = null;
}
texture_tiles[i][j] = null;
}
}
if (!infinity_corr && clt_parameters.corr_keep){ // true
clt_corr_partial = new double [tilesY][tilesX][][][];
for (int i = 0; i < tilesY; i++){
for (int j = 0; j < tilesX; j++){
clt_corr_partial[i][j] = null;
}
}
} // clt_parameters.corr_mismatch = false
if (clt_parameters.corr_mismatch || apply_corr || infinity_corr){ // added infinity_corr
clt_mismatch = new double [12][]; // 12 = 3*4 (see the [3*4] comment on the declaration above) // not used in lwir
}
}
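// Note: in this _min variant the clt_corr_combo / clt_corr_partial / clt_mismatch / texture_tiles buffers
// allocated above are not passed to the correlation call below (those arguments are commented out),
// so their per-tile entries remain null within this method.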
// Includes all 3 colors - will have zeros in unused
double [][] disparity_map = new double [ImageDtt.DISPARITY_TITLES.length][]; //[0] -residual disparity, [1] - orthogonal (just for debugging) last 4 - max pixel differences
double min_corr_selected = clt_parameters.min_corr;
double [][] shiftXY = new double [4][2];
if (!clt_parameters.fine_corr_ignore) {
double [][] shiftXY0 = {
{clt_parameters.fine_corr_x_0,clt_parameters.fine_corr_y_0},
{clt_parameters.fine_corr_x_1,clt_parameters.fine_corr_y_1},
{clt_parameters.fine_corr_x_2,clt_parameters.fine_corr_y_2},
{clt_parameters.fine_corr_x_3,clt_parameters.fine_corr_y_3}};
shiftXY = shiftXY0;
}
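// shiftXY: [4][2] per-port fine-correction offsets {x, y}; stays all zeros when fine_corr_ignore is set.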
double z_correction = clt_parameters.z_correction;
if (clt_parameters.z_corr_map.containsKey(name)){
z_correction +=clt_parameters.z_corr_map.get(name);// not used in lwir
}
final double disparity_corr = (z_correction == 0) ? 0.0 : geometryCorrection.getDisparityFromZ(1.0/z_correction);
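// disparity_corr converts the per-scene z_correction (applied as an inverse-range offset, judging by the
// 1.0/z_correction argument) into a disparity offset at infinity; the exact scaling is up to GeometryCorrection.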
double [][][][][][] clt_data = image_dtt.clt_aberrations_quad_corr_min(
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
// 1, // final int macro_scale, // to correlate tile data instead of the pixel data: 1 - pixels, 8 - tiles
tile_op, // per-tile operation bit codes
disparity_array, // final double disparity,
double_stacks, // final double [][][] imade_data, // first index - number of image in a quad
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
// correlation results - final and partial
// clt_corr_combo, // [tp.tilesY][tp.tilesX][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
// clt_corr_partial, // [tp.tilesY][tp.tilesX][pair][color][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
// clt_mismatch, // [12][tp.tilesY * tp.tilesX] // transpose unapplied. null - do not calculate
disparity_map, // [2][tp.tilesY * tp.tilesX]
// texture_tiles, // [tp.tilesY][tp.tilesX]["RGBA".length()][];
imp_quad[0].getWidth(), // final int width,
clt_parameters.getFatZero(isMonochrome()), // add to denominator to modify phase correlation (same units as data1, data2). <0 - pure sum
// clt_parameters.corr_sym,
// clt_parameters.corr_offset,
clt_parameters.corr_red,
clt_parameters.corr_blue,
clt_parameters.getCorrSigma(image_dtt.isMonochrome()),
// clt_parameters.corr_normalize, // normalize correlation results by rms
min_corr_selected, // 0.0001; // minimal correlation value to consider valid
// clt_parameters.max_corr_sigma,// 1.5; // weights of points around global max to find fractional
// clt_parameters.max_corr_radius,
// clt_parameters.max_corr_double, // Double pass when masking center of mass to reduce preference for integer values
// clt_parameters.corr_mode, // Correlation mode: 0 - integer max, 1 - center of mass, 2 - polynomial
// clt_parameters.min_shot, // 10.0; // Do not adjust for shot noise if lower than
// clt_parameters.scale_shot, // 3.0; // scale when dividing by sqrt ( <0 - disable correction)
// clt_parameters.diff_sigma, // 5.0;//RMS difference from average to reduce weights (~ 1.0 - 1/255 full scale image)
// clt_parameters.diff_threshold, // 5.0; // RMS difference from average to discard channel (~ 1.0 - 1/255 full scale image)
// clt_parameters.diff_gauss, // true; // when averaging images, use gaussian around average as weight (false - sharp all/nothing)
// clt_parameters.min_agree, // 3.0; // minimal number of channels to agree on a point (real number to work with fuzzy averages)
// clt_parameters.dust_remove, // Do not reduce average weight when only one image differs much from the average
// clt_parameters.keep_weights, // Add port weights to RGBA stack (debug feature)
geometryCorrection, // final GeometryCorrection geometryCorrection,
null, // final GeometryCorrection geometryCorrection_main, // if not null correct this camera (aux) to the coordinates of the main
clt_kernels, // final double [][][][][][] clt_kernels, // [channel_in_quad][color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
clt_parameters.kernel_step,
clt_parameters.transform_size,
clt_parameters.clt_window,
shiftXY, //
disparity_corr, // final double disparity_corr, // disparity at infinity
// (clt_parameters.fcorr_ignore? null: this.fine_corr),
// clt_parameters.corr_magic_scale, // still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
clt_parameters.shift_x, // final double shiftX, // shift image horizontally (positive - right) - just for testing
clt_parameters.shift_y, // final double shiftY, // shift image vertically (positive - down)
clt_parameters.tileStep, // final int tileStep, // process tileStep x tileStep cluster of tiles when adjusting lazy eye parameters
clt_parameters.tileX, // -1234, // clt_parameters.tileX, // final int debug_tileX,
clt_parameters.tileY, // final int debug_tileY, -1234 will cause port coordinates debug images
// (clt_parameters.dbg_mode & 64) != 0, // no fract shift
// (clt_parameters.dbg_mode & 128) != 0, // no convolve
// (clt_parameters.dbg_mode & 256) != 0, // transpose convolve
threadsMax,
debugLevel);
return results;
}
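// Illustrative sketch (not part of this commit): one way to map per-tile indices to the tileStep x tileStep
// cluster used when adjusting lazy-eye parameters. The names tilesX and tileStep mirror the values used above;
// the grouping below is an assumption about the intended clustering, not the actual ImageDtt implementation.
static int clusterIndex(int tileX, int tileY, int tilesX, int tileStep) {
    int clustersX = (tilesX + tileStep - 1) / tileStep; // clusters per row, rounded up for a partial last column
    int clusterX = tileX / tileStep;                    // cluster column containing this tile
    int clusterY = tileY / tileStep;                    // cluster row containing this tile
    return clusterY * clustersX + clusterX;             // linear cluster index shared by all tiles in the cluster
}
// Example: with tileStep == 4, all tiles with tileX and tileY in 0..3 map to cluster 0.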
double [][] resizeGridTexture( // USED in lwir
double [][] imgData,
int tileSize,
......