Commit 9e339a49 authored by Andrey Filippov

Improving matching with motion blur (MB), working version

parent 6c3ff670
@@ -24,6 +24,7 @@
package com.elphel.imagej.tileprocessor;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -2147,8 +2148,15 @@ public class ErsCorrection extends GeometryCorrection {
xyz4[1] /= xyz4[2];
xyz4[2] = 1.0;
}
Matrix dpscene_dxyz = null;
try {
dpscene_dxyz = dx_dpscene.inverse();
} catch (Exception e) {
// dx_dpscene is not invertible - cannot compute scene-parameter derivatives for this tile
System.out.println("dx_dpscene is singular");
e.printStackTrace();
return null;
}
Matrix dpscene_dxyz_minus = dpscene_dxyz.times(-1.0); // negated to calculate /d{pX,pY,D} for the scene parameters
double[][] derivatives = new double[DP_NUM_PARS+1][]; // includes [0] - pXpYD vector
// scene pX, pY, Disparity
...
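Note on the new guard above: Jama's Matrix.inverse() throws a RuntimeException when the matrix is singular, so the try/catch returns null instead of aborting tile processing. A minimal standalone sketch of the same idea (class and method names below are illustrative, not part of the commit):

import Jama.Matrix;

public class SafeInverseDemo {
    // Return m.inverse(), or null if m is square but (near-)singular.
    static Matrix invertOrNull(Matrix m) {
        if (m.getRowDimension() == m.getColumnDimension() && Math.abs(m.det()) < 1e-12) {
            return null; // determinant ~ 0 - treat as singular, as the catch block above does
        }
        try {
            return m.inverse(); // Jama throws RuntimeException("Matrix is singular.") otherwise
        } catch (RuntimeException e) {
            return null;
        }
    }

    public static void main(String[] args) {
        Matrix singular = new Matrix(new double[][] {{1, 2}, {2, 4}}); // rank 1
        Matrix regular  = new Matrix(new double[][] {{2, 0}, {0, 3}});
        System.out.println(invertOrNull(singular));           // null
        System.out.println(invertOrNull(regular).get(0, 0));   // 0.5
    }
}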
@@ -1331,7 +1331,7 @@ public class ImageDtt extends ImageDttCPU {
final double [][] pXpYD, // pXpYD for the reference scene
final double [][] fpn_offsets, // null, or per-tile X,Y offset to be blanked
final double fpn_radius, // radius to be blanked around FPN offset center
final boolean fpn_ignore_border, // NOT used! only if fpn_mask != null - ignore tile if maximum touches fpn_mask
final double[][][] motion_vectors, // [tilesY*tilesX][][] -> [][num_sel_sensors+1 or 2][3]
final boolean run_poly, // polynomial max, if false - centroid
final boolean use_partial, // find motion vectors for individual pairs, false - for sum only
...
@@ -46,6 +46,13 @@ public class IntersceneLmaParameters {
public boolean ilma_debug_adjust_series = false; // Debug images for series of pose and ers
public boolean ilma_debug_invariant = false; // Monitoring variations when restarting program (should be ilma_thread_invariant=true)
public double ilma_motion_filter = 3.5; // filter atr, xyz between consecutive scenes to get velocities
public double [] ilma_scale_xyz = {1.0, 1.0, 1.0};
public double [] ilma_scale_atr = {1.0, 1.0, 1.0}; // roll - minus?
public boolean ilma_debug_ers = false; // Debug ers-related operation
public IntersceneLmaParameters() {
ilma_lma_select[ErsCorrection.DP_DVAZ]= true;
ilma_lma_select[ErsCorrection.DP_DVTL]= true;
@@ -141,6 +148,18 @@ public class IntersceneLmaParameters {
"Generate debug images and text output to verify same results regardless of threads" );
gd.addNumericField("Debug level", this.ilma_debug_level, 0,3,"",
"Debug level of interscene LMA operation.");
gd.addMessage("Debug ERS-related operation");
gd.addNumericField("Motion filter (half number of scenes)", this.ilma_motion_filter, 5,7,"",
"filter atr, xyz between consecutive scenes to get velocities.");
gd.addStringField ("XYZ scales", IntersceneMatchParameters.doublesToString(ilma_scale_xyz), 80,
"Scales for X, Y, and Z velocities, such as '1.0 1.0 1.0'.");
gd.addStringField ("ATR scales", IntersceneMatchParameters.doublesToString(ilma_scale_atr), 80,
"Scales for Azimuth, Tilt, and Roll velocities, such as '1.0 1.0 1.0'.");
gd.addCheckbox ("Debug ERS", this.ilma_debug_ers,
"Debug ERS-related parameters during pose fitting." );
}
public void dialogAnswers(GenericJTabbedDialog gd) {
@@ -164,6 +183,12 @@ public class IntersceneLmaParameters {
this.ilma_debug_adjust_series = gd.getNextBoolean();
this.ilma_debug_invariant = gd.getNextBoolean();
this.ilma_debug_level = (int) gd.getNextNumber();
this.ilma_motion_filter = gd.getNextNumber();
this.ilma_scale_xyz = IntersceneMatchParameters.StringToDoubles(gd.getNextString(), 3);
this.ilma_scale_atr = IntersceneMatchParameters.StringToDoubles(gd.getNextString(), 3);
this.ilma_debug_ers = gd.getNextBoolean();
}
public void setProperties(String prefix,Properties properties){
properties.setProperty(prefix+"ilma_thread_invariant", this.ilma_thread_invariant+"");
@@ -184,6 +209,11 @@ public class IntersceneLmaParameters {
properties.setProperty(prefix+"ilma_debug_adjust_series",this.ilma_debug_adjust_series+"");
properties.setProperty(prefix+"ilma_debug_invariant", this.ilma_debug_invariant+"");
properties.setProperty(prefix+"ilma_debug_level", this.ilma_debug_level+"");
properties.setProperty(prefix+"ilma_motion_filter", this.ilma_motion_filter+"");
properties.setProperty(prefix+"ilma_scale_xyz", IntersceneMatchParameters.doublesToString(ilma_scale_xyz));
properties.setProperty(prefix+"ilma_scale_atr", IntersceneMatchParameters.doublesToString(ilma_scale_atr));
properties.setProperty(prefix+"ilma_debug_ers", this.ilma_debug_ers+"");
}
public void getProperties(String prefix,Properties properties){
if (properties.getProperty(prefix+"ilma_thread_invariant")!=null) this.ilma_thread_invariant=Boolean.parseBoolean(properties.getProperty(prefix+"ilma_thread_invariant"));
@@ -206,6 +236,20 @@ public class IntersceneLmaParameters {
if (properties.getProperty(prefix+"ilma_debug_adjust_series")!=null)this.ilma_debug_adjust_series=Boolean.parseBoolean(properties.getProperty(prefix+"ilma_debug_adjust_series"));
if (properties.getProperty(prefix+"ilma_debug_invariant")!=null) this.ilma_debug_invariant=Boolean.parseBoolean(properties.getProperty(prefix+"ilma_debug_invariant"));
if (properties.getProperty(prefix+"ilma_debug_level")!=null) this.ilma_debug_level=Integer.parseInt(properties.getProperty(prefix+"ilma_debug_level"));
if (properties.getProperty(prefix+"ilma_motion_filter")!=null) this.ilma_motion_filter=Double.parseDouble(properties.getProperty(prefix+"ilma_motion_filter"));
if (properties.getProperty(prefix+"ilma_scale_xyz")!=null) {
this.ilma_scale_xyz= IntersceneMatchParameters.StringToDoubles(
properties.getProperty(prefix+"ilma_scale_xyz"),3);
}
if (properties.getProperty(prefix+"ilma_scale_atr")!=null) {
this.ilma_scale_atr= IntersceneMatchParameters.StringToDoubles(
properties.getProperty(prefix+"ilma_scale_atr"),3);
}
if (properties.getProperty(prefix+"ilma_debug_ers")!=null) this.ilma_debug_ers=Boolean.parseBoolean(properties.getProperty(prefix+"ilma_debug_ers"));
}
@Override
@@ -227,6 +271,12 @@ public class IntersceneLmaParameters {
ilp.ilma_debug_adjust_series = this.ilma_debug_adjust_series;
ilp.ilma_debug_invariant = this.ilma_debug_invariant;
ilp.ilma_debug_level = this.ilma_debug_level;
ilp.ilma_motion_filter = this.ilma_motion_filter;
ilp.ilma_scale_xyz = this.ilma_scale_xyz.clone();
ilp.ilma_scale_atr = this.ilma_scale_atr.clone();
ilp.ilma_debug_ers = this.ilma_debug_ers;
return ilp;
}
}
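The new ilma_scale_xyz / ilma_scale_atr vectors are round-tripped through strings with IntersceneMatchParameters.doublesToString() and StringToDoubles(). Those helpers are not shown in this diff; a minimal sketch of what such space-separated conversion could look like (the exact format and tolerated separators are assumptions):

import java.util.Locale;

public class DoublesStringDemo {
    // Hypothetical stand-ins for IntersceneMatchParameters.doublesToString()/StringToDoubles()
    static String doublesToString(double[] data) {
        StringBuilder sb = new StringBuilder();
        for (double d : data) {
            if (sb.length() > 0) sb.append(" ");
            sb.append(String.format(Locale.US, "%.6f", d));
        }
        return sb.toString();
    }

    static double[] stringToDoubles(String s, int len) {
        String[] tokens = s.trim().split("[\\s,;]+");
        double[] result = new double[(len > 0) ? len : tokens.length];
        for (int i = 0; (i < result.length) && (i < tokens.length); i++) {
            result[i] = Double.parseDouble(tokens[i]);
        }
        return result;
    }

    public static void main(String[] args) {
        String s = doublesToString(new double[] {1.0, 1.0, 1.0}); // "1.000000 1.000000 1.000000"
        double[] back = stringToDoubles(s, 3);
        System.out.println(s + " -> {" + back[0] + ", " + back[1] + ", " + back[2] + "}");
    }
}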
@@ -248,6 +248,7 @@ public class IntersceneMatchParameters {
public boolean mb_en = true;
public double mb_tau = 0.008; // time constant, sec
public double mb_max_gain = 5.0; // motion blur maximal gain (if more - move second point more than a pixel)
public double mb_max_gain_inter = 2.0; // same for interscene correlation for pose adjustment
public boolean stereo_merge = true;
@@ -732,6 +733,8 @@ public class IntersceneMatchParameters {
"Sensor exponential decay in seconds.");
gd.addNumericField("Maximal gain", this.mb_max_gain, 5,7,"x",
"Maximal gain for motion blur correction (if needed more for 1 pixel, increase offset).");
gd.addNumericField("Maximal gain pose", this.mb_max_gain_inter, 5,7,"x",
"Maximal gain for motion blur correction during interscene correlation (if needed more for 1 pixel, increase offset).");
gd.addTab("Stereo/Video","Parameters for stereo video generation");
gd.addMessage ("Stereo");
@@ -1016,7 +1019,7 @@ public class IntersceneMatchParameters {
this.mb_en = gd.getNextBoolean();
this.mb_tau = gd.getNextNumber();
this.mb_max_gain = gd.getNextNumber();
this.mb_max_gain_inter = gd.getNextNumber();
if (stereo_views.length > 0) {
int i = gd.getNextChoiceIndex();
if (i > 0) {
@@ -1296,7 +1299,7 @@ public class IntersceneMatchParameters {
properties.setProperty(prefix+"mb_en", this.mb_en+""); // boolean
properties.setProperty(prefix+"mb_tau", this.mb_tau+""); // double
properties.setProperty(prefix+"mb_max_gain", this.mb_max_gain+""); // double
properties.setProperty(prefix+"mb_max_gain_inter", this.mb_max_gain_inter+""); // double
properties.setProperty(prefix+"stereo_merge", this.stereo_merge+""); // boolean
properties.setProperty(prefix+"stereo_gap", this.stereo_gap+""); // int
properties.setProperty(prefix+"stereo_intereye", this.stereo_intereye+""); // double
@@ -1531,6 +1534,7 @@ public class IntersceneMatchParameters {
if (properties.getProperty(prefix+"mb_en")!=null) this.mb_en=Boolean.parseBoolean(properties.getProperty(prefix+"mb_en"));
if (properties.getProperty(prefix+"mb_tau")!=null) this.mb_tau=Double.parseDouble(properties.getProperty(prefix+"mb_tau"));
if (properties.getProperty(prefix+"mb_max_gain")!=null) this.mb_max_gain=Double.parseDouble(properties.getProperty(prefix+"mb_max_gain"));
if (properties.getProperty(prefix+"mb_max_gain_inter")!=null) this.mb_max_gain_inter=Double.parseDouble(properties.getProperty(prefix+"mb_max_gain_inter"));
if (properties.getProperty(prefix+"stereo_merge")!=null) this.stereo_merge=Boolean.parseBoolean(properties.getProperty(prefix+"stereo_merge"));
if (properties.getProperty(prefix+"stereo_gap")!=null) this.stereo_gap=Integer.parseInt(properties.getProperty(prefix+"stereo_gap"));
@@ -1782,6 +1786,7 @@ public class IntersceneMatchParameters {
imp.mb_en = this.mb_en;
imp.mb_tau = this.mb_tau;
imp.mb_max_gain = this.mb_max_gain;
imp.mb_max_gain_inter = this.mb_max_gain_inter;
imp.stereo_merge = this.stereo_merge;
imp.stereo_gap = this.stereo_gap;
...
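The new mb_max_gain_inter parameter follows the same lifecycle as every other field in IntersceneMatchParameters: declaration with a default, a dialog field, reading it back in dialogAnswers(), persisting it in setProperties()/getProperties(), and copying it in clone(). A minimal self-contained sketch of that persistence round-trip using only java.util.Properties (the GUI part is omitted; the class name is illustrative):

import java.util.Properties;

public class ParamRoundTripDemo {
    double mb_max_gain_inter = 2.0; // same default as in the commit

    void setProperties(String prefix, Properties properties) {
        properties.setProperty(prefix + "mb_max_gain_inter", this.mb_max_gain_inter + "");
    }

    void getProperties(String prefix, Properties properties) {
        if (properties.getProperty(prefix + "mb_max_gain_inter") != null) {
            this.mb_max_gain_inter = Double.parseDouble(properties.getProperty(prefix + "mb_max_gain_inter"));
        }
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        ParamRoundTripDemo saved = new ParamRoundTripDemo();
        saved.mb_max_gain_inter = 3.5;
        saved.setProperties("imp.", props);          // write
        ParamRoundTripDemo restored = new ParamRoundTripDemo();
        restored.getProperties("imp.", props);       // read back
        System.out.println("restored mb_max_gain_inter = " + restored.mb_max_gain_inter); // 3.5
    }
}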
@@ -2470,7 +2470,7 @@ public class OpticalFlow {
title); // dsrbg_titles);
}
public static double [][] getSceneDisparityStrength(
final boolean to_ref_disparity, // false - return scene disparity, true - convert disparity back to the reference scene
final double [] disparity_ref, // invalid tiles - NaN in disparity
final double [] disparity_scene, // invalid tiles - NaN in disparity (just for masking out invalid scene tiles)
@@ -2758,7 +2758,8 @@
final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
int threadsMax)
{
boolean debug_ers = false; // set to true for ERS debug output //11.01.2022
boolean ignore_ers = false;
TileProcessor tp = scene_QuadClt.getTileProcessor();
final int tilesX = (full_woi_in==null) ? tp.getTilesX() : full_woi_in.width; // full width, including extra
final int tilesY = (full_woi_in==null) ? tp.getTilesY() : full_woi_in.height;
@@ -2794,9 +2795,30 @@
final ErsCorrection ersSceneCorrection = scene_QuadClt.getErsCorrection();
final ErsCorrection ersReferenceCorrection = (reference_QuadClt!=null)? reference_QuadClt.getErsCorrection(): ersSceneCorrection;
if (reference_QuadClt!=null) {
if (ignore_ers) {
ersReferenceCorrection.setErsDt(
ZERO3, // double [] ers_xyz_dt,
ZERO3); // double [] ers_atr_dt);
}
ersReferenceCorrection.setupERS(); // just in case - setUP using instance parameters
}
if (ignore_ers) {
ersSceneCorrection.setErsDt(
ZERO3, // double [] ers_xyz_dt,
ZERO3); // double [] ers_atr_dt);
}
ersSceneCorrection.setupERS();
if (debug_ers) {
boolean same_scene = reference_QuadClt == scene_QuadClt;
if (reference_QuadClt != null) {
System.out.println("reference: "+ reference_QuadClt.getImageName());
ersReferenceCorrection.printVectors(null,null);
}
System.out.println("scene: "+ scene_QuadClt.getImageName()+(same_scene? " same scene as ref":""));
ersSceneCorrection.printVectors(scene_xyz, scene_atr);
}
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
@@ -5950,6 +5972,7 @@
* @param nscene index of the current scene
* @return [2][3] array of {{dx/dt, dy/dt, dz/dt}, {dazimuth/dt, dtilt/dt, droll/dt}}
*/
@Deprecated // should get velocities from the HashMap at the reference scene by timestamp, not re-calculate.
public static double [][] getVelocities(
QuadCLT [] quadCLTs,
int nscene){
@@ -6003,6 +6026,7 @@
QuadCLT [] quadCLTs,
int threadsMax,
int debugLevel) {
boolean corr_raw_ers = true;
double [] stereo_atr = ZERO3; // maybe later play with rotated camera
boolean um_mono = clt_parameters.imp.um_mono;
double um_sigma = clt_parameters.imp.um_sigma;
@@ -6021,6 +6045,9 @@
sensor_mask = 1;
}
String suffix = suffix_in+((mode3d > 0)?(merge_all?"-MERGED":"-SINGLE"):"");
if ((mode3d <0) && (corr_raw_ers)) {
suffix+="-RAW_ERS";
}
int ref_index = quadCLTs.length -1;
int num_sens = quadCLTs[ref_index].getNumSensors();
ErsCorrection ers_reference = quadCLTs[ref_index].getErsCorrection();
@@ -6053,12 +6080,16 @@
if ((scene_atr==null) || (scene_xyz == null)) {
continue;
}
if ((mode3d >= 0) || corr_raw_ers) {
double [] scene_ers_xyz_dt = ers_reference.getSceneErsXYZ_dt(ts);
double [] scene_ers_atr_dt = ers_reference.getSceneErsATR_dt(ts);
quadCLTs[nscene].getErsCorrection().setErsDt(
scene_ers_xyz_dt, // double [] ers_xyz_dt,
scene_ers_atr_dt); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
if (mode3d < 0) { // velocities != 0, but offset=0
scene_xyz = ZERO3;
scene_atr = ZERO3;
}
} else { // ugly, restore for raw mode that should not be rotated/shifted
scene_xyz = ZERO3;
scene_atr = ZERO3;
@@ -6076,10 +6107,14 @@
int sm = merge_all? -1: sensor_mask;
ImagePlus imp_scene = null;
double [][] dxyzatr_dt = null;
// should get velocities from the HashMap at the reference scene by timestamp, not re-calculate.
if (mb_en) {
// dxyzatr_dt = getVelocities(
// quadCLTs, // QuadCLT [] quadCLTs,
// nscene); // int nscene)
dxyzatr_dt = new double[][] { // for all, including ref
quadCLTs[nscene].getErsCorrection().getErsXYZ_dt(),
quadCLTs[nscene].getErsCorrection().getErsATR_dt()};
}
@@ -6123,8 +6158,9 @@
clt_parameters, // CLTParameters clt_parameters,
ref_disparity, // double [] disparity_ref,
// not used, just as null/not null now
// null means uniform grid, no view transform. even with 0 rot ERS was changing results
((!corr_raw_ers && (mode3d<0))? null:scene_xyz), // final double [] scene_xyz, // camera center in world coordinates
((!corr_raw_ers && (mode3d<0))? null:scene_atr), // final double [] scene_atr, // camera orientation relative to world frame
quadCLTs[nscene], // final QuadCLT scene,
quadCLTs[ref_index], // final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
toRGB, // final boolean toRGB,
@@ -6249,21 +6285,15 @@
double [] use_atr = null;
int dbg_ntry = 61;
double [] ref_disparity = reference_QuadClt.getDLS()[use_lma_dsi?1:0];
double [][] pXpYD_ref = transformToScenePxPyD( // full size - [tilesX*tilesY], some nulls
double [] ref_disparity = null;
double [][] pXpYD_ref = null;
TpTask[] tp_tasks_ref = null;
if (use_new_mode) {
ref_disparity = reference_QuadClt.getDLS()[use_lma_dsi?1:0];
pXpYD_ref = transformToScenePxPyD( // full size - [tilesX*tilesY], some nulls
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
reference_QuadClt, // final QuadCLT scene_QuadClt,
reference_QuadClt); // final QuadCLT reference_QuadClt)
TpTask[] tp_tasks_ref = setReferenceGPU (
clt_parameters, // CLTParameters clt_parameters,
reference_QuadClt, // QuadCLT ref_scene,
ref_disparity, // double [] ref_disparity, // null or alternative reference disparity
@@ -6275,7 +6305,6 @@
0.0, // double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
null, // double [][] mb_vectors, // now [2][ntiles];
debugLevel)[0]; // int debug_level)
}
try_around:
for (rad = 00; rad <= search_rad; rad++) {
@@ -6295,9 +6324,7 @@
if (ntry==dbg_ntry) {
System.out.println("ntry="+ntry);
}
double [][][] coord_motion = interCorrPair( // new double [tilesY][tilesX][][];
if (use_new_mode) {
coord_motion = interCorrPair( // new double [tilesY][tilesX][][];
clt_parameters, // CLTParameters clt_parameters,
reference_QuadClt, // QuadCLT reference_QuadCLT,
ref_disparity, // null, // double [] ref_disparity, // null or alternative reference disparity
@@ -6316,24 +6343,6 @@
null, // double [][] mb_vectors, // now [2][ntiles];
clt_parameters.imp.debug_level, // int imp_debug_level,
clt_parameters.imp.debug_level); // 1); // -1); // int debug_level);
} else {
coord_motion = interCorrPair_old( // new double [tilesY][tilesX][][];
clt_parameters, // CLTParameters clt_parameters,
reference_QuadClt, // QuadCLT reference_QuadCLT,
null, // double [] ref_disparity, // null or alternative reference disparity
scene_QuadClt, // QuadCLT scene_QuadCLT,
pose[0], // camera_xyz0, // xyz
atr, // camera_atr0, // pose[1], // atr
reliable_ref, // null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
margin, // final int margin,
sensor_mask_inter, // final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
facc_2d_img, // final float [][][] accum_2d_corr, // if [1][][] - return accumulated 2d correlations (all pairs)final float [][][] accum_2d_corr, // if [1][][] - return accumulated 2d correlations (all pairs)
null, // final float [][][] dbg_corr_fpn,
false, // boolean near_important, // do not reduce weight of the near tiles
treat_serch_fpn, // true, // boolean all_fpn, // do not lower thresholds for non-fpn (used during search)
clt_parameters.imp.debug_level, // int imp_debug_level,
clt_parameters.imp.debug_level); // 1); // -1); // int debug_level);
}
// may return null if failed to match
if (coord_motion == null) {
System.out.println("Failed to find a match between the reference scene ("+reference_QuadClt.getImageName() +
@@ -7120,6 +7129,7 @@
/**
* Calculate linear and angular velocities by running-average of the poses
* Inverts Azimuth and Roll directions
* @param scenes Scene objects (to get timestamps)
* @param scenes_xyzatr scene linear and angular coordinate [scene][2][3]
* @param half_run_range number of scenes each way to average (0 - none,
@@ -7176,6 +7186,103 @@
return ers_xyzatr;
}
/**
* Calculate linear and angular velocities by running-average of the poses.
* Does not invert Azimuth and Roll (or any other) directions.
* @param scenes Scene objects (to get timestamps)
* @param ref_index reference scene index in scenes[]
* @param start_scene first scene (inclusive) to process
* @param end_scene end scene (inclusive) to process
* @param scenes_xyzatr scene linear and angular coordinates [scene][2][3]
* @param half_run_range number of scenes each way to average (0 - none,
* 1 - 3, 2 - 5), reduced near the limits; if <= 0 use finite differences between adjacent scenes
* @return [scene][2][3] xyz/dt, atr/dt.
*/
public static double [][][] getVelocitiesFromScenes(
QuadCLT [] scenes, // ordered by increasing timestamps
int ref_index,
int start_scene,
int end_scene,
double [][][] scenes_xyzatr,
double half_run_range // <= 0: use +/-1 neighbors (or +0 if they are not available)
){
double [][][] ers_xyzatr = new double [scenes_xyzatr.length][][];
if (half_run_range <=0 ) {
ErsCorrection ers_reference = scenes[ref_index].getErsCorrection();
for (int nscene = start_scene; nscene <= end_scene; nscene ++) if (scenes[nscene] != null){
int nscene0 = nscene - 1;
if ((nscene0 < start_scene) ||
(scenes[nscene0]== null)||
(ers_reference.getSceneXYZ(scenes[nscene0].getImageName())== null) ||
(ers_reference.getSceneATR(scenes[nscene0].getImageName())== null)) {
nscene0 = nscene;
}
int nscene1 = nscene + 1;
if ((nscene1 > end_scene) || (scenes[nscene1]== null)) {
nscene1 = nscene;
}
if (nscene1 > nscene0) {
String ts0 = scenes[nscene0].getImageName();
String ts1 = scenes[nscene1].getImageName();
double dt = scenes[nscene1].getTimeStamp() - scenes[nscene0].getTimeStamp();
double [] scene_xyz0 = ers_reference.getSceneXYZ(ts0);
double [] scene_atr0 = ers_reference.getSceneATR(ts0);
double [] scene_xyz1 = (nscene1== ref_index)? ZERO3:ers_reference.getSceneXYZ(ts1);
double [] scene_atr1 = (nscene1== ref_index)? ZERO3:ers_reference.getSceneATR(ts1);
ers_xyzatr[nscene] = new double[2][3];
for (int i = 0; i < 3; i++) {
ers_xyzatr[nscene][0][i] = (scene_xyz1[i]-scene_xyz0[i])/dt;
ers_xyzatr[nscene][1][i] = (scene_atr1[i]-scene_atr0[i])/dt;
}
} else {
System.out.println("**** Isoloated scene!!! skipping... now may only happen for a ref_scene****");
}
}
} else {
double [] weights = new double [(int) Math.floor(half_run_range)+1];
weights[0] = 1;
for (int i = 1; i < weights.length; i++) {
weights[i] = 0.5* (Math.cos(i*Math.PI/half_run_range) +1.0);
}
for (int nscene = start_scene; nscene <= end_scene; nscene ++) if (scenes[nscene] != null){
double tref = scenes[nscene].getTimeStamp();
double s0 = 0.0, sx = 0.0, sx2 = 0.0;
double [][] sy = new double[2][3];
double [][] sxy = new double[2][3];
int nds = 0;
for (int ds = -weights.length + 1; ds < weights.length; ds++) {
int ns = nscene + ds;
if ((ns >= 0) && (ns < scenes_xyzatr.length) && (scenes_xyzatr[ns] != null)) {
double w = (ds >= 0) ? weights[ds] : weights[-ds];
s0 += w;
double dt = scenes[ns].getTimeStamp() - tref;
double wx = w * dt;
sx += wx;
sx2 += wx * dt;
for (int m = 0; m < sy.length; m++) {
for (int d = 0; d < sy[m].length; d++) {
double y = scenes_xyzatr[ns][m][d];
sy [m][d] += w * y;
sxy[m][d] += wx * y;
}
}
nds++; // number of different timestamps used
}
}
if ((nds > 1) && (s0 > 0)) {
ers_xyzatr[nscene] = new double[2][3];
for (int m = 0; m < sy.length; m++) {
for (int d = 0; d < sy[m].length; d++) {
ers_xyzatr[nscene][m][d] = (s0*sxy[m][d]-sx*sy[m][d])/(s0*sx2 - sx*sx);
}
}
}
}
}
return ers_xyzatr;
}
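getVelocitiesFromScenes() estimates each velocity component as the slope of a weighted least-squares line fit of the pose component against time, with a raised-cosine window around the current scene. A sketch of the math the loop above implements (h is half_run_range, x = t_scene - t_ref, y one pose component, w the window weights):

\[ w_i = \tfrac{1}{2}\left(\cos\frac{i\,\pi}{h} + 1\right),\qquad i = 0,\dots,\lfloor h\rfloor \]
\[ S_0=\sum w,\quad S_x=\sum w\,x,\quad S_{x2}=\sum w\,x^2,\quad S_y=\sum w\,y,\quad S_{xy}=\sum w\,x\,y \]
\[ \frac{dy}{dt} \approx \frac{S_0\,S_{xy}-S_x\,S_y}{S_0\,S_{x2}-S_x^{2}} \]

The same slope formula is applied independently to each of the six xyz/atr components; for half_run_range <= 0 the method falls back to a plain finite difference between the neighboring scenes.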
public void IntersceneAccumulate(
CLTParameters clt_parameters,
ColorProcParameters colorProcParameters,
@@ -9238,6 +9345,9 @@
double[][] disparity_map = no_map ? null : new double [image_dtt.getDisparityTitles().length][];
final double disparity_corr = 0.00; // (z_correction == 0) ? 0.0 : geometryCorrection.getDisparityFromZ(1.0/z_correction);
TpTask[] tp_tasks_ref = null;
//TODO: Need to set scenes[indx_ref].getErsCorrection().setErsDt() before processing scenes ?
for (int nscene = 0; nscene < scenes.length; nscene++) {
String ts = scenes[nscene].getImageName();
double [][] scene_pXpYD;
@@ -11023,12 +11133,12 @@
return tp_tasks_ref;
}
public double [][][] interCorrPair( // return [tilesX*tilesY]{ref_pXpYD, dXdYS}
@Deprecated
public double [][][] interCorrPair_old( // return [tilesX*telesY]{ref_pXpYD, dXdYS}
CLTParameters clt_parameters,
QuadCLT ref_scene,
double [] ref_disparity, // null or alternative reference disparity
double [][] pXpYD_ref, // pXpYD for the reference scene
TpTask[] tp_tasks_ref, // only (main if MB correction) tasks for FPN correction
QuadCLT scene,
double [] scene_xyz,
double [] scene_atr,
@@ -11039,9 +11149,14 @@
final float [][] dbg_corr_fpn,
boolean near_important, // do not reduce weight of the near tiles
boolean all_fpn, // do not lower thresholds for non-fpn (used during search)
// motion blur compensation
double [][] mb_vectors, // now [2][ntiles];
int imp_debug_level, // MANUALLY change to 2 for debug!
int debug_level)
{
if (!ref_scene.hasGPU()) {
throw new IllegalArgumentException ("interCorrPair(): CPU mode not supported");
}
TileProcessor tp = ref_scene.getTileProcessor();
// Temporary reusing same ref scene ******
boolean scene_is_ref_test = clt_parameters.imp.scene_is_ref_test; // false; // true;
@@ -11063,6 +11178,7 @@
boolean fpn_ignore_border = clt_parameters.imp.fpn_ignore_border; // only if fpn_mask != null - ignore tile if maximum touches fpn_mask
boolean eq_debug = false;
boolean transform_debug = false;
boolean eq_en = near_important && clt_parameters.imp.eq_en; // true;// equalize "important" FG tiles for better camera XYZ fitting
int eq_stride_hor = clt_parameters.imp.eq_stride_hor; // 8;
int eq_stride_vert = clt_parameters.imp.eq_stride_vert; // 8;
@@ -11075,13 +11191,14 @@
double eq_weight_scale = clt_parameters.imp.eq_weight_scale; // 10;
double eq_level = clt_parameters.imp.eq_level; // 0.8; // equalize to (log) fraction of average/this strength
final double gpu_sigma_corr = clt_parameters.getGpuCorrSigma(ref_scene.isMonochrome());
final double gpu_sigma_rb_corr = ref_scene.isMonochrome()? 1.0 : clt_parameters.gpu_sigma_rb_corr;
final double gpu_sigma_log_corr = clt_parameters.getGpuCorrLoGSigma(ref_scene.isMonochrome());
boolean mb_en = clt_parameters.imp.mb_en;
double mb_tau = clt_parameters.imp.mb_tau; // 0.008; // time constant, sec
double mb_max_gain = clt_parameters.imp.mb_max_gain_inter; // 2.0; // motion blur maximal gain (if more - move second point more than a pixel)
ref_scene = scene; // or opposite: scene = ref_scene;
}
int tilesX = tp.getTilesX();
int tilesY = tp.getTilesY();
double [][][] coord_motion = null; // new double [2][tilesX*tilesY][];
@@ -11110,36 +11227,79 @@
if (ref_disparity == null) {
ref_disparity = ref_scene.getDLS()[use_lma_dsi?1:0];
}
double [][] ref_pXpYD = transformToScenePxPyD( // full size - [tilesX*tilesY], some nulls
double [][] scene_pXpYD = transformToScenePxPyD( // will be null for disparity == NaN, total size - tilesX*tilesY
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scene, // final QuadCLT scene_QuadClt,
scene_is_ref_test ? null: ref_scene); // final QuadCLT reference_QuadClt)
if (transform_debug) {
// calculate with no transform
double [][] dbg_ref_pXpYD = transformToScenePxPyD( // will be null for disparity == NaN, total size - tilesX*tilesY
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
ZERO3, // scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
ref_scene, // final QuadCLT scene_QuadClt,
ref_scene); // final QuadCLT reference_QuadClt)
double [][] dbg_ref_pXpYD2 = transformToScenePxPyD( // will be null for disparity == NaN, total size - tilesX*tilesY
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
ZERO3, // scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
new double[] {0,0,0.00001}, // scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
ref_scene, // final QuadCLT scene_QuadClt,
ref_scene); // final QuadCLT reference_QuadClt)
double [][] dbg_img = new double[6][dbg_ref_pXpYD.length];
for (int i = 0; i < dbg_img.length; i++) {
Arrays.fill(dbg_img[i], Double.NaN);
}
for (int nt = 0; nt < dbg_ref_pXpYD.length; nt++) {
if (dbg_ref_pXpYD[nt] != null) {
for (int i = 0; i < dbg_ref_pXpYD[nt].length; i++){
dbg_img[2*i][nt] = dbg_ref_pXpYD[nt][i];
}
}
if (dbg_ref_pXpYD2[nt] != null) {
for (int i = 0; i < dbg_ref_pXpYD2[nt].length; i++){
dbg_img[2*i+1][nt] = dbg_ref_pXpYD2[nt][i];
}
}
}
String [] dbg_titles = {"pX0","pX2","pY0","pY2","disp0","disp2"};
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_img,
tilesX,
tilesY,
true,
scene.getImageName()+"-"+ref_scene.getImageName()+"-transform_test",
dbg_titles);
System.out.println("transform_test ref_scene: "+ ref_scene.getImageName());
ref_scene.getErsCorrection().printVectors(scene_xyz, scene_atr);
}
TpTask[][] tp_tasks;
if (mb_en && (mb_vectors!=null)) {
tp_tasks = GpuQuad.setInterTasksMotionBlur(
scene.getNumSensors(),
scene.getErsCorrection().getSensorWH()[0],
!scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
scene_pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
selection, // final boolean [] selection, // may be null, if not null do not process unselected tiles
// motion blur compensation
mb_tau, // final double mb_tau, // 0.008; // time constant, sec
mb_max_gain, // final double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
mb_vectors, // final double [][] mb_vectors, //
scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
THREADS_MAX); // final int threadsMax) // maximal number of threads to launch
} else {
tp_tasks = new TpTask[1][];
tp_tasks[0] = GpuQuad.setInterTasks(
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scene, // final QuadCLT scene_QuadClt,
scene_is_ref_test ? null: ref_scene); // final QuadCLT reference_QuadClt)
TpTask[] tp_tasks = GpuQuad.setInterTasks(
scene.getNumSensors(),
scene.getErsCorrection().getSensorWH()[0],
!scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
@@ -11150,40 +11310,31 @@
margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
THREADS_MAX); // final int threadsMax) // maximal number of threads to launch
}
scene.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
float [][][][] fcorr_td = null; // no accumulation, use data in GPU
if (mb_en && (mb_vectors!=null)) {
image_dtt.interCorrTDMotionBlur(
erase_clt,
null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tp_tasks, // final TpTask[][] tp_tasks,
fcorr_td, // final float [][][][] fcorr_td, // [tilesY][tilesX][pair][4*64] transform domain representation of 6 corr pairs
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
gpu_sigma_rb_corr, // final double gpu_sigma_rb_corr, // = 0.5; // apply LPF after accumulating R and B correlation before G, monochrome ? 1.0 :
gpu_sigma_corr, // final double gpu_sigma_corr, // = 0.9;gpu_sigma_corr_m
gpu_sigma_log_corr, // final double gpu_sigma_log_corr, // hpf to reduce dynamic range for correlations
clt_parameters.corr_red, // final double corr_red, // +used
clt_parameters.corr_blue, // final double corr_blue,// +used
sensor_mask_inter, // final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel);
} else {
ImagePlus imp_render_ref = ref_scene.renderFromTD (
-1, // final int sensor_mask,
false, // boolean merge_channels,
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(ref_scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
null, // int [] wh,
toRGB, // boolean toRGB,
true, //boolean use_reference
"GPU-SHIFTED-REFERENCE"); // String suffix)
imp_render_ref.show();
}
scene.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
image_dtt.interCorrTD(
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tp_tasks[0], // final TpTask[] tp_tasks,
fcorr_td, // final float [][][][] fcorr_td, // [tilesY][tilesX][pair][4*64] transform domain representation of 6 corr pairs
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
@@ -11197,6 +11348,7 @@
sensor_mask_inter, // final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel);
}
if (show_render_ref) {
ImagePlus imp_render_ref = ref_scene.renderFromTD (
-1, // final int sensor_mask,
@@ -11210,7 +11362,7 @@
"GPU-SHIFTED-REF"); // String suffix)
imp_render_ref.show();
}
if (show_render_scene) { // set toRGB=false
ImagePlus imp_render_scene = scene.renderFromTD (
-1, // final int sensor_mask,
false, // boolean merge_channels,
@@ -11221,12 +11373,13 @@
toRGB, // boolean toRGB,
false, //boolean use_reference
"GPU-SHIFTED-SCENE"); // String suffix)
imp_render_scene.show();
}
if (dbg_corr_fpn != null) { // 2*16 or 2*17 (average, individual)
float [][] foffsets = getInterCorrOffsetsDebug(
tp_tasks_ref, // final TpTask[] tp_tasks_ref,
tp_tasks[0], // final TpTask[] tp_tasks,
tilesX, // final int tilesX,
tilesY); // final int tilesY
for (int i = 0; (i < dbg_corr_fpn.length) && (i < foffsets.length); i++) {
@@ -11239,30 +11392,35 @@
fpn_offsets = getInterCorrOffsets(
fpn_max_offset, // final double max_offset,
tp_tasks_ref, // final TpTask[] tp_tasks_ref,
tp_tasks[0], // final TpTask[] tp_tasks,
tilesX, // final int tilesX,
tilesY); // final int tilesY);
}
double half_disparity = near_important ? 0.0 : clt_parameters.imp.half_disparity;
double [][][] dcorr_tiles = (fclt_corr != null)? (new double [tp_tasks[0].length][][]):null;
// will use num_acc with variable number of accumulations (e.g. clusters)
//all_fpn
double min_str = all_fpn ? clt_parameters.imp.min_str_fpn : clt_parameters.imp.min_str;
double min_str_sum = all_fpn ? clt_parameters.imp.min_str_sum_fpn : clt_parameters.imp.min_str_sum;
double corr_fz_inter = clt_parameters.getGpuFatZeroInter(ref_scene.isMonochrome());
if (mb_en && (mb_vectors!=null)) { // increase fat zero when there is motion blur
corr_fz_inter *= 8;
}
coord_motion = image_dtt.clt_process_tl_interscene( // convert to pixel domain and process correlations already prepared in fcorr_td and/or fcorr_combo_td
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
fcorr_td, // final float [][][][] fcorr_td, // [tilesY][tilesX][pair][4*64] transform domain representation of all selected corr pairs
null, // float [][][] num_acc, // number of accumulated tiles [tilesY][tilesX][pair] (or null). Can be inner null if not used in tp_tasks
null, // double [] dcorr_weight, // alternative to num_acc, compatible with CPU processing (only one non-zero enough)
clt_parameters.gpu_corr_scale, // final double gpu_corr_scale, // 0.75; // reduce GPU-generated correlation values
corr_fz_inter, // final double gpu_fat_zero, // clt_parameters.getGpuFatZero(is_mono); absolute == 30.0
image_dtt.transform_size - 1, // final int gpu_corr_rad, // = transform_size - 1 ?
// The tp_tasks data should be decoded from GPU to get coordinates
tp_tasks, // final TpTask [] tp_tasks, // data from the reference frame - will be applied to LMA for the integrated correlations // should it be reference or scene? Or any?
tp_tasks[0], // final TpTask [] tp_tasks, // data from the reference frame - will be applied to LMA for the integrated correlations
// to be converted to float (may be null) // to be converted to float (may be null)
dcorr_tiles, // final double [][][] dcorr_tiles, // [tile][pair_abs, sparse][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate dcorr_tiles, // final double [][][] dcorr_tiles, // [tile][pair_abs, sparse][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
ref_pXpYD, // final double [][] pXpYD, // pXpYD for the reference scene pXpYD_ref, // ref_pXpYD, // final double [][] pXpYD, // pXpYD for the reference scene
fpn_offsets, // final double [][] fpn_offsets, // null, or per-tile X,Y offset to be blanked fpn_offsets, // final double [][] fpn_offsets, // null, or per-tile X,Y offset to be blanked
fpn_radius, // final double fpn_radius, // radius to be blanked around FPN offset center fpn_radius, // final double fpn_radius, // radius to be blanked around FPN offset center
fpn_ignore_border, // final boolean fpn_ignore_border, // only if fpn_mask != null - ignore tile if maximum touches fpn_mask fpn_ignore_border, // final boolean fpn_ignore_border, // only if fpn_mask != null - ignore tile if maximum touches fpn_mask
...@@ -11293,8 +11451,8 @@ public class OpticalFlow { ...@@ -11293,8 +11451,8 @@ public class OpticalFlow {
} }
if (eq_en) { if (eq_en) {
// double eq_weight_add = (min_str * clt_parameters.imp.pd_weight + min_str_sum * clt_parameters.imp.td_weight) / // double eq_weight_add = (min_str * clt_parameters.imp.pd_weight + min_str_sum * clt_parameters.imp.td_weight) /
// (clt_parameters.imp.pd_weight + clt_parameters.imp.td_weight); // (clt_parameters.imp.pd_weight + clt_parameters.imp.td_weight);
double [] strength_backup = null; double [] strength_backup = null;
if (eq_debug) { // **** Set manually in debugger **** if (eq_debug) { // **** Set manually in debugger ****
strength_backup = new double [coord_motion[1].length]; strength_backup = new double [coord_motion[1].length];
...@@ -11370,7 +11528,7 @@ public class OpticalFlow { ...@@ -11370,7 +11528,7 @@ public class OpticalFlow {
} }
} }
} }
// seems that fclt_corr and fclt_corr1 are both not needed w/o debug
float [][][] fclt_corr1 = ImageDtt.convertFcltCorr( // partial length, matching corr_indices = gpuQuad.getCorrIndices(); // also sets num_corr_tiles float [][][] fclt_corr1 = ImageDtt.convertFcltCorr( // partial length, matching corr_indices = gpuQuad.getCorrIndices(); // also sets num_corr_tiles
dcorr_tiles, // double [][][] dcorr_tiles,// [tile][sparse, correlation pair][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate dcorr_tiles, // double [][][] dcorr_tiles,// [tile][sparse, correlation pair][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
fclt_corr); // float [][][] fclt_corr) // new float [tilesX * tilesY][][] or null fclt_corr); // float [][][] fclt_corr) // new float [tilesX * tilesY][][] or null
...@@ -11404,484 +11562,9 @@ public class OpticalFlow { ...@@ -11404,484 +11562,9 @@ public class OpticalFlow {
scene.getImageName()+"-"+ref_scene.getImageName()+"-interscene", scene.getImageName()+"-"+ref_scene.getImageName()+"-interscene",
titles); titles);
} }
} else {
throw new IllegalArgumentException ("interCorrPair(): CPU mode not supported");
}
if (motion_vectors != null) {
int num_sens = 0;
int num_rslt = 0;
find_num_layers:
for (int nt = 0; nt <motion_vectors.length; nt++) {
if (motion_vectors[nt] != null) {
num_sens = motion_vectors[nt].length;
for (int i = 0; i < num_sens; i++) {
if (motion_vectors[nt][i] != null) {
num_rslt = motion_vectors[nt][i].length;
break find_num_layers;
}
}
}
}
double [][] dbg_img = new double [num_sens * num_rslt][tilesX*tilesY];
String [] titles = new String [dbg_img.length];
for (int ns = 0; ns < num_sens; ns++) {
String ss = (ns == (num_sens - 1))?"sum":(""+ns);
for (int nr = 0; nr < num_rslt; nr++) {
int indx = ns*num_rslt+nr;
Arrays.fill(dbg_img[indx],Double.NaN);
String sr="";
switch (nr) {
case 0:sr ="X";break;
case 1:sr ="Y";break;
case 2:sr ="S";break;
default:
sr = ""+nr;
}
titles[indx] = ss+":"+sr;
for (int nt = 0; nt <motion_vectors.length; nt++) {
if ((motion_vectors[nt] != null) && (motion_vectors[nt][ns] != null) ) {
dbg_img[indx][nt] = motion_vectors[nt][ns][nr];
}
}
}
}
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_img,
tilesX,
tilesY,
true,
scene.getImageName()+"-"+ref_scene.getImageName()+"-motion_vectors",
titles);
}
if (show_coord_motion) {
//coord_motion
String [] mvTitles = {"dx", "dy", "conf", "pX", "pY","Disp","defined"}; // ,"blurX","blurY", "blur"};
double [][] dbg_img = new double [mvTitles.length][tilesX*tilesY];
for (int l = 0; l < dbg_img.length; l++) {
Arrays.fill(dbg_img[l], Double.NaN);
}
for (int nTile = 0; nTile < coord_motion[0].length; nTile++) {
if (coord_motion[0][nTile] != null) {
for (int i = 0; i <3; i++) {
dbg_img[3+i][nTile] = coord_motion[0][nTile][i];
}
}
if (coord_motion[1][nTile] != null) {
for (int i = 0; i <3; i++) {
dbg_img[0+i][nTile] = coord_motion[1][nTile][i];
}
}
dbg_img[6][nTile] = ((coord_motion[0][nTile] != null)?1:0)+((coord_motion[1][nTile] != null)?2:0); // bit 0 - pXpYD defined, bit 1 - motion defined
}
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_img,
tilesX,
tilesY,
true,
scene.getImageName()+"-"+ref_scene.getImageName()+"-coord_motion",
mvTitles);
}
if (debug_level > 0){
int num_defined = 0;
double sum_strength = 0.0;
for (int i = 0; i < coord_motion[1].length; i++) if (coord_motion[1][i] != null){
sum_strength += coord_motion[1][i][2];
num_defined++;
}
System.out.println ("interCorrPair(): num_defined = "+num_defined+
", sum_strength = "+sum_strength+", avg_strength = "+(sum_strength/num_defined));
}
return coord_motion;
}
public double [][][] interCorrPair( // return [tilesX*tilesY]{ref_pXpYD, dXdYS}
CLTParameters clt_parameters,
QuadCLT ref_scene,
double [] ref_disparity, // null or alternative reference disparity
double [][] pXpYD_ref, // pXpYD for the reference scene
TpTask[] tp_tasks_ref, // only (main if MB correction) tasks for FPN correction
QuadCLT scene,
double [] scene_xyz,
double [] scene_atr,
final boolean [] selection, // may be null, if not null do not process unselected tiles
final int margin,
final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
final float [][][] accum_2d_corr, // if [1][][] - return accumulated 2d correlations (all pairs)
final float [][] dbg_corr_fpn,
boolean near_important, // do not reduce weight of the near tiles
boolean all_fpn, // do not lower thresholds for non-fpn (used during search)
// motion blur compensation
double [][] mb_vectors, // now [2][ntiles];
int imp_debug_level, // MANUALLY change to 2 for debug!
int debug_level)
{
if (!ref_scene.hasGPU()) {
throw new IllegalArgumentException ("interCorrPair(): CPU mode not supported");
}
TileProcessor tp = ref_scene.getTileProcessor();
// Temporary reusing same ref scene ******
boolean scene_is_ref_test = clt_parameters.imp.scene_is_ref_test; // false; // true;
boolean show_2d_correlations = clt_parameters.imp.show2dCorrelations(imp_debug_level); // true;
boolean show_motion_vectors = clt_parameters.imp.showMotionVectors(imp_debug_level); // true;
boolean show_render_ref = clt_parameters.imp.renderRef(imp_debug_level); // false; //true;
boolean show_render_scene = clt_parameters.imp.renderScene(imp_debug_level); // false; // true;
boolean toRGB = clt_parameters.imp.toRGB ; // true;
boolean show_coord_motion = clt_parameters.imp.showCorrMotion(imp_debug_level); // make its own
int erase_clt = (toRGB? clt_parameters.imp.show_color_nan : clt_parameters.imp.show_mono_nan) ? 1:0;
boolean use_lma_dsi = clt_parameters.imp.use_lma_dsi;
boolean mov_en = clt_parameters.imp.mov_en; // true; // enable detection/removal of the moving objects during pose matching
boolean mov_debug_images = clt_parameters.imp.showMovementDetection(imp_debug_level);
int mov_debug_level = clt_parameters.imp.movDebugLevel(imp_debug_level);
boolean fpn_remove = clt_parameters.imp.fpn_remove;
double fpn_max_offset = clt_parameters.imp.fpn_max_offset;
double fpn_radius = clt_parameters.imp.fpn_radius;
boolean fpn_ignore_border = clt_parameters.imp.fpn_ignore_border; // only if fpn_mask != null - ignore tile if maximum touches fpn_mask
boolean eq_debug = false;
boolean eq_en = near_important && clt_parameters.imp.eq_en; // true;// equalize "important" FG tiles for better camera XYZ fitting
int eq_stride_hor = clt_parameters.imp.eq_stride_hor; // 8;
int eq_stride_vert = clt_parameters.imp.eq_stride_vert; // 8;
double eq_min_stile_weight = clt_parameters.imp.eq_min_stile_weight; // 0.2; // 1.0;
int eq_min_stile_number = clt_parameters.imp.eq_min_stile_number; // 10;
double eq_min_stile_fraction =clt_parameters.imp.eq_min_stile_fraction; // 0.02; // 0.05;
double eq_min_disparity = clt_parameters.imp.eq_min_disparity; // 5;
double eq_max_disparity = clt_parameters.imp.eq_max_disparity; // 100;
double eq_weight_add = clt_parameters.imp.eq_weight_add; // 0.05;
double eq_weight_scale = clt_parameters.imp.eq_weight_scale; // 10;
double eq_level = clt_parameters.imp.eq_level; // 0.8; // equalize to (log) fraction of average/this strength
final double gpu_sigma_corr = clt_parameters.getGpuCorrSigma(ref_scene.isMonochrome());
final double gpu_sigma_rb_corr = ref_scene.isMonochrome()? 1.0 : clt_parameters.gpu_sigma_rb_corr;
final double gpu_sigma_log_corr = clt_parameters.getGpuCorrLoGSigma(ref_scene.isMonochrome());
boolean mb_en = clt_parameters.imp.mb_en ;
double mb_tau = clt_parameters.imp.mb_tau; // 0.008; // time constant, sec
double mb_max_gain = clt_parameters.imp.mb_max_gain; // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
int tilesX = tp.getTilesX();
int tilesY = tp.getTilesY();
double [][][] coord_motion = null; // new double [2][tilesX*tilesY][];
final double [][][] motion_vectors = show_motion_vectors?new double [tilesY *tilesX][][]:null;
final float [][][] fclt_corr = ((accum_2d_corr != null) || show_2d_correlations) ?
(new float [tilesX * tilesY][][]) : null;
if (debug_level > 1) {
System.out.println("interCorrPair(): "+IntersceneLma.printNameV3("ATR",scene_atr)+
" "+IntersceneLma.printNameV3("XYZ",scene_xyz));
}
ImageDtt image_dtt;
image_dtt = new ImageDtt(
numSens,
clt_parameters.transform_size,
clt_parameters.img_dtt,
ref_scene.isAux(),
ref_scene.isMonochrome(),
ref_scene.isLwir(),
clt_parameters.getScaleStrength(ref_scene.isAux()),
ref_scene.getGPU());
if (ref_scene.getGPU() != null) {
ref_scene.getGPU().setGpu_debug_level(debug_level - 4); // monitor GPU ops >=-1
}
final double disparity_corr = 0.0; // (z_correction == 0) ? 0.0 : geometryCorrection.getDisparityFromZ(1.0/z_correction);
//ref_disparity
if (ref_disparity == null) {
ref_disparity = ref_scene.getDLS()[use_lma_dsi?1:0];
}
double [][] scene_pXpYD = transformToScenePxPyD( // will be null for disparity == NaN, total size - tilesX*tilesY
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scene, // final QuadCLT scene_QuadClt,
scene_is_ref_test ? null: ref_scene); // final QuadCLT reference_QuadClt)
TpTask[][] tp_tasks;
if (mb_en && (mb_vectors!=null)) {
tp_tasks = GpuQuad.setInterTasksMotionBlur(
scene.getNumSensors(),
scene.getErsCorrection().getSensorWH()[0],
!scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
scene_pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
selection, // final boolean [] selection, // may be null, if not null do not process unselected tiles
// motion blur compensation
mb_tau, // final double mb_tau, // 0.008; // time constant, sec
mb_max_gain, // final double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
mb_vectors, // final double [][] mb_vectors, //
scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
THREADS_MAX); // final int threadsMax) // maximal number of threads to launch
} else {
tp_tasks = new TpTask[1][];
tp_tasks[0] = GpuQuad.setInterTasks(
scene.getNumSensors(),
scene.getErsCorrection().getSensorWH()[0],
!scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
scene_pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
selection, // final boolean [] selection, // may be null, if not null do not process unselected tiles
scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
THREADS_MAX); // final int threadsMax) // maximal number of threads to launch
}
scene.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
float [][][][] fcorr_td = null; // no accumulation, use data in GPU
if (mb_en && (mb_vectors!=null)) {
image_dtt.interCorrTDMotionBlur(
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tp_tasks, // final TpTask[][] tp_tasks,
fcorr_td, // final float [][][][] fcorr_td, // [tilesY][tilesX][pair][4*64] transform domain representation of 6 corr pairs
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
gpu_sigma_rb_corr, // final double gpu_sigma_rb_corr, // = 0.5; // apply LPF after accumulating R and B correlation before G, monochrome ? 1.0 :
gpu_sigma_corr, // final double gpu_sigma_corr, // = 0.9;gpu_sigma_corr_m
gpu_sigma_log_corr, // final double gpu_sigma_log_corr, // hpf to reduce dynamic range for correlations
clt_parameters.corr_red, // final double corr_red, // +used
clt_parameters.corr_blue, // final double corr_blue,// +used
sensor_mask_inter, // final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel);
} else {
image_dtt.interCorrTD(
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tp_tasks[0], // final TpTask[] tp_tasks,
fcorr_td, // final float [][][][] fcorr_td, // [tilesY][tilesX][pair][4*64] transform domain representation of 6 corr pairs
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
gpu_sigma_rb_corr, // final double gpu_sigma_rb_corr, // = 0.5; // apply LPF after accumulating R and B correlation before G, monochrome ? 1.0 :
gpu_sigma_corr, // final double gpu_sigma_corr, // = 0.9;gpu_sigma_corr_m
gpu_sigma_log_corr, // final double gpu_sigma_log_corr, // hpf to reduce dynamic range for correlations
clt_parameters.corr_red, // final double corr_red, // +used
clt_parameters.corr_blue, // final double corr_blue,// +used
sensor_mask_inter, // final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel);
}
if (show_render_ref) {
ImagePlus imp_render_ref = ref_scene.renderFromTD (
-1, // final int sensor_mask,
false, // boolean merge_channels,
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(ref_scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
null, // int [] wh,
toRGB, // boolean toRGB,
true, //boolean use_reference
"GPU-SHIFTED-REF"); // String suffix)
imp_render_ref.show();
}
if (show_render_scene) { //set toRGB=false
ImagePlus imp_render_scene = scene.renderFromTD (
-1, // final int sensor_mask,
false, // boolean merge_channels,
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(ref_scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
null, // int [] wh,
toRGB, // boolean toRGB,
false, //boolean use_reference
"GPU-SHIFTED-SCENE"); // String suffix)
imp_render_scene.show();
}
if (dbg_corr_fpn != null) { // 2*16 or 2*17 (average, individual)
float [][] foffsets = getInterCorrOffsetsDebug(
tp_tasks_ref, // final TpTask[] tp_tasks_ref,
tp_tasks[0], // final TpTask[] tp_tasks,
tilesX, // final int tilesX,
tilesY); // final int tilesY
for (int i = 0; (i < dbg_corr_fpn.length) && (i < foffsets.length); i++) {
dbg_corr_fpn[i] = foffsets[i];
}
}
double [][] fpn_offsets = null;
if (fpn_remove) {
fpn_offsets = getInterCorrOffsets(
fpn_max_offset, // final double max_offset,
tp_tasks_ref, // final TpTask[] tp_tasks_ref,
tp_tasks[0], // final TpTask[] tp_tasks,
tilesX, // final int tilesX,
tilesY); // final int tilesY);
}
double half_disparity = near_important ? 0.0 : clt_parameters.imp.half_disparity;
double [][][] dcorr_tiles = (fclt_corr != null)? (new double [tp_tasks[0].length][][]):null;
// will use num_acc with variable number of accumulations (e.g. clusters)
//all_fpn
double min_str = all_fpn ? clt_parameters.imp.min_str_fpn : clt_parameters.imp.min_str;
double min_str_sum = all_fpn ? clt_parameters.imp.min_str_sum_fpn : clt_parameters.imp.min_str_sum;
double corr_fz_inter = clt_parameters.getGpuFatZeroInter(ref_scene.isMonochrome());
if (mb_en && (mb_vectors!=null)) { // increase fat zero when there is motion blur
corr_fz_inter *= 8;
}
coord_motion = image_dtt.clt_process_tl_interscene( // convert to pixel domain and process correlations already prepared in fcorr_td and/or fcorr_combo_td
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
fcorr_td, // final float [][][][] fcorr_td, // [tilesY][tilesX][pair][4*64] transform domain representation of all selected corr pairs
null, // float [][][] num_acc, // number of accumulated tiles [tilesY][tilesX][pair] (or null). Can be inner null if not used in tp_tasks
null, // double [] dcorr_weight, // alternative to num_acc, compatible with CPU processing (only one non-zero enough)
clt_parameters.gpu_corr_scale, // final double gpu_corr_scale, // 0.75; // reduce GPU-generated correlation values
corr_fz_inter, // final double gpu_fat_zero, // clt_parameters.getGpuFatZero(is_mono);absolute == 30.0
image_dtt.transform_size - 1, // final int gpu_corr_rad, // = transform_size - 1 ?
// The tp_tasks data should be decoded from GPU to get coordinates
// should it be reference or scene? Or any?
tp_tasks[0], // final TpTask [] tp_tasks, // data from the reference frame - will be applied to LMA for the integrated correlations
// to be converted to float (may be null)
dcorr_tiles, // final double [][][] dcorr_tiles, // [tile][pair_abs, sparse][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
pXpYD_ref, // ref_pXpYD, // final double [][] pXpYD, // pXpYD for the reference scene
fpn_offsets, // final double [][] fpn_offsets, // null, or per-tile X,Y offset to be blanked
fpn_radius, // final double fpn_radius, // radius to be blanked around FPN offset center
fpn_ignore_border, // final boolean fpn_ignore_border, // only if fpn_mask != null - ignore tile if maximum touches fpn_mask
motion_vectors, // final double [][][] motion_vectors, // [tilesY*tilesX][][] -> [][][num_sel_sensors+1][2]
clt_parameters.imp.run_poly, // final boolean run_poly, // polynomial max, if false - centroid
clt_parameters.imp.use_partial, // final boolean use_partial, // find motion vectors for individual pairs, false - for sum only
clt_parameters.imp.centroid_radius,// final double centroid_radius, // 0 - use all tile, >0 - cosine window around local max
clt_parameters.imp.n_recenter, // final int n_recenter, // when cosine window, re-center window this many times
clt_parameters.imp.td_weight, // final double td_weight, // mix correlations accumulated in TD with
clt_parameters.imp.pd_weight, // final double pd_weight, // correlations (post) accumulated in PD
clt_parameters.imp.td_nopd_only, // final boolean td_nopd_only, // only use TD accumulated data if no safe PD is available for the tile.
min_str, // final double min_str_nofpn, // = 0.25;
min_str_sum, // final double min_str_sum_nofpn, // = 0.8; // 5;
clt_parameters.imp.min_str_fpn, // final double min_str, // = 0.25;
clt_parameters.imp.min_str_sum_fpn,// final double min_str_sum, // = 0.8; // 5;
clt_parameters.imp.min_neibs, // final int min_neibs, // 2; // minimal number of strong neighbors (> min_str)
clt_parameters.imp.weight_zero_neibs, // final double weight_zero_neibs,// 0.2; // Reduce weight for no-neib (1.0 for all 8)
half_disparity, // final double half_disparity, // 5.0; // Reduce weight twice for this disparity
clt_parameters.imp.half_avg_diff, // final double half_avg_diff, // 0.2; // when L2 of x,y difference from average of neibs - reduce twice
clt_parameters.tileX, // final int debug_tileX,
clt_parameters.tileY, // final int debug_tileY,
THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debug_level);
// final int globalDebugLevel);
if (coord_motion == null) {
System.out.println("clt_process_tl_interscene() returned null");
return null;
}
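// coord_motion layout (inferred from the debug images below, not an author comment):
// coord_motion[0][nTile] = {pX, pY, Disparity} of the reference tile (or null),
// coord_motion[1][nTile] = {dX, dY, strength} of the measured inter-scene motion (or null).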
if (eq_en) {
// double eq_weight_add = (min_str * clt_parameters.imp.pd_weight + min_str_sum * clt_parameters.imp.td_weight) /
// (clt_parameters.imp.pd_weight + clt_parameters.imp.td_weight);
double [] strength_backup = null;
if (eq_debug) { // **** Set manually in debugger ****
strength_backup = new double [coord_motion[1].length];
for (int i = 0; i < strength_backup.length; i++) if (coord_motion[1][i] != null) {
strength_backup[i] = coord_motion[1][i][2];
} else {
strength_backup[i] = Double.NaN;
}
}
do {
// restore
if (strength_backup != null) {
for (int i = 0; i < strength_backup.length; i++) if (coord_motion[1][i] != null) {
coord_motion[1][i][2] = strength_backup[i];
}
}
equalizeMotionVectorsWeights(
coord_motion, // final double [][][] coord_motion,
tilesX, // final int tilesX,
eq_stride_hor, // final int stride_hor,
eq_stride_vert, // final int stride_vert,
eq_min_stile_weight, // final double min_stile_weight,
eq_min_stile_number, // final int min_stile_number,
eq_min_stile_fraction, // final double min_stile_fraction,
eq_min_disparity, // final double min_disparity,
eq_max_disparity, // final double max_disparity,
eq_weight_add, // final double weight_add,
eq_weight_scale, // final double weight_scale)
eq_level); // equalize to (log) fraction of average/this strength
if (eq_debug) {
String [] mvTitles = {"dx", "dy","conf", "conf0", "pX", "pY","Disp","defined"}; // ,"blurX","blurY", "blur"};
double [][] dbg_img = new double [mvTitles.length][tilesX*tilesY];
for (int l = 0; l < dbg_img.length; l++) {
Arrays.fill(dbg_img[l], Double.NaN);
}
for (int nTile = 0; nTile < coord_motion[0].length; nTile++) {
if (coord_motion[0][nTile] != null) {
for (int i = 0; i <3; i++) {
dbg_img[4+i][nTile] = coord_motion[0][nTile][i];
}
}
dbg_img[3] = strength_backup;
if (coord_motion[1][nTile] != null) {
for (int i = 0; i <3; i++) {
dbg_img[0+i][nTile] = coord_motion[1][nTile][i];
}
}
dbg_img[7][nTile] = ((coord_motion[0][nTile] != null)?1:0)+((coord_motion[1][nTile] != null)?2:0); // bit 0 - pXpYD defined, bit 1 - motion defined
}
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_img,
tilesX,
tilesY,
true,
scene.getImageName()+"-"+ref_scene.getImageName()+"-coord_motion-eq",
mvTitles);
}
} while (eq_debug);
}
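// Conceptual sketch of the equalization step above (NOT this commit's equalizeMotionVectorsWeights()):
// boost weak supertiles toward the global average strength so well-textured areas do not dominate
// the camera pose fit. Only stride/min-weight/eq_level are taken from the parameters above; the
// per-tile rule shown here is an assumption, treat it as an illustration rather than the algorithm.
static void equalizeStrengthSketch(
		double [][] motion,       // [tile]{dX, dY, strength} or null (coord_motion[1] layout)
		int tilesX, int strideHor, int strideVert,
		double minStileWeight, double eqLevel) {
	int tilesY = motion.length / tilesX;
	double sumAll = 0; int numAll = 0;
	for (double [] m : motion) if (m != null) { sumAll += m[2]; numAll++; }
	if (numAll == 0) return;
	double avgAll = sumAll / numAll;
	for (int sty = 0; sty < tilesY; sty += strideVert) {
		for (int stx = 0; stx < tilesX; stx += strideHor) {
			double sum = 0; int num = 0;
			for (int y = sty; y < Math.min(sty + strideVert, tilesY); y++) {
				for (int x = stx; x < Math.min(stx + strideHor, tilesX); x++) {
					double [] m = motion[y * tilesX + x];
					if (m != null) { sum += m[2]; num++; }
				}
			}
			if ((num == 0) || (sum < minStileWeight)) continue; // too weak a supertile - leave as is
			double avg = sum / num;
			double scale = Math.exp(eqLevel * Math.log(avgAll / avg)); // log-domain blend toward the average
			for (int y = sty; y < Math.min(sty + strideVert, tilesY); y++) {
				for (int x = stx; x < Math.min(stx + strideHor, tilesX); x++) {
					double [] m = motion[y * tilesX + x];
					if (m != null) m[2] *= scale;
				}
			}
		}
	}
}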
if (mov_en) {
String debug_image_name = mov_debug_images ? (scene.getImageName()+"-"+ref_scene.getImageName()+"-movements"): null;
boolean [] move_mask = getMovementMask(
clt_parameters, // CLTParameters clt_parameters,
coord_motion[1], // double [][] motion, // only x,y,w components
tilesX, // int tilesX,
debug_image_name, // String debug_image_name,
mov_debug_level); // int debug_level);
if (move_mask != null) {
for (int nTile=0; nTile < move_mask.length; nTile++) if (move_mask[nTile]) {
coord_motion[1][nTile]= null;
}
}
}
// seems that fclt_corr and fclt_corr1 are both not needed w/o debug
float [][][] fclt_corr1 = ImageDtt.convertFcltCorr( // partial length, matching corr_indices = gpuQuad.getCorrIndices(); // also sets num_corr_tiles
dcorr_tiles, // double [][][] dcorr_tiles,// [tile][sparse, correlation pair][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
fclt_corr); // float [][][] fclt_corr) // new float [tilesX * tilesY][][] or null
if (show_2d_correlations) { // visualize prepare ref_scene correlation data
float [][] dbg_corr_rslt_partial = ImageDtt.corr_partial_dbg( // not used in lwir
fclt_corr1, // final float [][][] fcorr_data, // [tile][pair][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
image_dtt.getGPU().getCorrIndices(), // tp_tasks, // final TpTask [] tp_tasks, //
tilesX, //final int tilesX,
tilesY, //final int tilesX,
2*image_dtt.transform_size - 1, // final int corr_size,
1000, // will be limited by available layers // final int layers0,
clt_parameters.corr_border_contrast, // final double border_contrast,
THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel)
String [] titles = new String [dbg_corr_rslt_partial.length]; // dcorr_tiles[0].length];
for (int i = 0; i < titles.length; i++) {
if (i== (titles.length - 1)) {
titles[i] = "sum";
} else {
titles[i] = "sens-"+i;
}
}
// titles.length = 15, corr_rslt_partial.length=16!
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_corr_rslt_partial,
tilesX*(2*image_dtt.transform_size),
tilesY*(2*image_dtt.transform_size),
true,
scene.getImageName()+"-"+ref_scene.getImageName()+"-interscene",
titles);
}
if (motion_vectors != null) { if (motion_vectors != null) {
int num_sens = 0; int num_sens = 0;
int num_rslt = 0; int num_rslt = 0;
...@@ -12779,10 +12462,31 @@ public class OpticalFlow { ...@@ -12779,10 +12462,31 @@ public class OpticalFlow {
int debugLevel) int debugLevel)
{ {
// boolean test_motion_blur = true;//false // boolean test_motion_blur = true;//false
// Set up velocities from known coordinates, use averaging
double half_run_range = clt_parameters.ilp.ilma_motion_filter; // 3.50 default
double [][] dbg_scale_dt = {clt_parameters.ilp.ilma_scale_xyz, clt_parameters.ilp.ilma_scale_atr};
boolean debug_ers = clt_parameters.ilp.ilma_debug_ers ; // true;
String dbg_ers_string =
((dbg_scale_dt[1][0] > 0)?"a":((dbg_scale_dt[1][0] < 0) ? "A":"0"))+
((dbg_scale_dt[1][1] > 0)?"t":((dbg_scale_dt[1][1] < 0) ? "T":"0"))+
((dbg_scale_dt[1][2] > 0)?"r":((dbg_scale_dt[1][2] < 0) ? "R":"0"))+
((dbg_scale_dt[0][0] > 0)?"x":((dbg_scale_dt[0][0] < 0) ? "X":"0"))+
((dbg_scale_dt[0][1] > 0)?"y":((dbg_scale_dt[0][1] < 0) ? "Y":"0"))+
((dbg_scale_dt[0][2] > 0)?"z":((dbg_scale_dt[0][2] < 0) ? "Z":"0"));
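// Hypothetical helper spelling out the encoding of the debug mode string built above:
// order is a,t,r,x,y,z; lowercase - positive scale, uppercase - negated, '0' - axis disabled.
static String ersScaleModeString(double [] scale_xyz, double [] scale_atr) {
	char []   letters = {'a','t','r','x','y','z'};
	double [] scales  = {scale_atr[0], scale_atr[1], scale_atr[2],
	                     scale_xyz[0], scale_xyz[1], scale_xyz[2]};
	StringBuilder sb = new StringBuilder();
	for (int i = 0; i < scales.length; i++) {
		sb.append((scales[i] > 0) ? letters[i] :
		          ((scales[i] < 0) ? Character.toUpperCase(letters[i]) : '0'));
	}
	return sb.toString();
}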
// boolean ers_readjust_enable = true; // debugging ers during readjust 11.02.2022
// boolean ers_use_xyz = false; // true; // false; // enable when ready 11.02.2022
// int [][] dbg_scale_dt = {{0,0,0},{0, 0, 0}}; // no_ers - causes singular matrices (not always), but works
// int [][] dbg_scale_dt = {{0,0,0},{1,-1, 1}}; // aTr
// int [][] dbg_scale_dt = {{0,0,0},{1, 1,-1}}; // atR
// int [][] dbg_scale_dt = {{0,0,0},{1, 1, 1}}; // atr
// int [][] dbg_scale_dt = {{1,1,1},{1, 1, 1}}; // atrxyz
// int [][] dbg_scale_dt = {{1,1,1},{1, 1,-1}}; // atRxyz
// int [][] dbg_scale_dt = {{-1,-1,-1},{1, 1,1}}; // atrXYZ
// int [][] dbg_scale_dt = {{-1,-1,-1},{1, 1,-1}}; // atRXYZ
boolean mb_en = clt_parameters.imp.mb_en; boolean mb_en = clt_parameters.imp.mb_en;
double mb_tau = clt_parameters.imp.mb_tau; // 0.008; // time constant, sec double mb_tau = clt_parameters.imp.mb_tau; // 0.008; // time constant, sec
double mb_max_gain = clt_parameters.imp.mb_max_gain; // 5.0; // motion blur maximal gain (if more - move second point more than a pixel double mb_max_gain = clt_parameters.imp.mb_max_gain_inter; // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
int margin = clt_parameters.imp.margin; int margin = clt_parameters.imp.margin;
int earliest_scene = 0; int earliest_scene = 0;
...@@ -12829,6 +12533,7 @@ public class OpticalFlow { ...@@ -12829,6 +12533,7 @@ public class OpticalFlow {
if (mb_ref_disparity == null) { if (mb_ref_disparity == null) {
mb_ref_disparity = quadCLTs[ref_index].getDLS()[use_lma_dsi?1:0]; mb_ref_disparity = quadCLTs[ref_index].getDLS()[use_lma_dsi?1:0];
} }
// ref_pXpYD should correspond to uniform grid (do not undo ERS of the reference scene)
ref_pXpYD = transformToScenePxPyD( // full size - [tilesX*tilesY], some nulls ref_pXpYD = transformToScenePxPyD( // full size - [tilesX*tilesY], some nulls
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null) null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
mb_ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?) mb_ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
...@@ -12843,67 +12548,91 @@ public class OpticalFlow { ...@@ -12843,67 +12548,91 @@ public class OpticalFlow {
ErsCorrection ers_reference = quadCLTs[ref_index].getErsCorrection(); ErsCorrection ers_reference = quadCLTs[ref_index].getErsCorrection();
double [][][] dxyzatr_dt = new double[quadCLTs.length][][]; // double [][][] dxyzatr_dt = new double[quadCLTs.length][][];
double [][][] scenes_xyzatr = new double [quadCLTs.length][][]; // previous scene relative to the next one double [][][] scenes_xyzatr = new double [quadCLTs.length][][]; // previous scene relative to the next one
scenes_xyzatr[ref_index] = new double[2][3]; // all zeros scenes_xyzatr[ref_index] = new double[2][3]; // all zeros
// should have at least next or previous non-null // should have at least next or previous non-null
int debug_scene = -15; int debug_scene = -15;
double maximal_series_rms = 0.0; double maximal_series_rms = 0.0;
double [][] mb_vectors_ref = null; double [][] mb_vectors_ref = null;
TpTask[][] tp_tasks_ref = null; TpTask[][] tp_tasks_ref = null;
if (debug_ers) {
System.out.println("ERS velocities scale mode = '"+dbg_ers_string+"'");
}
for (int nscene = ref_index; nscene >= earliest_scene; nscene--) { for (int nscene = ref_index; nscene >= earliest_scene; nscene--) {
if (nscene == debug_scene) { if (nscene == debug_scene) {
System.out.println("nscene = "+nscene); System.out.println("nscene = "+nscene);
System.out.println("nscene = "+nscene); System.out.println("nscene = "+nscene);
} }
// just checking it is not isolated
if ((quadCLTs[nscene] == null) || if (quadCLTs[nscene] == null) {
((nscene != ref_index) &&
((ers_reference.getSceneXYZ(quadCLTs[nscene].getImageName())== null) ||
(ers_reference.getSceneATR(quadCLTs[nscene].getImageName())== null)))) {
earliest_scene = nscene + 1; earliest_scene = nscene + 1;
break; break;
} }
if (nscene != ref_index) {
String ts = quadCLTs[nscene].getImageName(); String ts = quadCLTs[nscene].getImageName();
int nscene0 = nscene - 1; if ((ers_reference.getSceneXYZ(ts)== null) || (ers_reference.getSceneATR(ts)== null)) {
if ((nscene0 < 0) ||
(quadCLTs[nscene0]== null)||
(ers_reference.getSceneXYZ(quadCLTs[nscene0].getImageName())== null) ||
(ers_reference.getSceneATR(quadCLTs[nscene0].getImageName())== null)) {
nscene0 = nscene;
}
int nscene1 = nscene + 1;
if ((nscene1 > ref_index) || (quadCLTs[nscene1]== null)) {
nscene1 = nscene;
}
if (nscene1 == nscene0) {
System.out.println("**** Isoloated scene!!! skipping... now may only happen for a ref_scene****");
earliest_scene = nscene + 1; earliest_scene = nscene + 1;
break; break;
} }
double dt = quadCLTs[nscene1].getTimeStamp() - quadCLTs[nscene0].getTimeStamp(); scenes_xyzatr[nscene] = new double[][] {ers_reference.getSceneXYZ(ts), ers_reference.getSceneATR(ts)};
String ts0 = quadCLTs[nscene0].getImageName(); }
String ts1 = quadCLTs[nscene1].getImageName(); if (debug_ers) {
double [] scene_xyz0 = ers_reference.getSceneXYZ(ts0); System.out.println(String.format("%3d xyz: %9.5f %9.5f %9.5f atr: %9.5f %9.5f %9.5f",
double [] scene_atr0 = ers_reference.getSceneATR(ts0); nscene,
if (scene_xyz0 == null) { scenes_xyzatr[nscene][0][0], scenes_xyzatr[nscene][0][1], scenes_xyzatr[nscene][0][2],
System.out.println ("BUG: No egomotion data for timestamp "+ts0); scenes_xyzatr[nscene][1][0], scenes_xyzatr[nscene][1][1], scenes_xyzatr[nscene][1][2]));
System.out.println ("Need to re-run with Force egomotion calculation"); }
}
double [][][] dxyzatr_dt = getVelocitiesFromScenes(
quadCLTs, // QuadCLT [] scenes, // ordered by increasing timestamps
ref_index,
earliest_scene, // int start_scene,
ref_index, // int end1_scene,
scenes_xyzatr, // double [][][] scenes_xyzatr,
half_run_range); // double half_run_range
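// Illustrative sketch of the idea behind getVelocitiesFromScenes() used above: differentiate the
// already-known per-scene poses over time and smooth over a +/-half_range neighborhood. The exact
// filtering and handling of missing scenes in this commit may differ - this is an assumption-labeled
// outline, not the actual implementation.
static double [][][] velocitiesFromPosesSketch(
		double []     timestamps,     // per-scene timestamps, increasing
		double [][][] scenes_xyzatr,  // [scene][0 - xyz, 1 - atr][3], null if pose unknown
		int           half_range) {   // scenes to each side used for the finite difference
	double [][][] dxyzatr_dt = new double [scenes_xyzatr.length][][];
	for (int ns = 0; ns < scenes_xyzatr.length; ns++) {
		if (scenes_xyzatr[ns] == null) continue;
		int n0 = Math.max(0, ns - half_range);
		int n1 = Math.min(scenes_xyzatr.length - 1, ns + half_range);
		while ((n0 < ns) && (scenes_xyzatr[n0] == null)) n0++; // shrink span to defined poses
		while ((n1 > ns) && (scenes_xyzatr[n1] == null)) n1--;
		if (n1 <= n0) continue; // isolated scene - leave velocity null
		double dt = timestamps[n1] - timestamps[n0];
		dxyzatr_dt[ns] = new double [2][3];
		for (int m = 0; m < 2; m++) { // 0 - XYZ, 1 - ATR
			for (int i = 0; i < 3; i++) {
				dxyzatr_dt[ns][m][i] = (scenes_xyzatr[n1][m][i] - scenes_xyzatr[n0][m][i]) / dt;
			}
		}
	}
	return dxyzatr_dt;
}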
if (debug_ers) {
System.out.println();
for (int nscene = ref_index; nscene >= earliest_scene; nscene--) {
System.out.println(String.format("%3d xyz_dt: %9.5f %9.5f %9.5f atr_dt: %9.5f %9.5f %9.5f",
nscene,
dxyzatr_dt[nscene][0][0], dxyzatr_dt[nscene][0][1], dxyzatr_dt[nscene][0][2],
dxyzatr_dt[nscene][1][0], dxyzatr_dt[nscene][1][1], dxyzatr_dt[nscene][1][2]));
}
System.out.println();
}
for (int nscene = ref_index; nscene >= earliest_scene; nscene--) {
if (nscene == debug_scene) {
System.out.println("nscene = "+nscene);
System.out.println("nscene = "+nscene);
}
// just checking it is not isolated
if ((quadCLTs[nscene] == null) ||
// (scenes_xyzatr[nscene] == null) || // no velocity even with averaging
((nscene != ref_index) &&
((ers_reference.getSceneXYZ(quadCLTs[nscene].getImageName())== null) ||
(ers_reference.getSceneATR(quadCLTs[nscene].getImageName())== null)))) {
earliest_scene = nscene + 1; earliest_scene = nscene + 1;
break; break;
} }
double [] scene_xyz1 = (nscene1== ref_index)? ZERO3:ers_reference.getSceneXYZ(ts1); String ts = quadCLTs[nscene].getImageName();
double [] scene_atr1 = (nscene1== ref_index)? ZERO3:ers_reference.getSceneATR(ts1);
dxyzatr_dt[nscene] = new double[2][3];
for (int i = 0; i < 3; i++) {
dxyzatr_dt[nscene][0][i] = (scene_xyz1[i]-scene_xyz0[i])/dt;
dxyzatr_dt[nscene][1][i] = (scene_atr1[i]-scene_atr0[i])/dt;
}
double [] scene_xyz_pre = ZERO3; double [] scene_xyz_pre = ZERO3;
double [] scene_atr_pre = ZERO3; double [] scene_atr_pre = ZERO3;
// find correct signs when setting. dxyzatr_dt[][] is also used for motion blur (correctly)
double [][] scaled_dxyzatr_dt = new double[2][3];
for (int i = 0; i < scaled_dxyzatr_dt.length; i++) {
for (int j = 0; j < scaled_dxyzatr_dt[i].length; j++) {
scaled_dxyzatr_dt[i][j] = dxyzatr_dt[nscene][i][j]* dbg_scale_dt[i][j];
}
}
quadCLTs[nscene].getErsCorrection().setErsDt( // set for ref also (should be set before non-ref!) quadCLTs[nscene].getErsCorrection().setErsDt( // set for ref also (should be set before non-ref!)
ZERO3, //, // dxyzatr_dt[nscene][0], // double [] ers_xyz_dt, scaled_dxyzatr_dt[0], // (ers_use_xyz? dxyzatr_dt[nscene][0]: ZERO3), //, // dxyzatr_dt[nscene][0], // double [] ers_xyz_dt,
dxyzatr_dt[nscene][1]); // double [] ers_atr_dt)(ers_scene_original_xyz_dt); scaled_dxyzatr_dt[1]); // dxyzatr_dt[nscene][1]); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
if (dbg_mb_img != null) { if (dbg_mb_img != null) {
boolean show_corrected = false; boolean show_corrected = false;
if (nscene == debug_scene) { if (nscene == debug_scene) {
...@@ -12914,7 +12643,7 @@ public class OpticalFlow { ...@@ -12914,7 +12643,7 @@ public class OpticalFlow {
Arrays.fill(dbg_mb_img[nscene],Double.NaN); Arrays.fill(dbg_mb_img[nscene],Double.NaN);
double [] mb_scene_xyz = (nscene != ref_index)? ers_reference.getSceneXYZ(ts):ZERO3; double [] mb_scene_xyz = (nscene != ref_index)? ers_reference.getSceneXYZ(ts):ZERO3;
double [] mb_scene_atr = (nscene != ref_index)? ers_reference.getSceneATR(ts):ZERO3; double [] mb_scene_atr = (nscene != ref_index)? ers_reference.getSceneATR(ts):ZERO3;
double [][] motion_blur = getMotionBlur( double [][] motion_blur = getMotionBlur( // dxyzatr_dt[][] should not be scaled here
quadCLTs[ref_index], // QuadCLT ref_scene, quadCLTs[ref_index], // QuadCLT ref_scene,
quadCLTs[nscene], // QuadCLT scene, // can be the same as ref_scene quadCLTs[nscene], // QuadCLT scene, // can be the same as ref_scene
ref_pXpYD, // double [][] ref_pXpYD, // here it is scene, not reference! ref_pXpYD, // double [][] ref_pXpYD, // here it is scene, not reference!
...@@ -12969,7 +12698,7 @@ public class OpticalFlow { ...@@ -12969,7 +12698,7 @@ public class OpticalFlow {
quadCLTs[ref_index], // final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref quadCLTs[ref_index], // final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
false, // toRGB, // final boolean toRGB, false, // toRGB, // final boolean toRGB,
clt_parameters.imp.show_color_nan, clt_parameters.imp.show_color_nan,
quadCLTs[nscene].getImageName()+"-MOTION_BLUR_CORRECTED", // String suffix, quadCLTs[nscene].getImageName()+"-MOTION_BLUR_CORRECTED-MERGED", // String suffix,
THREADS_MAX, // int threadsMax, THREADS_MAX, // int threadsMax,
debugLevel); // int debugLevel) debugLevel); // int debugLevel)
imp_mbc_merged.show(); imp_mbc_merged.show();
...@@ -12978,9 +12707,11 @@ public class OpticalFlow { ...@@ -12978,9 +12707,11 @@ public class OpticalFlow {
if (nscene == ref_index) { // set GPU reference CLT data - should be before other scenes if (nscene == ref_index) { // set GPU reference CLT data - should be before other scenes
mb_vectors_ref = null; mb_vectors_ref = null;
if (mb_en) { if (mb_en) {
double [][] dxyzatr_dt_ref = getVelocities( // should get velocities from HashMap at reference scene from timestamp , not re-calculate.
quadCLTs, // QuadCLT [] quadCLTs, // double [][] dxyzatr_dt_ref = getVelocities( // was already set
ref_index); // int nscene) // quadCLTs, // QuadCLT [] quadCLTs,
// ref_index); // int nscene)
double [][] dxyzatr_dt_ref = dxyzatr_dt[nscene];
mb_vectors_ref = getMotionBlur( mb_vectors_ref = getMotionBlur(
quadCLTs[ref_index], // QuadCLT ref_scene, quadCLTs[ref_index], // QuadCLT ref_scene,
quadCLTs[ref_index], // QuadCLT scene, // can be the same as ref_scene quadCLTs[ref_index], // QuadCLT scene, // can be the same as ref_scene
...@@ -13009,9 +12740,10 @@ public class OpticalFlow { ...@@ -13009,9 +12740,10 @@ public class OpticalFlow {
scene_xyz_pre = ers_reference.getSceneXYZ(ts); scene_xyz_pre = ers_reference.getSceneXYZ(ts);
scene_atr_pre = ers_reference.getSceneATR(ts); scene_atr_pre = ers_reference.getSceneATR(ts);
if (mb_en) { if (mb_en) {
double [][] dxyzatr_dt_scene = getVelocities( // double [][] dxyzatr_dt_scene = getVelocities(
quadCLTs, // QuadCLT [] quadCLTs, // quadCLTs, // QuadCLT [] quadCLTs,
nscene); // int nscene) // nscene); // int nscene)
double [][] dxyzatr_dt_scene = dxyzatr_dt[nscene];
mb_vectors = getMotionBlur( mb_vectors = getMotionBlur(
quadCLTs[ref_index], // QuadCLT ref_scene, quadCLTs[ref_index], // QuadCLT ref_scene,
quadCLTs[nscene], // QuadCLT scene, // can be the same as ref_scene quadCLTs[nscene], // QuadCLT scene, // can be the same as ref_scene
......
...@@ -1277,6 +1277,28 @@ public class QuadCLT extends QuadCLTCPU { ...@@ -1277,6 +1277,28 @@ public class QuadCLT extends QuadCLTCPU {
debugLevel); debugLevel);
} }
/**
* Render a scene from GPU transform-domain data using a disparity reference.
* Pass scene_xyz==null or scene_atr==null to use a uniform grid (for rendering raw images, no ERS).
* @param sensor_mask bitmask of sensors to use, -1 - all
* @param merge_channels merge all channels into a single image
* @param full_woi_in show a window larger than the sensor WOI (or null)
* @param clt_parameters processing parameters
* @param disparity_ref disparity reference for the scene (invalid tiles - NaN)
* @param mb_tau thermal time constant, sec (motion blur)
* @param mb_max_gain motion blur maximal gain (if more - move second point more than a pixel)
* @param mb_vectors motion blur vectors, [2][ntiles] or null
* @param scene_xyz camera center in world coordinates; null - no shift, no ERS
* @param scene_atr camera orientation relative to the world frame; null - no rotation, no ERS
* @param scene scene to render
* @param ref_scene reference scene (may be null - for testing if scene is a rotated ref)
* @param toRGB convert to RGB
* @param show_nan show NaN (undefined) pixels
* @param suffix suffix for the rendered image title
* @param threadsMax maximal number of threads to launch
* @param debugLevel debug level
* @return rendered image as ImagePlus
*/
public static ImagePlus renderGPUFromDSI( public static ImagePlus renderGPUFromDSI(
final int sensor_mask, final int sensor_mask,
final boolean merge_channels, final boolean merge_channels,
...@@ -1288,8 +1310,8 @@ public class QuadCLT extends QuadCLTCPU { ...@@ -1288,8 +1310,8 @@ public class QuadCLT extends QuadCLTCPU {
double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
double [][] mb_vectors, // now [2][ntiles]; double [][] mb_vectors, // now [2][ntiles];
final double [] scene_xyz, // camera center in world coordinates double [] scene_xyz, // camera center in world coordinates. If null - no shift, no ers
final double [] scene_atr, // camera orientation relative to world frame double [] scene_atr, // camera orientation relative to world frame
final QuadCLT scene, final QuadCLT scene,
final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
final boolean toRGB, final boolean toRGB,
...@@ -1297,7 +1319,20 @@ public class QuadCLT extends QuadCLTCPU { ...@@ -1297,7 +1319,20 @@ public class QuadCLT extends QuadCLTCPU {
String suffix, String suffix,
int threadsMax, int threadsMax,
final int debugLevel){ final int debugLevel){
double [][] pXpYD =OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene double [][] pXpYD;
if ((scene_xyz == null) || (scene_atr == null)) {
scene_xyz = new double[3];
scene_atr = new double[3];
pXpYD=OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene
full_woi_in, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
ref_scene, // final QuadCLT scene_QuadClt,
ref_scene, // final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
threadsMax); // int threadsMax)
} else {
pXpYD=OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene
full_woi_in, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null) full_woi_in, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
...@@ -1305,6 +1340,7 @@ public class QuadCLT extends QuadCLTCPU { ...@@ -1305,6 +1340,7 @@ public class QuadCLT extends QuadCLTCPU {
scene, // final QuadCLT scene_QuadClt, scene, // final QuadCLT scene_QuadClt,
ref_scene, // final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref ref_scene, // final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
threadsMax); // int threadsMax) threadsMax); // int threadsMax)
}
int rendered_width = scene.getErsCorrection().getSensorWH()[0]; int rendered_width = scene.getErsCorrection().getSensorWH()[0];
if (full_woi_in != null) { if (full_woi_in != null) {
rendered_width = full_woi_in.width * GPUTileProcessor.DTT_SIZE; rendered_width = full_woi_in.width * GPUTileProcessor.DTT_SIZE;
...@@ -1429,7 +1465,7 @@ public class QuadCLT extends QuadCLTCPU { ...@@ -1429,7 +1465,7 @@ public class QuadCLT extends QuadCLTCPU {
/** /**
* Prepare 16x16 texture tiles using GPU from disparity reference. Includes motion blur correction * Prepare 16x16 texture tiles using GPU from disparity reference. Includes motion blur correction
* Does not use scene.saveQuadClt() to re-load new set of Bayer images to the GPU -- should be done by callaer * Does not use scene.saveQuadClt() to re-load new set of Bayer images to the GPU -- should be done by caller
* @param clt_parameters processing parameters * @param clt_parameters processing parameters
* @param disparity_ref disparity scene reference * @param disparity_ref disparity scene reference
* @param mb_tau thermal constant * @param mb_tau thermal constant
......
...@@ -921,10 +921,14 @@ public class TexturedModel { ...@@ -921,10 +921,14 @@ public class TexturedModel {
scene_ers_atr_dt); // double [] ers_atr_dt)(ers_scene_original_xyz_dt); scene_ers_atr_dt); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
} }
double [][] dxyzatr_dt = null; double [][] dxyzatr_dt = null;
// should get velocities from HashMap at reference scene from timestamp , not re-calculate.
if (mb_en) { // all scenes have the same name/path if (mb_en) { // all scenes have the same name/path
dxyzatr_dt = OpticalFlow.getVelocities( // looks at previous/next scene poses // dxyzatr_dt = OpticalFlow.getVelocities( // looks at previous/next scene poses
scenes, // QuadCLT [] quadCLTs, // scenes, // QuadCLT [] quadCLTs,
nscene); // int nscene) // nscene); // int nscene)
dxyzatr_dt = new double[][] { // for all, including ref
scenes[nscene].getErsCorrection().getErsXYZ_dt(),
scenes[nscene].getErsCorrection().getErsATR_dt()};
} }
scenes[nscene].saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) scenes[nscene].saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU)
for (int nslice = 0; nslice < num_slices; nslice++) { // prepare and measure textures for each combo textures for (int nslice = 0; nslice < num_slices; nslice++) { // prepare and measure textures for each combo textures
......