Commit 9e339a49 authored by Andrey Filippov's avatar Andrey Filippov

improving matching with MB, working version

parent 6c3ff670
......@@ -24,6 +24,7 @@
package com.elphel.imagej.tileprocessor;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
......@@ -2147,8 +2148,15 @@ public class ErsCorrection extends GeometryCorrection {
xyz4[1] /= xyz4[2];
xyz4[2] = 1.0;
}
Matrix dpscene_dxyz = dx_dpscene.inverse();
Matrix dpscene_dxyz = null;
try {
dpscene_dxyz = dx_dpscene.inverse();
} catch (Exception e) {
// Matrix.inverse() throws when dx_dpscene is singular - skip this tile
System.out.println("dx_dpscene is singular");
e.printStackTrace();
return null;
}
Matrix dpscene_dxyz_minus = dpscene_dxyz.times(-1.0); // negated to calculate /d{pX,pY,D) for the scene parameters
double[][] derivatives = new double[DP_NUM_PARS+1][]; // includes [0] - pXpYD vector
// scene pX, pY, Disparity
......
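For the singular-matrix case above, an explicit determinant check is an alternative to relying on the exception thrown by inverse(). A minimal sketch, assuming the Jama Matrix API (det(), inverse()) used by ErsCorrection; the 1e-12 threshold is an arbitrary placeholder, not a value from the original code:

	Matrix dpscene_dxyz = null;
	if (Math.abs(dx_dpscene.det()) > 1.0E-12) { // well-conditioned enough to invert
		dpscene_dxyz = dx_dpscene.inverse();
	} else {
		System.out.println("dx_dpscene is singular");
		return null; // give up on this tile, as in the catch block above
	}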
......@@ -1331,7 +1331,7 @@ public class ImageDtt extends ImageDttCPU {
final double [][] pXpYD, // pXpYD for the reference scene
final double [][] fpn_offsets, // null, or per-tile X,Y offset to be blanked
final double fpn_radius, // radius to be blanked around FPN offset center
final boolean fpn_ignore_border, // only if fpn_mask != null - ignore tile if maximum touches fpn_mask
final boolean fpn_ignore_border, // NOT used! only if fpn_mask != null - ignore tile if maximum touches fpn_mask
final double[][][] motion_vectors, // [tilesY*tilesX][][] -> [][num_sel_sensors+1 or 2][3]
final boolean run_poly, // polynomial max, if false - centroid
final boolean use_partial, // find motion vectors for individual pairs, false - for sum only
......
......@@ -45,6 +45,13 @@ public class IntersceneLmaParameters {
public int ilma_debug_level = 1;
public boolean ilma_debug_adjust_series = false; // Debug images for series of pose and ers
public boolean ilma_debug_invariant = false; // Monitoring variations when restarting program (should be ilma_thread_invariant=true)
public double ilma_motion_filter = 3.5; // filter atr, xyz between consecutive scenes to get velocities
public double [] ilma_scale_xyz = {1.0, 1.0, 1.0};
public double [] ilma_scale_atr = {1.0, 1.0, 1.0}; // roll - minus?
public boolean ilma_debug_ers = false; // Debug ers-related operation
public IntersceneLmaParameters() {
ilma_lma_select[ErsCorrection.DP_DVAZ]= true;
......@@ -141,6 +148,18 @@ public class IntersceneLmaParameters {
"Generate debug images and text output to verify same results regardless of threads" );
gd.addNumericField("Debug level", this.ilma_debug_level, 0,3,"",
"Debug level of interscene LMA operation.");
gd.addMessage("Debug ERS-related operation");
gd.addNumericField("Motion filter (half number of scenes)", this.ilma_motion_filter, 5,7,"",
"filter atr, xyz between consecutive scenes to get velocities.");
gd.addStringField ("XYZ scales", IntersceneMatchParameters.doublesToString(ilma_scale_xyz), 80,
"Scales for X, Y, and Z velocities, such as '1.0 1.0 1.0'.");
gd.addStringField ("ATR scales", IntersceneMatchParameters.doublesToString(ilma_scale_atr), 80,
"Scales for Azimuth, Tilt, and Roll velocities, such as '1.0 1.0 1.0'.");
gd.addCheckbox ("Debug ERS", this.ilma_debug_ers,
"Debug ERS-related parameters during pose fitting." );
}
public void dialogAnswers(GenericJTabbedDialog gd) {
......@@ -164,6 +183,12 @@ public class IntersceneLmaParameters {
this.ilma_debug_adjust_series = gd.getNextBoolean();
this.ilma_debug_invariant = gd.getNextBoolean();
this.ilma_debug_level = (int) gd.getNextNumber();
this.ilma_motion_filter = gd.getNextNumber();
this.ilma_scale_xyz = IntersceneMatchParameters.StringToDoubles(gd.getNextString(), 3);
this.ilma_scale_atr = IntersceneMatchParameters.StringToDoubles(gd.getNextString(), 3);
this.ilma_debug_ers = gd.getNextBoolean();
}
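The three-element scale vectors are converted to and from strings with IntersceneMatchParameters.doublesToString() / StringToDoubles(), both in the dialog above and in the properties code below. A minimal sketch of what such helpers could look like (hypothetical stand-ins, not the actual Elphel implementations), assuming whitespace- or comma-separated values:

	public static String doublesToString(double [] data) {
		StringBuffer sb = new StringBuffer();
		for (int i = 0; i < data.length; i++) {
			if (i > 0) sb.append(" ");
			sb.append(data[i]);
		}
		return sb.toString();
	}
	public static double [] StringToDoubles(String s, int len) {
		double [] data = new double [len];             // missing tokens stay 0.0
		String [] tokens = s.trim().split("[\\s,;]+"); // tolerate spaces, commas, semicolons
		for (int i = 0; (i < len) && (i < tokens.length); i++) {
			data[i] = Double.parseDouble(tokens[i]);
		}
		return data;
	}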
public void setProperties(String prefix,Properties properties){
properties.setProperty(prefix+"ilma_thread_invariant", this.ilma_thread_invariant+"");
......@@ -184,6 +209,11 @@ public class IntersceneLmaParameters {
properties.setProperty(prefix+"ilma_debug_adjust_series",this.ilma_debug_adjust_series+"");
properties.setProperty(prefix+"ilma_debug_invariant", this.ilma_debug_invariant+"");
properties.setProperty(prefix+"ilma_debug_level", this.ilma_debug_level+"");
properties.setProperty(prefix+"ilma_motion_filter", this.ilma_motion_filter+"");
properties.setProperty(prefix+"ilma_scale_xyz", IntersceneMatchParameters.doublesToString(ilma_scale_xyz));
properties.setProperty(prefix+"ilma_scale_atr", IntersceneMatchParameters.doublesToString(ilma_scale_atr));
properties.setProperty(prefix+"ilma_debug_ers", this.ilma_debug_ers+"");
}
public void getProperties(String prefix,Properties properties){
if (properties.getProperty(prefix+"ilma_thread_invariant")!=null) this.ilma_thread_invariant=Boolean.parseBoolean(properties.getProperty(prefix+"ilma_thread_invariant"));
......@@ -206,6 +236,20 @@ public class IntersceneLmaParameters {
if (properties.getProperty(prefix+"ilma_debug_adjust_series")!=null)this.ilma_debug_adjust_series=Boolean.parseBoolean(properties.getProperty(prefix+"ilma_debug_adjust_series"));
if (properties.getProperty(prefix+"ilma_debug_invariant")!=null) this.ilma_debug_invariant=Boolean.parseBoolean(properties.getProperty(prefix+"ilma_debug_invariant"));
if (properties.getProperty(prefix+"ilma_debug_level")!=null) this.ilma_debug_level=Integer.parseInt(properties.getProperty(prefix+"ilma_debug_level"));
if (properties.getProperty(prefix+"ilma_motion_filter")!=null) this.ilma_motion_filter=Double.parseDouble(properties.getProperty(prefix+"ilma_motion_filter"));
if (properties.getProperty(prefix+"ilma_scale_xyz")!=null) {
this.ilma_scale_xyz= IntersceneMatchParameters.StringToDoubles(
properties.getProperty(prefix+"ilma_scale_xyz"),3);
}
if (properties.getProperty(prefix+"ilma_scale_atr")!=null) {
this.ilma_scale_atr= IntersceneMatchParameters.StringToDoubles(
properties.getProperty(prefix+"ilma_scale_atr"),3);
}
if (properties.getProperty(prefix+"ilma_debug_ers")!=null) this.ilma_debug_ers=Boolean.parseBoolean(properties.getProperty(prefix+"ilma_debug_ers"));
}
@Override
......@@ -227,6 +271,12 @@ public class IntersceneLmaParameters {
ilp.ilma_debug_adjust_series = this.ilma_debug_adjust_series;
ilp.ilma_debug_invariant = this.ilma_debug_invariant;
ilp.ilma_debug_level = this.ilma_debug_level;
ilp.ilma_motion_filter = this.ilma_motion_filter;
ilp.ilma_scale_xyz = this.ilma_scale_xyz.clone();
ilp.ilma_scale_atr = this.ilma_scale_atr.clone();
ilp.ilma_debug_ers = this.ilma_debug_ers;
return ilp;
}
}
......@@ -248,6 +248,7 @@ public class IntersceneMatchParameters {
public boolean mb_en = true;
public double mb_tau = 0.008; // time constant, sec
public double mb_max_gain = 5.0; // motion blur maximal gain (if more - move second point more than a pixel)
public double mb_max_gain_inter = 2.0; // same for interscene correlation for pose adjustment
public boolean stereo_merge = true;
......@@ -732,6 +733,8 @@ public class IntersceneMatchParameters {
"Sensor exponential decay in seconds.");
gd.addNumericField("Maximal gain", this.mb_max_gain, 5,7,"x",
"Maximal gain for motion blur correction (if needed more for 1 pixel, increase offset).");
gd.addNumericField("Maximal gain pose", this.mb_max_gain_inter, 5,7,"x",
"Maximal gain for motion blur correction during interscene correlation (if needed more for 1 pixel, increase offset).");
gd.addTab("Stereo/Video","Parameters for stereo video generation");
gd.addMessage ("Stereo");
......@@ -1016,7 +1019,7 @@ public class IntersceneMatchParameters {
this.mb_en = gd.getNextBoolean();
this.mb_tau = gd.getNextNumber();
this.mb_max_gain = gd.getNextNumber();
this.mb_max_gain_inter = gd.getNextNumber();
if (stereo_views.length > 0) {
int i = gd.getNextChoiceIndex();
if (i > 0) {
......@@ -1296,7 +1299,7 @@ public class IntersceneMatchParameters {
properties.setProperty(prefix+"mb_en", this.mb_en+""); // boolean
properties.setProperty(prefix+"mb_tau", this.mb_tau+""); // double
properties.setProperty(prefix+"mb_max_gain", this.mb_max_gain+""); // double
properties.setProperty(prefix+"mb_max_gain_inter", this.mb_max_gain_inter+""); // double
properties.setProperty(prefix+"stereo_merge", this.stereo_merge+""); // boolean
properties.setProperty(prefix+"stereo_gap", this.stereo_gap+""); // int
properties.setProperty(prefix+"stereo_intereye", this.stereo_intereye+""); // double
......@@ -1531,6 +1534,7 @@ public class IntersceneMatchParameters {
if (properties.getProperty(prefix+"mb_en")!=null) this.mb_en=Boolean.parseBoolean(properties.getProperty(prefix+"mb_en"));
if (properties.getProperty(prefix+"mb_tau")!=null) this.mb_tau=Double.parseDouble(properties.getProperty(prefix+"mb_tau"));
if (properties.getProperty(prefix+"mb_max_gain")!=null) this.mb_max_gain=Double.parseDouble(properties.getProperty(prefix+"mb_max_gain"));
if (properties.getProperty(prefix+"mb_max_gain_inter")!=null) this.mb_max_gain_inter=Double.parseDouble(properties.getProperty(prefix+"mb_max_gain_inter"));
if (properties.getProperty(prefix+"stereo_merge")!=null) this.stereo_merge=Boolean.parseBoolean(properties.getProperty(prefix+"stereo_merge"));
if (properties.getProperty(prefix+"stereo_gap")!=null) this.stereo_gap=Integer.parseInt(properties.getProperty(prefix+"stereo_gap"));
......@@ -1782,6 +1786,7 @@ public class IntersceneMatchParameters {
imp.mb_en = this.mb_en;
imp.mb_tau = this.mb_tau;
imp.mb_max_gain = this.mb_max_gain;
imp.mb_max_gain_inter = this.mb_max_gain_inter;
imp.stereo_merge = this.stereo_merge;
imp.stereo_gap = this.stereo_gap;
......
......@@ -2470,7 +2470,7 @@ public class OpticalFlow {
title); // dsrbg_titles);
}
public double [][] getSceneDisparityStrength(
public static double [][] getSceneDisparityStrength(
final boolean to_ref_disparity, // false - return scene disparity, true - convert disparity back to the reference scene
final double [] disparity_ref, // invalid tiles - NaN in disparity
final double [] disparity_scene, // invalid tiles - NaN in disparity (just for masking out invalid scene tiles)
......@@ -2758,7 +2758,8 @@ public class OpticalFlow {
final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
int threadsMax)
{
boolean debug_ers = false; // true; // false; // true; // true; //11.01.2022
boolean ignore_ers = false; // false;
TileProcessor tp = scene_QuadClt.getTileProcessor();
final int tilesX = (full_woi_in==null) ? tp.getTilesX() : full_woi_in.width; // full width, including extra
final int tilesY = (full_woi_in==null) ? tp.getTilesY() : full_woi_in.height;
......@@ -2794,9 +2795,30 @@ public class OpticalFlow {
final ErsCorrection ersSceneCorrection = scene_QuadClt.getErsCorrection();
final ErsCorrection ersReferenceCorrection = (reference_QuadClt!=null)? reference_QuadClt.getErsCorrection(): ersSceneCorrection;
if (reference_QuadClt!=null) {
if (ignore_ers) {
ersReferenceCorrection.setErsDt(
ZERO3, // double [] ers_xyz_dt,
ZERO3); // double [] ers_atr_dt);
}
ersReferenceCorrection.setupERS(); // just in case - setUP using instance parameters
}
if (ignore_ers) {
ersSceneCorrection.setErsDt(
ZERO3, // double [] ers_xyz_dt,
ZERO3); // double [] ers_atr_dt);
}
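// With both ERS rate vectors zeroed, setupERS() below has nothing to integrate, so
// rolling-shutter (ERS) compensation is effectively disabled for the scene (and, above,
// for the reference) - a debug aid to isolate ERS effects from the pose transform.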
ersSceneCorrection.setupERS();
if (debug_ers) {
boolean same_scene = reference_QuadClt == scene_QuadClt;
if (reference_QuadClt != null) {
System.out.println("reference: "+ reference_QuadClt.getImageName());
ersReferenceCorrection.printVectors(null,null);
}
System.out.println("scene: "+ scene_QuadClt.getImageName()+(same_scene? " same scene as ref":""));
ersSceneCorrection.printVectors(scene_xyz, scene_atr);
}
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
......@@ -5950,6 +5972,7 @@ public class OpticalFlow {
* @param nscene index of the current scene
* @return [2][3] array of {{dx/dt, dy/dt, dz/dt}, {dazimuth/dt, dtilt/dt, droll/dt}}
*/
@Deprecated // should get velocities from the reference scene's HashMap by timestamp, not re-calculate.
public static double [][] getVelocities(
QuadCLT [] quadCLTs,
int nscene){
......@@ -6003,6 +6026,7 @@ public class OpticalFlow {
QuadCLT [] quadCLTs,
int threadsMax,
int debugLevel) {
boolean corr_raw_ers = true;
double [] stereo_atr = ZERO3; // maybe later play with rotated camera
boolean um_mono = clt_parameters.imp.um_mono;
double um_sigma = clt_parameters.imp.um_sigma;
......@@ -6021,6 +6045,9 @@ public class OpticalFlow {
sensor_mask = 1;
}
String suffix = suffix_in+((mode3d > 0)?(merge_all?"-MERGED":"-SINGLE"):"");
if ((mode3d <0) && (corr_raw_ers)) {
suffix+="-RAW_ERS";
}
int ref_index = quadCLTs.length -1;
int num_sens = quadCLTs[ref_index].getNumSensors();
ErsCorrection ers_reference = quadCLTs[ref_index].getErsCorrection();
......@@ -6053,12 +6080,16 @@ public class OpticalFlow {
if ((scene_atr==null) || (scene_xyz == null)) {
continue;
}
if (mode3d >= 0) {
if ((mode3d >= 0) || corr_raw_ers) {
double [] scene_ers_xyz_dt = ers_reference.getSceneErsXYZ_dt(ts);
double [] scene_ers_atr_dt = ers_reference.getSceneErsATR_dt(ts);
quadCLTs[nscene].getErsCorrection().setErsDt(
scene_ers_xyz_dt, // double [] ers_xyz_dt,
scene_ers_atr_dt); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
if (mode3d < 0) { // velocities != 0, but offset=0
scene_xyz = ZERO3;
scene_atr = ZERO3;
}
} else { // ugly, restore for raw mode that should not be rotated/shifted
scene_xyz = ZERO3;
scene_atr = ZERO3;
......@@ -6076,10 +6107,14 @@ public class OpticalFlow {
int sm = merge_all? -1: sensor_mask;
ImagePlus imp_scene = null;
double [][] dxyzatr_dt = null;
// should get velocities from the reference scene's HashMap by timestamp, not re-calculate.
if (mb_en) {
dxyzatr_dt = getVelocities(
quadCLTs, // QuadCLT [] quadCLTs,
nscene); // int nscene)
// dxyzatr_dt = getVelocities(
// quadCLTs, // QuadCLT [] quadCLTs,
// nscene); // int nscene)
dxyzatr_dt = new double[][] { // for all, including ref
quadCLTs[nscene].getErsCorrection().getErsXYZ_dt(),
quadCLTs[nscene].getErsCorrection().getErsATR_dt()};
}
......@@ -6123,8 +6158,9 @@ public class OpticalFlow {
clt_parameters, // CLTParameters clt_parameters,
ref_disparity, // double [] disparity_ref,
// not used, just as null/not null now
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
// null means uniform grid, no view transform. Even with zero rotation ERS was changing results
((!corr_raw_ers && (mode3d<0))? null:scene_xyz), // final double [] scene_xyz, // camera center in world coordinates
((!corr_raw_ers && (mode3d<0))? null:scene_atr), // final double [] scene_atr, // camera orientation relative to world frame
quadCLTs[nscene], // final QuadCLT scene,
quadCLTs[ref_index], // final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
toRGB, // final boolean toRGB,
......@@ -6249,33 +6285,26 @@ public class OpticalFlow {
double [] use_atr = null;
int dbg_ntry = 61;
// testing switching to use interCorrPair() instead of interCorrPair_old():
boolean use_new_mode = true;
double [] ref_disparity = null;
double [][] pXpYD_ref = null;
TpTask[] tp_tasks_ref = null;
if (use_new_mode) {
ref_disparity = reference_QuadClt.getDLS()[use_lma_dsi?1:0];
pXpYD_ref = transformToScenePxPyD( // full size - [tilesX*tilesY], some nulls
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
reference_QuadClt, // final QuadCLT scene_QuadClt,
reference_QuadClt); // final QuadCLT reference_QuadClt)
tp_tasks_ref = setReferenceGPU (
clt_parameters, // CLTParameters clt_parameters,
reference_QuadClt, // QuadCLT ref_scene,
ref_disparity, // double [] ref_disparity, // null or alternative reference disparity
pXpYD_ref, // double [][] ref_pXpYD,
reliable_ref, // final boolean [] selection, // may be null, if not null do not process unselected tiles
margin, // final int margin,
// motion blur compensation
0.0, // double mb_tau, // 0.008; // time constant, sec
0.0, // double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
null, // double [][] mb_vectors, // now [2][ntiles];
debugLevel)[0]; // int debug_level)
}
double [] ref_disparity = reference_QuadClt.getDLS()[use_lma_dsi?1:0];
double [][] pXpYD_ref = transformToScenePxPyD( // full size - [tilesX*tilesY], some nulls
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
reference_QuadClt, // final QuadCLT scene_QuadClt,
reference_QuadClt); // final QuadCLT reference_QuadClt)
TpTask[] tp_tasks_ref = setReferenceGPU (
clt_parameters, // CLTParameters clt_parameters,
reference_QuadClt, // QuadCLT ref_scene,
ref_disparity, // double [] ref_disparity, // null or alternative reference disparity
pXpYD_ref, // double [][] ref_pXpYD,
reliable_ref, // final boolean [] selection, // may be null, if not null do not process unselected tiles
margin, // final int margin,
// motion blur compensation
0.0, // double mb_tau, // 0.008; // time constant, sec
0.0, // double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
null, // double [][] mb_vectors, // now [2][ntiles];
debugLevel)[0]; // int debug_level)
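// Motion-blur compensation is intentionally off here (mb_tau = 0, mb_max_gain = 0,
// mb_vectors = null): the reference tasks are prepared once, before the spiral search.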
try_around:
for (rad = 00; rad <= search_rad; rad++) {
......@@ -6295,45 +6324,25 @@ public class OpticalFlow {
if (ntry==dbg_ntry) {
System.out.println("ntry="+ntry);
}
double [][][] coord_motion = null;
if (use_new_mode) {
coord_motion = interCorrPair( // new double [tilesY][tilesX][][];
clt_parameters, // CLTParameters clt_parameters,
reference_QuadClt, // QuadCLT reference_QuadCLT,
ref_disparity, // null, // double [] ref_disparity, // null or alternative reference disparity
pXpYD_ref, // double [][] pXpYD_ref, // pXpYD for the reference scene 11653
tp_tasks_ref, // TpTask[] tp_tasks_ref, // only (main if MB correction) tasks for FPN correction
scene_QuadClt, // QuadCLT scene_QuadCLT,
pose[0], // camera_xyz0, // xyz
atr, // camera_atr0, // pose[1], // atr
reliable_ref, // null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
margin, // final int margin,
sensor_mask_inter, // final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
facc_2d_img, // final float [][][] accum_2d_corr, // if [1][][] - return accumulated 2d correlations (all pairs)
null, // final float [][][] dbg_corr_fpn,
false, // boolean near_important, // do not reduce weight of the near tiles
treat_serch_fpn, // true, // boolean all_fpn, // do not lower thresholds for non-fpn (used during search)
null, // double [][] mb_vectors, // now [2][ntiles];
clt_parameters.imp.debug_level, // int imp_debug_level,
clt_parameters.imp.debug_level); // 1); // -1); // int debug_level);
} else {
coord_motion = interCorrPair_old( // new double [tilesY][tilesX][][];
clt_parameters, // CLTParameters clt_parameters,
reference_QuadClt, // QuadCLT reference_QuadCLT,
null, // double [] ref_disparity, // null or alternative reference disparity
scene_QuadClt, // QuadCLT scene_QuadCLT,
pose[0], // camera_xyz0, // xyz
atr, // camera_atr0, // pose[1], // atr
reliable_ref, // null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
margin, // final int margin,
sensor_mask_inter, // final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
facc_2d_img, // final float [][][] accum_2d_corr, // if [1][][] - return accumulated 2d correlations (all pairs)
null, // final float [][][] dbg_corr_fpn,
false, // boolean near_important, // do not reduce weight of the near tiles
treat_serch_fpn, // true, // boolean all_fpn, // do not lower thresholds for non-fpn (used during search)
clt_parameters.imp.debug_level, // int imp_debug_level,
clt_parameters.imp.debug_level); // 1); // -1); // int debug_level);
}
double [][][] coord_motion = interCorrPair( // new double [tilesY][tilesX][][];
clt_parameters, // CLTParameters clt_parameters,
reference_QuadClt, // QuadCLT reference_QuadCLT,
ref_disparity, // null, // double [] ref_disparity, // null or alternative reference disparity
pXpYD_ref, // double [][] pXpYD_ref, // pXpYD for the reference scene 11653
tp_tasks_ref, // TpTask[] tp_tasks_ref, // only (main if MB correction) tasks for FPN correction
scene_QuadClt, // QuadCLT scene_QuadCLT,
pose[0], // camera_xyz0, // xyz
atr, // camera_atr0, // pose[1], // atr
reliable_ref, // null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
margin, // final int margin,
sensor_mask_inter, // final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
facc_2d_img, // final float [][][] accum_2d_corr, // if [1][][] - return accumulated 2d correlations (all pairs)
null, // final float [][][] dbg_corr_fpn,
false, // boolean near_important, // do not reduce weight of the near tiles
treat_serch_fpn, // true, // boolean all_fpn, // do not lower thresholds for non-fpn (used during search)
null, // double [][] mb_vectors, // now [2][ntiles];
clt_parameters.imp.debug_level, // int imp_debug_level,
clt_parameters.imp.debug_level); // 1); // -1); // int debug_level);
// may return null if failed to match
if (coord_motion == null) {
System.out.println("Failed to find a match between the reference scene ("+reference_QuadClt.getImageName() +
......@@ -7119,7 +7128,8 @@ public class OpticalFlow {
/**
* Calculate linear and angular velocities by running-average of the poses
* Inverts Azimuth and Roll directions
* @param scenes Scene objects (to get timestamps)
* @param scenes_xyzatr scene linear and angular coordinate [scene][2][3]
* @param half_run_range number of scenes each way to average (0 - none,
......@@ -7176,6 +7186,103 @@ public class OpticalFlow {
return ers_xyzatr;
}
/**
* Calculate linear and angular velocities by running-average of the poses
* Does not invert Azimuth, Roll, or any other directions
* @param scenes Scene objects (to get timestamps)
* @param ref_index reference scene index in scenes[]
* @param start_scene first scene (inclusive) to process
* @param end_scene end scene (inclusive) to process
* @param scenes_xyzatr scene linear and angular coordinates [scene][2][3]
* @param half_run_range number of scenes each way to average (0 - none,
* 1 - 3, 2 - 5) reduced when near the limits
* @return [scene][2][3] xyz/dt, atr/dt.
*/
public static double [][][] getVelocitiesFromScenes(
QuadCLT [] scenes, // ordered by increasing timestamps
int ref_index,
int start_scene,
int end_scene,
double [][][] scenes_xyzatr,
double half_run_range // <=0: use +/-1 scenes for the derivative (or +0 if a neighbor is not available)
){
double [][][] ers_xyzatr = new double [scenes_xyzatr.length][][];
if (half_run_range <=0 ) {
ErsCorrection ers_reference =scenes[ref_index].getErsCorrection();
for (int nscene = start_scene; nscene <= end_scene; nscene ++) if (scenes[nscene] != null){
int nscene0 = nscene - 1;
if ((nscene0 < start_scene) ||
(scenes[nscene0]== null)||
(ers_reference.getSceneXYZ(scenes[nscene0].getImageName())== null) ||
(ers_reference.getSceneATR(scenes[nscene0].getImageName())== null)) {
nscene0 = nscene;
}
int nscene1 = nscene + 1;
if ((nscene1 > end_scene) || (scenes[nscene1]== null)) {
nscene1 = nscene;
}
if (nscene1 > nscene0) {
String ts0 = scenes[nscene0].getImageName();
String ts1 = scenes[nscene1].getImageName();
double dt = scenes[nscene1].getTimeStamp() - scenes[nscene0].getTimeStamp();
double [] scene_xyz0 = ers_reference.getSceneXYZ(ts0);
double [] scene_atr0 = ers_reference.getSceneATR(ts0);
double [] scene_xyz1 = (nscene1== ref_index)? ZERO3:ers_reference.getSceneXYZ(ts1);
double [] scene_atr1 = (nscene1== ref_index)? ZERO3:ers_reference.getSceneATR(ts1);
ers_xyzatr[nscene] = new double[2][3];
for (int i = 0; i < 3; i++) {
ers_xyzatr[nscene][0][i] = (scene_xyz1[i]-scene_xyz0[i])/dt;
ers_xyzatr[nscene][1][i] = (scene_atr1[i]-scene_atr0[i])/dt;
}
} else {
System.out.println("**** Isoloated scene!!! skipping... now may only happen for a ref_scene****");
}
}
} else {
double [] weights = new double [(int) Math.floor(half_run_range)+1];
weights[0] = 1;
for (int i = 1; i < weights.length; i++) {
weights[i] = 0.5* (Math.cos(i*Math.PI/half_run_range) +1.0);
}
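// weights[] is a raised-cosine (Hann-like) taper: 1.0 for the center scene,
// decaying to ~0 at +/-half_run_range scenes away from it.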
for (int nscene = start_scene; nscene <= end_scene; nscene ++) if (scenes[nscene] != null){
double tref = scenes[nscene].getTimeStamp();
double s0 = 0.0, sx = 0.0, sx2 = 0.0;
double [][] sy = new double[2][3];
double [][] sxy = new double[2][3];
int nds = 0;
for (int ds = -weights.length + 1; ds < weights.length; ds++) {
int ns = nscene + ds;
if ((ns >= 0) && (ns < scenes_xyzatr.length) && (scenes_xyzatr[ns] != null)) {
double w = (ds >= 0) ? weights[ds] : weights[-ds];
s0 += w;
double dt = scenes[ns].getTimeStamp() - tref;
double wx = w * dt;
sx += wx;
sx2 += wx * dt;
for (int m = 0; m < sy.length; m++) {
for (int d = 0; d < sy[m].length; d++) {
double y = scenes_xyzatr[ns][m][d];
sy [m][d] += w * y;
sxy[m][d] += wx * y;
}
}
nds++; // number of different timestamps used
}
}
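// The accumulated sums give the weighted least-squares slope of each pose component
// versus time: velocity = (s0*sxy - sx*sy)/(s0*sx2 - sx*sx), evaluated independently
// for x, y, z and azimuth, tilt, roll.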
if ((nds > 1) && (s0 > 0)) {
ers_xyzatr[nscene] = new double[2][3];
for (int m = 0; m < sy.length; m++) {
for (int d = 0; d < sy[m].length; d++) {
ers_xyzatr[nscene][m][d] = (s0*sxy[m][d]-sx*sy[m][d])/(s0*sx2 - sx*sx);
}
}
}
}
}
return ers_xyzatr;
}
public void IntersceneAccumulate(
CLTParameters clt_parameters,
ColorProcParameters colorProcParameters,
......@@ -9238,6 +9345,9 @@ public class OpticalFlow {
double[][] disparity_map = no_map ? null : new double [image_dtt.getDisparityTitles().length][];
final double disparity_corr = 0.00; // (z_correction == 0) ? 0.0 : geometryCorrection.getDisparityFromZ(1.0/z_correction);
TpTask[] tp_tasks_ref = null;
//TODO: Need to set scenes[indx_ref].getErsCorrection().setErsDt() before processing scenes ?
for (int nscene = 0; nscene < scenes.length; nscene++) {
String ts = scenes[nscene].getImageName();
double [][] scene_pXpYD;
......@@ -11023,479 +11133,6 @@ public class OpticalFlow {
return tp_tasks_ref;
}
// TODO: Still used in spiralSearchATR(), may be updated
@Deprecated
public double [][][] interCorrPair_old( // return [tilesX*tilesY]{ref_pXpYD, dXdYS}
CLTParameters clt_parameters,
QuadCLT ref_scene,
double [] ref_disparity, // null or alternative reference disparity
QuadCLT scene,
double [] scene_xyz,
double [] scene_atr,
final boolean [] selection, // may be null, if not null do not process unselected tiles
final int margin,
final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
final float [][][] accum_2d_corr, // if [1][][] - return accumulated 2d correlations (all pairs)
final float [][] dbg_corr_fpn,
boolean near_important, // do not reduce weight of the near tiles
boolean all_fpn, // do not lower thresholds for non-fpn (used during search)
int imp_debug_level, // MANUALLY change to 2 for debug!
int debug_level)
{
TileProcessor tp = ref_scene.getTileProcessor();
// Temporarily reusing the same ref scene ******
boolean scene_is_ref_test = clt_parameters.imp.scene_is_ref_test; // false; // true;
boolean show_2d_correlations = clt_parameters.imp.show2dCorrelations(imp_debug_level); // true;
boolean show_motion_vectors = clt_parameters.imp.showMotionVectors(imp_debug_level); // true;
boolean show_render_ref = clt_parameters.imp.renderRef(imp_debug_level); // false; //true;
boolean show_render_scene = clt_parameters.imp.renderScene(imp_debug_level); // false; // true;
boolean toRGB = clt_parameters.imp.toRGB ; // true;
boolean show_coord_motion = clt_parameters.imp.showCorrMotion(imp_debug_level); // make its own
int erase_clt = (toRGB? clt_parameters.imp.show_color_nan : clt_parameters.imp.show_mono_nan) ? 1:0;
boolean use_lma_dsi = clt_parameters.imp.use_lma_dsi;
boolean mov_en = clt_parameters.imp.mov_en; // true; // enable detection/removal of the moving objects during pose matching
boolean mov_debug_images = clt_parameters.imp.showMovementDetection(imp_debug_level);
int mov_debug_level = clt_parameters.imp.movDebugLevel(imp_debug_level);
boolean fpn_remove = clt_parameters.imp.fpn_remove;
double fpn_max_offset = clt_parameters.imp.fpn_max_offset;
double fpn_radius = clt_parameters.imp.fpn_radius;
boolean fpn_ignore_border = clt_parameters.imp.fpn_ignore_border; // only if fpn_mask != null - ignore tile if maximum touches fpn_mask
boolean eq_debug = false;
boolean eq_en = near_important && clt_parameters.imp.eq_en; // true;// equalize "important" FG tiles for better camera XYZ fitting
int eq_stride_hor = clt_parameters.imp.eq_stride_hor; // 8;
int eq_stride_vert = clt_parameters.imp.eq_stride_vert; // 8;
double eq_min_stile_weight = clt_parameters.imp.eq_min_stile_weight; // 0.2; // 1.0;
int eq_min_stile_number = clt_parameters.imp.eq_min_stile_number; // 10;
double eq_min_stile_fraction =clt_parameters.imp.eq_min_stile_fraction; // 0.02; // 0.05;
double eq_min_disparity = clt_parameters.imp.eq_min_disparity; // 5;
double eq_max_disparity = clt_parameters.imp.eq_max_disparity; // 100;
double eq_weight_add = clt_parameters.imp.eq_weight_add; // 0.05;
double eq_weight_scale = clt_parameters.imp.eq_weight_scale; // 10;
double eq_level = clt_parameters.imp.eq_level; // 0.8; // equalize to (log) fraction of average/this strength
if (scene_is_ref_test) {
scene_xyz = ZERO3.clone();
scene_atr = ZERO3.clone();
ref_scene = scene; // or opposite: scene = ref_scene;
}
int tilesX = tp.getTilesX();
int tilesY = tp.getTilesY();
double [][][] coord_motion = null; // new double [2][tilesX*tilesY][];
final double [][][] motion_vectors = show_motion_vectors?new double [tilesY *tilesX][][]:null;
final float [][][] fclt_corr = ((accum_2d_corr != null) || show_2d_correlations) ?
(new float [tilesX * tilesY][][]) : null;
if (debug_level > 1) {
System.out.println("interCorrPair(): "+IntersceneLma.printNameV3("ATR",scene_atr)+
" "+IntersceneLma.printNameV3("XYZ",scene_xyz));
}
ImageDtt image_dtt;
image_dtt = new ImageDtt(
numSens,
clt_parameters.transform_size,
clt_parameters.img_dtt,
ref_scene.isAux(),
ref_scene.isMonochrome(),
ref_scene.isLwir(),
clt_parameters.getScaleStrength(ref_scene.isAux()),
ref_scene.getGPU());
if (ref_scene.getGPU() != null) {
ref_scene.getGPU().setGpu_debug_level(debug_level - 4); // monitor GPU ops >=-1
}
final double disparity_corr = 0.0; // (z_correction == 0) ? 0.0 : geometryCorrection.getDisparityFromZ(1.0/z_correction);
//ref_disparity
if (ref_disparity == null) {
ref_disparity = ref_scene.getDLS()[use_lma_dsi?1:0];
}
double [][] ref_pXpYD = transformToScenePxPyD( // full size - [tilesX*tilesY], some nulls
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
ref_scene, // final QuadCLT scene_QuadClt,
ref_scene); // final QuadCLT reference_QuadClt)
final double gpu_sigma_corr = clt_parameters.getGpuCorrSigma(ref_scene.isMonochrome());
final double gpu_sigma_rb_corr = ref_scene.isMonochrome()? 1.0 : clt_parameters.gpu_sigma_rb_corr;
final double gpu_sigma_log_corr = clt_parameters.getGpuCorrLoGSigma(ref_scene.isMonochrome());
TpTask[] tp_tasks_ref = GpuQuad.setInterTasks(
ref_scene.getNumSensors(),
ref_scene.getErsCorrection().getSensorWH()[0],
!ref_scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
ref_pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
selection, // final boolean [] selection, // may be null, if not null do not process unselected tiles
ref_scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
THREADS_MAX); // final int threadsMax) // maximal number of threads to launch
double [][] scene_pXpYD = transformToScenePxPyD( // will be null for disparity == NaN, total size - tilesX*tilesY
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scene, // final QuadCLT scene_QuadClt,
scene_is_ref_test ? null: ref_scene); // final QuadCLT reference_QuadClt)
TpTask[] tp_tasks = GpuQuad.setInterTasks(
scene.getNumSensors(),
scene.getErsCorrection().getSensorWH()[0],
!scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
scene_pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
selection, // final boolean [] selection, // may be null, if not null do not process unselected tiles
scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
THREADS_MAX); // final int threadsMax) // maximal number of threads to launch
if (ref_scene.hasGPU()) { // direct convert to reference_clt
float [][][][] fcorr_td = null; // no accumulation, use data in GPU
ref_scene.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
image_dtt.setReferenceTD( // tp_tasks_ref will be updated
erase_clt,
null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
true, // final boolean use_reference_buffer,
tp_tasks_ref, // final TpTask[] tp_tasks,
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel);
if (show_render_ref) {
ImagePlus imp_render_ref = ref_scene.renderFromTD (
-1, // final int sensor_mask,
false, // boolean merge_channels,
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(ref_scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
null, // int [] wh,
toRGB, // boolean toRGB,
true, //boolean use_reference
"GPU-SHIFTED-REFERENCE"); // String suffix)
imp_render_ref.show();
}
scene.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
image_dtt.interCorrTD(
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tp_tasks, // final TpTask[] tp_tasks,
fcorr_td, // final float [][][][] fcorr_td, // [tilesY][tilesX][pair][4*64] transform domain representation of 6 corr pairs
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
gpu_sigma_rb_corr, // final double gpu_sigma_rb_corr, // = 0.5; // apply LPF after accumulating R and B correlation before G, monochrome ? 1.0 :
gpu_sigma_corr, // final double gpu_sigma_corr, // = 0.9;gpu_sigma_corr_m
gpu_sigma_log_corr, // final double gpu_sigma_log_corr, // hpf to reduce dynamic range for correlations
clt_parameters.corr_red, // final double corr_red, // +used
clt_parameters.corr_blue, // final double corr_blue,// +used
sensor_mask_inter, // final int sensor_mask_inter, // The bitmask - which sensors to correlate, -1 - all.
THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel);
if (show_render_ref) {
ImagePlus imp_render_ref = ref_scene.renderFromTD (
-1, // final int sensor_mask,
false, // boolean merge_channels,
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(ref_scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
null, // int [] wh,
toRGB, // boolean toRGB,
true, //boolean use_reference
"GPU-SHIFTED-REF"); // String suffix)
imp_render_ref.show();
}
if (show_render_scene) {
ImagePlus imp_render_scene = scene.renderFromTD (
-1, // final int sensor_mask,
false, // boolean merge_channels,
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(ref_scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
null, // int [] wh,
toRGB, // boolean toRGB,
false, //boolean use_reference
"GPU-SHIFTED-SCENE"); // String suffix)
imp_render_scene.show();
}
if (dbg_corr_fpn != null) { // 2*16 or 2*17 (average, individual)
float [][] foffsets = getInterCorrOffsetsDebug(
tp_tasks_ref, // final TpTask[] tp_tasks_ref,
tp_tasks, // final TpTask[] tp_tasks,
tilesX, // final int tilesX,
tilesY); // final int tilesY
for (int i = 0; (i < dbg_corr_fpn.length) && (i < foffsets.length); i++) {
dbg_corr_fpn[i] = foffsets[i];
}
}
double [][] fpn_offsets = null;
if (fpn_remove) {
fpn_offsets = getInterCorrOffsets(
fpn_max_offset, // final double max_offset,
tp_tasks_ref, // final TpTask[] tp_tasks_ref,
tp_tasks, // final TpTask[] tp_tasks,
tilesX, // final int tilesX,
tilesY); // final int tilesY);
}
double half_disparity = near_important ? 0.0 : clt_parameters.imp.half_disparity;
double [][][] dcorr_tiles = (fclt_corr != null)? (new double [tp_tasks.length][][]):null;
// will use num_acc with variable number of accumulations (e.g. clusters)
//all_fpn
double min_str = all_fpn ? clt_parameters.imp.min_str_fpn : clt_parameters.imp.min_str;
double min_str_sum = all_fpn ? clt_parameters.imp.min_str_sum_fpn : clt_parameters.imp.min_str_sum;
coord_motion = image_dtt.clt_process_tl_interscene( // convert to pixel domain and process correlations already prepared in fcorr_td and/or fcorr_combo_td
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
fcorr_td, // final float [][][][] fcorr_td, // [tilesY][tilesX][pair][4*64] transform domain representation of all selected corr pairs
null, // float [][][] num_acc, // number of accumulated tiles [tilesY][tilesX][pair] (or null). Can be inner null if not used in tp_tasks
null, // double [] dcorr_weight, // alternative to num_acc, compatible with CPU processing (only one non-zero enough)
clt_parameters.gpu_corr_scale, // final double gpu_corr_scale, // 0.75; // reduce GPU-generated correlation values
clt_parameters.getGpuFatZeroInter(ref_scene.isMonochrome()), // final double gpu_fat_zero, // clt_parameters.getGpuFatZero(is_mono);absolute == 30.0
image_dtt.transform_size - 1, // final int gpu_corr_rad, // = transform_size - 1 ?
// The tp_tasks data should be decoded from GPU to get coordinates
tp_tasks, // final TpTask [] tp_tasks, // data from the reference frame - will be applied to LMA for the integrated correlations
// to be converted to float (may be null)
dcorr_tiles, // final double [][][] dcorr_tiles, // [tile][pair_abs, sparse][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
ref_pXpYD, // final double [][] pXpYD, // pXpYD for the reference scene
fpn_offsets, // final double [][] fpn_offsets, // null, or per-tile X,Y offset to be blanked
fpn_radius, // final double fpn_radius, // radius to be blanked around FPN offset center
fpn_ignore_border, // final boolean fpn_ignore_border, // only if fpn_mask != null - ignore tile if maximum touches fpn_mask
motion_vectors, // final double [][][] motion_vectors, // [tilesY*tilesX][][] -> [][][num_sel_sensors+1][2]
clt_parameters.imp.run_poly, // final boolean run_poly, // polynomial max, if false - centroid
clt_parameters.imp.use_partial, // final boolean use_partial, // find motion vectors for individual pairs, false - for sum only
clt_parameters.imp.centroid_radius,// final double centroid_radius, // 0 - use all tile, >0 - cosine window around local max
clt_parameters.imp.n_recenter, // final int n_recenter, // when cosine window, re-center window this many times
clt_parameters.imp.td_weight, // final double td_weight, // mix correlations accumulated in TD with
clt_parameters.imp.pd_weight, // final double pd_weight, // correlations (post) accumulated in PD
clt_parameters.imp.td_nopd_only, // final boolean td_nopd_only, // only use TD accumulated data if no safe PD is available for the tile.
min_str, // final double min_str_nofpn, // = 0.25;
min_str_sum, // final double min_str_sum_nofpn, // = 0.8; // 5;
clt_parameters.imp.min_str_fpn, // final double min_str, // = 0.25;
clt_parameters.imp.min_str_sum_fpn,// final double min_str_sum, // = 0.8; // 5;
clt_parameters.imp.min_neibs, // final int min_neibs, // 2; // minimal number of strong neighbors (> min_str)
clt_parameters.imp.weight_zero_neibs, // final double weight_zero_neibs,// 0.2; // Reduce weight for no-neib (1.0 for all 8)
half_disparity, // final double half_disparity, // 5.0; // Reduce weight twice for this disparity
clt_parameters.imp.half_avg_diff, // final double half_avg_diff, // 0.2; // when L2 of x,y difference from average of neibs - reduce twice
clt_parameters.tileX, // final int debug_tileX,
clt_parameters.tileY, // final int debug_tileY,
THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debug_level);
// final int globalDebugLevel);
if (coord_motion == null) {
System.out.println("clt_process_tl_interscene() returned null");
return null;
}
if (eq_en) {
// double eq_weight_add = (min_str * clt_parameters.imp.pd_weight + min_str_sum * clt_parameters.imp.td_weight) /
// (clt_parameters.imp.pd_weight + clt_parameters.imp.td_weight);
double [] strength_backup = null;
if (eq_debug) { // **** Set manually in debugger ****
strength_backup = new double [coord_motion[1].length];
for (int i = 0; i < strength_backup.length; i++) if (coord_motion[1][i] != null) {
strength_backup[i] = coord_motion[1][i][2];
}else {
strength_backup[i] = Double.NaN;
}
}
do {
// restore
if (strength_backup != null) {
for (int i = 0; i < strength_backup.length; i++) if (coord_motion[1][i] != null) {
coord_motion[1][i][2] = strength_backup[i];
}
}
equalizeMotionVectorsWeights(
coord_motion, // final double [][][] coord_motion,
tilesX, // final int tilesX,
eq_stride_hor, // final int stride_hor,
eq_stride_vert, // final int stride_vert,
eq_min_stile_weight, // final double min_stile_weight,
eq_min_stile_number, // final int min_stile_number,
eq_min_stile_fraction, // final double min_stile_fraction,
eq_min_disparity, // final double min_disparity,
eq_max_disparity, // final double max_disparity,
eq_weight_add, // final double weight_add,
eq_weight_scale, // final double weight_scale)
eq_level); // equalize to (log) fraction of average/this strength
if (eq_debug) {
String [] mvTitles = {"dx", "dy","conf", "conf0", "pX", "pY","Disp","defined"}; // ,"blurX","blurY", "blur"};
double [][] dbg_img = new double [mvTitles.length][tilesX*tilesY];
for (int l = 0; l < dbg_img.length; l++) {
Arrays.fill(dbg_img[l], Double.NaN);
}
for (int nTile = 0; nTile < coord_motion[0].length; nTile++) {
if (coord_motion[0][nTile] != null) {
for (int i = 0; i <3; i++) {
dbg_img[4+i][nTile] = coord_motion[0][nTile][i];
}
}
dbg_img[3] = strength_backup;
if (coord_motion[1][nTile] != null) {
for (int i = 0; i <3; i++) {
dbg_img[0+i][nTile] = coord_motion[1][nTile][i];
}
}
dbg_img[7][nTile] = ((coord_motion[0][nTile] != null)?1:0)+((coord_motion[0][nTile] != null)?2:0);
}
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_img,
tilesX,
tilesY,
true,
scene.getImageName()+"-"+ref_scene.getImageName()+"-coord_motion-eq",
mvTitles);
}
} while (eq_debug);
}
if (mov_en) {
String debug_image_name = mov_debug_images ? (scene.getImageName()+"-"+ref_scene.getImageName()+"-movements"): null;
boolean [] move_mask = getMovementMask(
clt_parameters, // CLTParameters clt_parameters,
coord_motion[1], // double [][] motion, // only x,y,w components
tilesX, // int tilesX,
debug_image_name, // String debug_image_name,
mov_debug_level); // int debug_level);
if (move_mask != null) {
for (int nTile=0; nTile < move_mask.length; nTile++) if (move_mask[nTile]) {
coord_motion[1][nTile]= null;
}
}
}
float [][][] fclt_corr1 = ImageDtt.convertFcltCorr( // partial length, matching corr_indices = gpuQuad.getCorrIndices(); // also sets num_corr_tiles
dcorr_tiles, // double [][][] dcorr_tiles,// [tile][sparse, correlation pair][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
fclt_corr); // float [][][] fclt_corr) // new float [tilesX * tilesY][][] or null
if (show_2d_correlations) { // visualize prepare ref_scene correlation data
float [][] dbg_corr_rslt_partial = ImageDtt.corr_partial_dbg( // not used in lwir
fclt_corr1, // final float [][][] fcorr_data, // [tile][pair][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
image_dtt.getGPU().getCorrIndices(), // tp_tasks, // final TpTask [] tp_tasks, //
tilesX, //final int tilesX,
tilesY, //final int tilesX,
2*image_dtt.transform_size - 1, // final int corr_size,
1000, // will be limited by available layers; final int layers0,
clt_parameters.corr_border_contrast, // final double border_contrast,
THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel)
String [] titles = new String [dbg_corr_rslt_partial.length]; // dcorr_tiles[0].length];
for (int i = 0; i < titles.length; i++) {
if (i== (titles.length - 1)) {
titles[i] = "sum";
} else {
titles[i] = "sens-"+i;
}
}
// titles.length = 15, corr_rslt_partial.length=16!
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_corr_rslt_partial,
tilesX*(2*image_dtt.transform_size),
tilesY*(2*image_dtt.transform_size),
true,
scene.getImageName()+"-"+ref_scene.getImageName()+"-interscene",
titles);
}
} else {
throw new IllegalArgumentException ("interCorrPair(): CPU mode not supported");
}
if (motion_vectors != null) {
int num_sens = 0;
int num_rslt = 0;
find_num_layers:
for (int nt = 0; nt <motion_vectors.length; nt++) {
if (motion_vectors[nt] != null) {
num_sens = motion_vectors[nt].length;
for (int i = 0; i < num_sens; i++) {
if (motion_vectors[nt][i] != null) {
num_rslt = motion_vectors[nt][i].length;
break find_num_layers;
}
}
}
}
double [][] dbg_img = new double [num_sens * num_rslt][tilesX*tilesY];
String [] titles = new String [dbg_img.length];
for (int ns = 0; ns < num_sens; ns++) {
String ss = (ns == (num_sens - 1))?"sum":(""+ns);
for (int nr = 0; nr < num_rslt; nr++) {
int indx = ns*num_rslt+nr;
Arrays.fill(dbg_img[indx],Double.NaN);
String sr="";
switch (nr) {
case 0:sr ="X";break;
case 1:sr ="Y";break;
case 2:sr ="S";break;
default:
sr = ""+nr;
}
titles[indx] = ss+":"+sr;
for (int nt = 0; nt <motion_vectors.length; nt++) {
if ((motion_vectors[nt] != null) && (motion_vectors[nt][ns] != null) ) {
dbg_img[indx][nt] = motion_vectors[nt][ns][nr];
}
}
}
}
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_img,
tilesX,
tilesY,
true,
scene.getImageName()+"-"+ref_scene.getImageName()+"-motion_vectors",
titles);
}
if (show_coord_motion) {
//coord_motion
String [] mvTitles = {"dx", "dy", "conf", "pX", "pY","Disp","defined"}; // ,"blurX","blurY", "blur"};
double [][] dbg_img = new double [mvTitles.length][tilesX*tilesY];
for (int l = 0; l < dbg_img.length; l++) {
Arrays.fill(dbg_img[l], Double.NaN);
}
for (int nTile = 0; nTile < coord_motion[0].length; nTile++) {
if (coord_motion[0][nTile] != null) {
for (int i = 0; i <3; i++) {
dbg_img[3+i][nTile] = coord_motion[0][nTile][i];
}
}
if (coord_motion[1][nTile] != null) {
for (int i = 0; i <3; i++) {
dbg_img[0+i][nTile] = coord_motion[1][nTile][i];
}
}
dbg_img[6][nTile] = ((coord_motion[0][nTile] != null)?1:0)+((coord_motion[0][nTile] != null)?2:0);
}
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_img,
tilesX,
tilesY,
true,
scene.getImageName()+"-"+ref_scene.getImageName()+"-coord_motion",
mvTitles);
}
if (debug_level > 0){
int num_defined = 0;
double sum_strength = 0.0;
for (int i = 0; i < coord_motion[1].length; i++) if (coord_motion[1][i] != null){
sum_strength += coord_motion[1][i][2];
num_defined++;
}
System.out.println ("interCorrPair(): num_defined = "+num_defined+
", sum_strength = "+sum_strength+", avg_strength = "+(sum_strength/num_defined));
}
return coord_motion;
}
public double [][][] interCorrPair( // return [tilesX*tilesY]{ref_pXpYD, dXdYS}
CLTParameters clt_parameters,
QuadCLT ref_scene,
......@@ -11541,6 +11178,7 @@ public class OpticalFlow {
boolean fpn_ignore_border = clt_parameters.imp.fpn_ignore_border; // only if fpn_mask != null - ignore tile if maximum touches fpn_mask
boolean eq_debug = false;
boolean transform_debug = false;
boolean eq_en = near_important && clt_parameters.imp.eq_en; // true;// equalize "important" FG tiles for better camera XYZ fitting
int eq_stride_hor = clt_parameters.imp.eq_stride_hor; // 8;
int eq_stride_vert = clt_parameters.imp.eq_stride_vert; // 8;
......@@ -11559,7 +11197,7 @@ public class OpticalFlow {
boolean mb_en = clt_parameters.imp.mb_en ;
double mb_tau = clt_parameters.imp.mb_tau; // 0.008; // time constant, sec
double mb_max_gain = clt_parameters.imp.mb_max_gain; // 5.0; // motion blur maximal gain (if more - move second point more than a pixel)
double mb_max_gain = clt_parameters.imp.mb_max_gain_inter; // 2.0; // motion blur maximal gain (if more - move second point more than a pixel)
int tilesX = tp.getTilesX();
int tilesY = tp.getTilesY();
......@@ -11597,6 +11235,51 @@ public class OpticalFlow {
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scene, // final QuadCLT scene_QuadClt,
scene_is_ref_test ? null: ref_scene); // final QuadCLT reference_QuadClt)
if (transform_debug) {
// calculate with no transform
double [][] dbg_ref_pXpYD = transformToScenePxPyD( // will be null for disparity == NaN, total size - tilesX*tilesY
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
ZERO3, // scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
ref_scene, // final QuadCLT scene_QuadClt,
ref_scene); // final QuadCLT reference_QuadClt)
double [][] dbg_ref_pXpYD2 = transformToScenePxPyD( // will be null for disparity == NaN, total size - tilesX*tilesY
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
ZERO3, // scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
new double[] {0,0,0.00001}, // scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
ref_scene, // final QuadCLT scene_QuadClt,
ref_scene); // final QuadCLT reference_QuadClt)
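// dbg_ref_pXpYD2 differs from dbg_ref_pXpYD only by a 1e-5 rad roll, so the interleaved
// pX/pY/disparity images below show the sensitivity of the reference transform to a tiny rotation.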
double [][] dbg_img = new double[6][dbg_ref_pXpYD.length];
for (int i = 0; i < dbg_img.length; i++) {
Arrays.fill(dbg_img[i], Double.NaN);
}
for (int nt = 0; nt < dbg_ref_pXpYD.length; nt++) {
if (dbg_ref_pXpYD[nt] != null) {
for (int i = 0; i < dbg_ref_pXpYD[nt].length; i++){
dbg_img[2*i][nt] = dbg_ref_pXpYD[nt][i];
}
}
if (dbg_ref_pXpYD2[nt] != null) {
for (int i = 0; i < dbg_ref_pXpYD2[nt].length; i++){
dbg_img[2*i+1][nt] = dbg_ref_pXpYD2[nt][i];
}
}
}
String [] dbg_titles = {"pX0","pX2","pY0","pY2","disp0","disp2"};
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_img,
tilesX,
tilesY,
true,
scene.getImageName()+"-"+ref_scene.getImageName()+"-transform_test",
dbg_titles);
System.out.println("transform_test ref_scene: "+ ref_scene.getImageName());
ref_scene.getErsCorrection().printVectors(scene_xyz, scene_atr);
}
TpTask[][] tp_tasks;
if (mb_en && (mb_vectors!=null)) {
tp_tasks = GpuQuad.setInterTasksMotionBlur(
......@@ -12779,10 +12462,31 @@ public class OpticalFlow {
int debugLevel)
{
// boolean test_motion_blur = true;//false
// Set up velocities from known coordinates, use averaging
double half_run_range = clt_parameters.ilp.ilma_motion_filter; // 3.5 by default; now a configurable parameter
double [][] dbg_scale_dt = {clt_parameters.ilp.ilma_scale_xyz, clt_parameters.ilp.ilma_scale_atr};
boolean debug_ers = clt_parameters.ilp.ilma_debug_ers ; // true;
String dbg_ers_string =
((dbg_scale_dt[1][0] > 0)?"a":((dbg_scale_dt[1][0] < 0) ? "A":"0"))+
((dbg_scale_dt[1][1] > 0)?"t":((dbg_scale_dt[1][1] < 0) ? "T":"0"))+
((dbg_scale_dt[1][2] > 0)?"r":((dbg_scale_dt[1][2] < 0) ? "R":"0"))+
((dbg_scale_dt[0][0] > 0)?"x":((dbg_scale_dt[0][0] < 0) ? "X":"0"))+
((dbg_scale_dt[0][1] > 0)?"y":((dbg_scale_dt[0][1] < 0) ? "Y":"0"))+
((dbg_scale_dt[0][2] > 0)?"z":((dbg_scale_dt[0][2] < 0) ? "Z":"0"));
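// One character per component in a,t,r,x,y,z order: lower case = positive scale,
// upper case = negative (inverted), '0' = disabled.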
// boolean ers_readjust_enable = true; // debugging ers during readjust 11.02.2022
// boolean ers_use_xyz = false; // true; // false; // enable when ready 11.02.2022
// int [][] dbg_scale_dt = {{0,0,0},{0, 0, 0}}; // no_ers - causes singular matrices (not always), but works
// int [][] dbg_scale_dt = {{0,0,0},{1,-1, 1}}; // aTr
// int [][] dbg_scale_dt = {{0,0,0},{1, 1,-1}}; // atR
// int [][] dbg_scale_dt = {{0,0,0},{1, 1, 1}}; // atr
// int [][] dbg_scale_dt = {{1,1,1},{1, 1, 1}}; // atrxyz
// int [][] dbg_scale_dt = {{1,1,1},{1, 1,-1}}; // atRxyz
// int [][] dbg_scale_dt = {{-1,-1,-1},{1, 1,1}}; // atrXYZ
// int [][] dbg_scale_dt = {{-1,-1,-1},{1, 1,-1}}; // atRXYZ
boolean mb_en = clt_parameters.imp.mb_en;
double mb_tau = clt_parameters.imp.mb_tau; // 0.008; // time constant, sec
double mb_max_gain = clt_parameters.imp.mb_max_gain; // 5.0; // motion blur maximal gain (if more - move second point more than a pixel)
double mb_max_gain = clt_parameters.imp.mb_max_gain_inter; // 5.0; // motion blur maximal gain (if more - move second point more than a pixel)
int margin = clt_parameters.imp.margin;
int earliest_scene = 0;
......@@ -12829,6 +12533,7 @@ public class OpticalFlow {
if (mb_ref_disparity == null) {
mb_ref_disparity = quadCLTs[ref_index].getDLS()[use_lma_dsi?1:0];
}
// ref_pXpYD should correspond to uniform grid (do not undo ERS of the reference scene)
ref_pXpYD = transformToScenePxPyD( // full size - [tilesX*tilesY], some nulls
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
mb_ref_disparity, // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
......@@ -12843,21 +12548,70 @@ public class OpticalFlow {
ErsCorrection ers_reference = quadCLTs[ref_index].getErsCorrection();
double [][][] dxyzatr_dt = new double[quadCLTs.length][][];
// double [][][] dxyzatr_dt = new double[quadCLTs.length][][];
double [][][] scenes_xyzatr = new double [quadCLTs.length][][]; // previous scene relative to the next one
scenes_xyzatr[ref_index] = new double[2][3]; // all zeros
// should have at least next or previous non-null
int debug_scene = -15;
double maximal_series_rms = 0.0;
double [][] mb_vectors_ref = null;
TpTask[][] tp_tasks_ref = null;
if (debug_ers) {
System.out.println("ERS velocities scale mode = '"+dbg_ers_string+"'");
}
for (int nscene = ref_index; nscene >= earliest_scene; nscene--) {
if (nscene == debug_scene) {
System.out.println("nscene = "+nscene);
System.out.println("nscene = "+nscene);
}
// just checking it is not isolated
if (quadCLTs[nscene] == null) {
earliest_scene = nscene + 1;
break;
}
if (nscene != ref_index) {
String ts = quadCLTs[nscene].getImageName();
if ((ers_reference.getSceneXYZ(ts)== null) || (ers_reference.getSceneATR(ts)== null)) {
earliest_scene = nscene + 1;
break;
}
scenes_xyzatr[nscene] = new double[][] {ers_reference.getSceneXYZ(ts), ers_reference.getSceneATR(ts)};
}
if (debug_ers) {
System.out.println(String.format("%3d xyz: %9.5f %9.5f %9.5f atr: %9.5f %9.5f %9.5f",
nscene,
scenes_xyzatr[nscene][0][0], scenes_xyzatr[nscene][0][1], scenes_xyzatr[nscene][0][2],
scenes_xyzatr[nscene][1][0], scenes_xyzatr[nscene][1][1], scenes_xyzatr[nscene][1][2]));
}
}
double [][][] dxyzatr_dt = getVelocitiesFromScenes(
quadCLTs, // QuadCLT [] scenes, // ordered by increasing timestamps
ref_index,
earliest_scene, // int start_scene,
ref_index, // int end1_scene,
scenes_xyzatr, // double [][][] scenes_xyzatr,
half_run_range); // double half_run_range
if (debug_ers) {
System.out.println();
for (int nscene = ref_index; nscene >= earliest_scene; nscene--) {
System.out.println(String.format("%3d xyz_dt: %9.5f %9.5f %9.5f atr_dt: %9.5f %9.5f %9.5f",
nscene,
dxyzatr_dt[nscene][0][0], dxyzatr_dt[nscene][0][1], dxyzatr_dt[nscene][0][2],
dxyzatr_dt[nscene][1][0], dxyzatr_dt[nscene][1][1], dxyzatr_dt[nscene][1][2]));
}
System.out.println();
}
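// Not part of the commit: getVelocitiesFromScenes() is not shown in this diff. A minimal sketch
// of one filtered estimate it could correspond to, assuming it averages finite differences of the
// known per-scene poses over a window of up to half_run_range scenes on each side:
//
//   int half = (int) Math.round(half_run_range);
//   for (int ns = earliest_scene; ns <= ref_index; ns++) {
//       if (scenes_xyzatr[ns] == null) continue;
//       int n0 = Math.max(earliest_scene, ns - half);
//       int n1 = Math.min(ref_index,      ns + half);
//       if ((n1 == n0) || (scenes_xyzatr[n0] == null) || (scenes_xyzatr[n1] == null)) continue;
//       double dt = quadCLTs[n1].getTimeStamp() - quadCLTs[n0].getTimeStamp();
//       dxyzatr_dt[ns] = new double[2][3];
//       for (int m = 0; m < 2; m++) for (int i = 0; i < 3; i++) {
//           dxyzatr_dt[ns][m][i] = (scenes_xyzatr[n1][m][i] - scenes_xyzatr[n0][m][i]) / dt;
//       }
//   }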
for (int nscene = ref_index; nscene >= earliest_scene; nscene--) {
if (nscene == debug_scene) {
System.out.println("nscene = "+nscene);
System.out.println("nscene = "+nscene);
}
// just checking it is not isolated
if ((quadCLTs[nscene] == null) ||
// (scenes_xyzatr[nscene] == null) || // no velocity even with averaging
((nscene != ref_index) &&
((ers_reference.getSceneXYZ(quadCLTs[nscene].getImageName())== null) ||
(ers_reference.getSceneATR(quadCLTs[nscene].getImageName())== null)))) {
......@@ -12865,45 +12619,20 @@ public class OpticalFlow {
break;
}
String ts = quadCLTs[nscene].getImageName();
int nscene0 = nscene - 1;
if ((nscene0 < 0) ||
(quadCLTs[nscene0]== null)||
(ers_reference.getSceneXYZ(quadCLTs[nscene0].getImageName())== null) ||
(ers_reference.getSceneATR(quadCLTs[nscene0].getImageName())== null)) {
nscene0 = nscene;
}
int nscene1 = nscene + 1;
if ((nscene1 > ref_index) || (quadCLTs[nscene1]== null)) {
nscene1 = nscene;
}
if (nscene1 == nscene0) {
System.out.println("**** Isoloated scene!!! skipping... now may only happen for a ref_scene****");
earliest_scene = nscene + 1;
break;
}
double dt = quadCLTs[nscene1].getTimeStamp() - quadCLTs[nscene0].getTimeStamp();
String ts0 = quadCLTs[nscene0].getImageName();
String ts1 = quadCLTs[nscene1].getImageName();
double [] scene_xyz0 = ers_reference.getSceneXYZ(ts0);
double [] scene_atr0 = ers_reference.getSceneATR(ts0);
if (scene_xyz0 == null) {
System.out.println ("BUG: No egomotion data for timestamp "+ts0);
System.out.println ("Need to re-run with Force egomotion calculation");
earliest_scene = nscene + 1;
break;
}
double [] scene_xyz1 = (nscene1== ref_index)? ZERO3:ers_reference.getSceneXYZ(ts1);
double [] scene_atr1 = (nscene1== ref_index)? ZERO3:ers_reference.getSceneATR(ts1);
dxyzatr_dt[nscene] = new double[2][3];
for (int i = 0; i < 3; i++) {
dxyzatr_dt[nscene][0][i] = (scene_xyz1[i]-scene_xyz0[i])/dt;
dxyzatr_dt[nscene][1][i] = (scene_atr1[i]-scene_atr0[i])/dt;
}
double [] scene_xyz_pre = ZERO3;
double [] scene_atr_pre = ZERO3;
// find correct signs when setting. dxyzatr_dt[][] is also used for motion blur (correctly)
double [][] scaled_dxyzatr_dt = new double[2][3];
for (int i = 0; i < scaled_dxyzatr_dt.length; i++) {
for (int j = 0; j < scaled_dxyzatr_dt[i].length; j++) {
scaled_dxyzatr_dt[i][j] = dxyzatr_dt[nscene][i][j]* dbg_scale_dt[i][j];
}
}
quadCLTs[nscene].getErsCorrection().setErsDt( // set for ref also (should be set before non-ref!)
ZERO3, //, // dxyzatr_dt[nscene][0], // double [] ers_xyz_dt,
dxyzatr_dt[nscene][1]); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
scaled_dxyzatr_dt[0], // (ers_use_xyz? dxyzatr_dt[nscene][0]: ZERO3), //, // dxyzatr_dt[nscene][0], // double [] ers_xyz_dt,
scaled_dxyzatr_dt[1]); // dxyzatr_dt[nscene][1]); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
if (dbg_mb_img != null) {
boolean show_corrected = false;
if (nscene == debug_scene) {
......@@ -12914,7 +12643,7 @@ public class OpticalFlow {
Arrays.fill(dbg_mb_img[nscene],Double.NaN);
double [] mb_scene_xyz = (nscene != ref_index)? ers_reference.getSceneXYZ(ts):ZERO3;
double [] mb_scene_atr = (nscene != ref_index)? ers_reference.getSceneATR(ts):ZERO3;
double [][] motion_blur = getMotionBlur(
double [][] motion_blur = getMotionBlur( // dxyzatr_dt[][] should not be scaled here
quadCLTs[ref_index], // QuadCLT ref_scene,
quadCLTs[nscene], // QuadCLT scene, // can be the same as ref_scene
ref_pXpYD, // double [][] ref_pXpYD, // here it is scene, not reference!
......@@ -12969,7 +12698,7 @@ public class OpticalFlow {
quadCLTs[ref_index], // final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
false, // toRGB, // final boolean toRGB,
clt_parameters.imp.show_color_nan,
quadCLTs[nscene].getImageName()+"-MOTION_BLUR_CORRECTED", // String suffix,
quadCLTs[nscene].getImageName()+"-MOTION_BLUR_CORRECTED-MERGED", // String suffix,
THREADS_MAX, // int threadsMax,
debugLevel); // int debugLevel)
imp_mbc_merged.show();
......@@ -12978,9 +12707,11 @@ public class OpticalFlow {
if (nscene == ref_index) { // set GPU reference CLT data - should be before other scenes
mb_vectors_ref = null;
if (mb_en) {
double [][] dxyzatr_dt_ref = getVelocities(
quadCLTs, // QuadCLT [] quadCLTs,
ref_index); // int nscene)
// should get velocities from the HashMap at the reference scene by timestamp, not re-calculate.
// double [][] dxyzatr_dt_ref = getVelocities( // was already set
// quadCLTs, // QuadCLT [] quadCLTs,
// ref_index); // int nscene)
double [][] dxyzatr_dt_ref = dxyzatr_dt[nscene];
mb_vectors_ref = getMotionBlur(
quadCLTs[ref_index], // QuadCLT ref_scene,
quadCLTs[ref_index], // QuadCLT scene, // can be the same as ref_scene
......@@ -13009,9 +12740,10 @@ public class OpticalFlow {
scene_xyz_pre = ers_reference.getSceneXYZ(ts);
scene_atr_pre = ers_reference.getSceneATR(ts);
if (mb_en) {
double [][] dxyzatr_dt_scene = getVelocities(
quadCLTs, // QuadCLT [] quadCLTs,
nscene); // int nscene)
// double [][] dxyzatr_dt_scene = getVelocities(
// quadCLTs, // QuadCLT [] quadCLTs,
// nscene); // int nscene)
double [][] dxyzatr_dt_scene = dxyzatr_dt[nscene];
mb_vectors = getMotionBlur(
quadCLTs[ref_index], // QuadCLT ref_scene,
quadCLTs[nscene], // QuadCLT scene, // can be the same as ref_scene
......@@ -13034,8 +12766,8 @@ public class OpticalFlow {
quadCLTs[nscene], // QuadCLT scene_QuadCLT,
scene_xyz_pre, // xyz
scene_atr_pre, // atr
clt_parameters.ilp.ilma_lma_select, // final boolean[] param_select,
clt_parameters.ilp.ilma_regularization_weights, // final double [] param_regweights,
clt_parameters.ilp.ilma_lma_select, // final boolean[] param_select,
clt_parameters.ilp.ilma_regularization_weights, // final double [] param_regweights,
lma_rms, // double [] rms, // null or double [2]
clt_parameters.imp.max_rms, // double max_rms,
// motion blur compensation
......
......@@ -1277,6 +1277,28 @@ public class QuadCLT extends QuadCLTCPU {
debugLevel);
}
/**
* Render a scene image from a disparity reference using the GPU. Passing scene_xyz==null or
* scene_atr==null selects the uniform grid (no shift, no ERS) for rendering raw images.
* @param sensor_mask sensor (channel) selection mask
* @param merge_channels merge the selected sensor channels into a single image
* @param full_woi_in window of interest larger than the sensor WOI, or null
* @param clt_parameters processing parameters
* @param disparity_ref reference disparity; invalid tiles are NaN
* @param mb_tau motion blur time constant, seconds
* @param mb_max_gain motion blur maximal gain (if more - move second point more than a pixel)
* @param mb_vectors motion blur vectors, [2][ntiles]
* @param scene_xyz camera center in world coordinates; if null - no shift, no ERS
* @param scene_atr camera orientation relative to the world frame; if null - no shift, no ERS
* @param scene scene to render
* @param ref_scene reference scene; may be null for testing if the scene is a rotated reference
* @param toRGB convert the rendered image to RGB
* @param show_nan show undefined (NaN) tiles
* @param suffix suffix for the rendered image title
* @param threadsMax maximal number of threads to use
* @param debugLevel debug level
* @return rendered image as an ImagePlus
*/
public static ImagePlus renderGPUFromDSI(
final int sensor_mask,
final boolean merge_channels,
......@@ -1288,8 +1310,8 @@ public class QuadCLT extends QuadCLTCPU {
double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel)
double [][] mb_vectors, // now [2][ntiles];
final double [] scene_xyz, // camera center in world coordinates
final double [] scene_atr, // camera orientation relative to world frame
double [] scene_xyz, // camera center in world coordinates. If null - no shift, no ers
double [] scene_atr, // camera orientation relative to world frame
final QuadCLT scene,
final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
final boolean toRGB,
......@@ -1297,14 +1319,28 @@ public class QuadCLT extends QuadCLTCPU {
String suffix,
int threadsMax,
final int debugLevel){
double [][] pXpYD =OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene
full_woi_in, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scene, // final QuadCLT scene_QuadClt,
ref_scene, // final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
threadsMax); // int threadsMax)
double [][] pXpYD;
if ((scene_xyz == null) || (scene_atr == null)) {
scene_xyz = new double[3];
scene_atr = new double[3];
pXpYD=OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene
full_woi_in, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
ref_scene, // final QuadCLT scene_QuadClt,
ref_scene, // final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
threadsMax); // int threadsMax)
} else {
pXpYD=OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene
full_woi_in, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scene, // final QuadCLT scene_QuadClt,
ref_scene, // final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
threadsMax); // int threadsMax)
}
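// Not part of the commit -- hypothetical usage (parameter list abbreviated, see the signature
// above): passing null pose vectors takes the first branch and renders on the uniform reference
// grid with no camera shift and no ERS, while explicit vectors take the second branch as before:
//
//   ImagePlus imp_raw = QuadCLT.renderGPUFromDSI(
//       ...,          // sensor_mask ... mb_vectors, as usual
//       null,         // scene_xyz -> uniform grid, no shift
//       null,         // scene_atr -> uniform grid, no ERS
//       scene,        // scene to render
//       ref_scene,    // reference scene
//       ...);         // toRGB ... debugLevel, as usual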
int rendered_width = scene.getErsCorrection().getSensorWH()[0];
if (full_woi_in != null) {
rendered_width = full_woi_in.width * GPUTileProcessor.DTT_SIZE;
......@@ -1429,7 +1465,7 @@ public class QuadCLT extends QuadCLTCPU {
/**
* Prepare 16x16 texture tiles using GPU from disparity reference. Includes motion blur correction
* Does not use scene.saveQuadClt() to re-load new set of Bayer images to the GPU -- should be done by callaer
* Does not use scene.saveQuadClt() to re-load new set of Bayer images to the GPU -- should be done by caller
* @param clt_parameters processing parameters
* @param disparity_ref disparity scene reference
* @param mb_tau thermal constant
......
......@@ -921,10 +921,14 @@ public class TexturedModel {
scene_ers_atr_dt); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
}
double [][] dxyzatr_dt = null;
// should get velocities from the HashMap at the reference scene by timestamp, not re-calculate.
if (mb_en) { // all scenes have the same name/path
dxyzatr_dt = OpticalFlow.getVelocities( // looks at previous/next scene poses
scenes, // QuadCLT [] quadCLTs,
nscene); // int nscene)
// dxyzatr_dt = OpticalFlow.getVelocities( // looks at previous/next scene poses
// scenes, // QuadCLT [] quadCLTs,
// nscene); // int nscene)
dxyzatr_dt = new double[][] { // for all, including ref
scenes[nscene].getErsCorrection().getErsXYZ_dt(),
scenes[nscene].getErsCorrection().getErsATR_dt()};
}
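// Not part of the commit -- the velocities read back above via getErsXYZ_dt()/getErsATR_dt() are
// assumed to be the ones stored into each scene's ErsCorrection earlier in the pipeline, e.g.
//   scenes[nscene].getErsCorrection().setErsDt(ers_xyz_dt, ers_atr_dt);
// during interscene pose fitting, so texture generation reuses the already-filtered ERS rates
// instead of re-deriving them from neighboring scene poses.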
scenes[nscene].saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU)
for (int nslice = 0; nslice < num_slices; nslice++) { // prepare and measure textures for each combo textures
......