Commit 4a5fabd6 authored by Andrey Filippov's avatar Andrey Filippov

Improving FPN removal

parent 33bb38e2
......@@ -19,7 +19,7 @@ public class PolynomialApproximation {
double [] SF=new double [N+1];
for (int i=0;i<=2*N;i++) S[i]=0.0;
for (int i=0;i<=N;i++) SF[i]=0.0;
for (int i=0;i<data.length;i++){
for (int i=0;i<data.length;i++) if (data[i] != null){
double wxn=(data[i].length>2)?data[i][2]:1.0;
if (wxn>0.0){ // save time on 0.0 that can be used to mask out some samples
double f=data[i][1];
......
......@@ -5,6 +5,7 @@ import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
import com.elphel.imagej.cameras.CLTParameters;
import com.elphel.imagej.common.DoubleGaussianBlur;
import com.elphel.imagej.common.PolynomialApproximation;
import com.elphel.imagej.common.ShowDoubleFloatArrays;
import com.elphel.imagej.gpu.GPUTileProcessor;
......@@ -77,29 +78,39 @@ public class CorrectionFPN {
return;
}
// pu
public ImagePlus saveShowFPN(
		double [][][] fpn,
		int width,
		boolean save,
		boolean show) {
	// Convenience overload: delegates to the full version, supplying the
	// default center-FPN suffix for the saved/shown image.
	final String default_suffix = QuadCLT.CENTER_FPN_SUFFIX;
	return saveShowFPN(fpn, width, save, show, default_suffix);
}
public ImagePlus saveShowFPN(
double [][][] fpn,
int width,
boolean save,
boolean show,
String suffix) {
ImagePlus imp = CorrectionFPN.showFPN(
fpn, // double [][][] fpn,
width, // int width,
show); // boolean show)
if (save && (imp != null)) {
quadCLT.saveImagePlusInModelDirectory(
QuadCLT.CENTER_FPN_SUFFIX, // String suffix, // null - use title from the imp
imp); // ImagePlus imp)
suffix, // String suffix, // null - use title from the imp
imp); // ImagePlus imp)
}
return imp;
}
public ImagePlus saveShowRowCol(
final double [][][] rows, // [scene][sensor][width]
final double [][][] cols, // [scene][sensor][height]
......@@ -454,205 +465,92 @@ public class CorrectionFPN {
return disparity_center;
}
public static double [][][] backPropagate(
CLTParameters clt_parameters,
final int discard_border,
final double max_fold,
final int min_in_row_col, // Minimal number of defined tiles in a row/column
final int invert_margins, // 1 Expand image each side when inverting tasks
final int invert_margins, // 0 Expand image each side when inverting tasks
final int invert_gap2, // 10 // Maximal dual gap size for inversion (depends on scanning radius in tiles)
final int invert_iters, // 4 Enhance inversion iterations
final double invert_tolerance,// 0.001 Finish enhancing when last change was lower than
final QuadCLT center_CLT,
final QuadCLT scene_CLT,
final QuadCLT [] quadCLTs,
final int first_index,
final int last_index,
final double [] disparity_center,
final int debugLevel) {
//CorrectionFPN.class mb_en == false;
// boolean condition_dsi = true;
scene_CLT.setImageCenter(center_CLT.getImageCenter()); // set back-propagate mode
boolean batch_run =clt_parameters.batch_run; // may be modified for debug
int width=center_CLT.getTilesX()*center_CLT.getTileSize();
int height=center_CLT.getTilesY()*center_CLT.getTileSize();
// window in pixels!
boolean apply_window_filter = (discard_border > 0) || (max_fold > 0) || (min_in_row_col > 0);
final Rectangle window = apply_window_filter ? (new Rectangle(discard_border,discard_border,width-2*discard_border,height-2*discard_border)): null;
boolean cuas_debug = clt_parameters.imp.cuas_debug && !batch_run; // save debug images (and show them if not in batch mode)
String ts = scene_CLT.getImageName();
// final double dts =scene_CLT.getTimeStamp();
double [] scene_xyz = OpticalFlow.ZERO3;
double [] scene_atr = OpticalFlow.ZERO3;
ErsCorrection ers_reference = center_CLT.getErsCorrection();
scene_xyz = ers_reference.getSceneXYZ(ts); // saved @ reference, relative to reference
scene_atr = ers_reference.getSceneATR(ts); // saved @ reference, relative to reference
if ((scene_atr==null) || (scene_xyz == null)) {
System.out.println("backPropagate() BUG : ((scene_atr==null) || (scene_xyz == null)");
return null;
}
double [] scene_ers_xyz_dt = ers_reference.getSceneErsXYZ_dt(ts);
double [] scene_ers_atr_dt = ers_reference.getSceneErsATR_dt(ts);
scene_CLT.getErsCorrection().setErsDt(
scene_ers_xyz_dt, // double [] ers_xyz_dt,
scene_ers_atr_dt); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
double [][] scene_pXpYD=OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_center, // final double [] disparity_ref, // invalid tiles - NaN in disparity
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scene_CLT, // final QuadCLT scene_QuadClt,
center_CLT, // final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
ImageDtt.THREADS_MAX); // int threadsMax)
final int width = center_CLT.getTilesX()*center_CLT.getTileSize();
final int height = center_CLT.getTilesY()*center_CLT.getTileSize();
final int num_sens = center_CLT.getNumSensors();
final int num_pix = width*height;
final double [][] fpn = new double [num_sens][num_pix];
final double [][] fpn_weights = new double [num_sens][num_pix];
final Thread[] threads = ImageDtt.newThreadArray();
final AtomicInteger ai = new AtomicInteger(0);
if (window != null) {
center_CLT.windowPsPyD(
scene_pXpYD, // final double [][] pXpYD,
window, // final Rectangle window) // window in pixels!
max_fold, // final double max_fold)
min_in_row_col); // final int min_in_row_col, // Minimal number of defined tiles in a row/column
}
int rendered_width = scene_CLT.getErsCorrection().getSensorWH()[0];
boolean showPxPyD = cuas_debug; // false;
if (showPxPyD) {
int dbg_width = rendered_width/GPUTileProcessor.DTT_SIZE;
int dbg_height = scene_pXpYD.length/dbg_width;
String [] dbg_titles = new String[] {"pX","pY","Disparity"};
double [][] dbg_img = new double [dbg_titles.length][scene_pXpYD.length];
for (int i = 0; i < dbg_img.length; i++) {
Arrays.fill(dbg_img[i], Double.NaN);
for (int nscene = first_index; nscene <= last_index ; nscene++) { // was reversed ***
if (debugLevel > -3) {
System.out.println ("Processing scene "+nscene+" ("+quadCLTs[nscene].getImageName()+"). Last is "+last_index);
}
for (int nTile = 0; nTile < scene_pXpYD.length; nTile++){
if (scene_pXpYD[nTile] != null) {
for (int i = 0; i < scene_pXpYD[nTile].length; i++) {
dbg_img[i][nTile] = scene_pXpYD[nTile][i];
double [][][] diff_src_synth_weights = backPropagate(
clt_parameters, // CLTParameters clt_parameters,
discard_border, // final int discard_border,
max_fold, // final double max_fold,
min_in_row_col, // final int min_in_row_col, // Minimal number of defined tiles in a row/column
invert_margins, // final int invert_margins, // 1 Expand image each side when inverting tasks
invert_gap2, // final int invert_gap2, // 10 // Maximal dual gap size for inversion (depends on scanning radius in tiles)
invert_iters, // final int invert_iters, // 4 Enhance inversion iterations
invert_tolerance, // final double invert_tolerance,// 0.001 Finish enhancing when last change was lower than
center_CLT, // final QuadCLT center_CLT,
quadCLTs[nscene], // final QuadCLT scene_CLT,
disparity_center, // double [] disparity_center
debugLevel); // final int debugLevel)
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nSens = ai.getAndIncrement(); nSens < num_sens; nSens = ai.getAndIncrement()) {
double [] diffs_sens = diff_src_synth_weights[0][nSens];
double [] weights = diff_src_synth_weights[1][nSens];
for (int npix = 0; npix < num_pix; npix++) {
double d = diffs_sens[npix];
if (!Double.isNaN(d)) {
double w = weights[npix];
fpn[nSens][npix] += d * w;
fpn_weights[nSens][npix] += w;
}
}
}
}
}
}
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_img,
dbg_width,
dbg_height,
true,
scene_CLT.getImageName()+"-back-pXpYD",
dbg_titles);
};
}
ImageDtt.startAndJoin(threads);
}
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nSens = ai.getAndIncrement(); nSens < num_sens; nSens = ai.getAndIncrement()) {
for (int npix = 0; npix < num_pix; npix++) {
fpn[nSens][npix] /= fpn_weights[nSens][npix];
}
}
}
};
}
ImageDtt.startAndJoin(threads);
return new double [][][] {fpn, fpn_weights};
// Skipping MB
TpTask[][] tp_tasks;
tp_tasks = new TpTask[1][];
tp_tasks[0] = GpuQuad.setInterTasks( // "true" reference, with stereo actual reference will be offset
scene_CLT.getNumSensors(),
rendered_width, // should match output size, pXpYD.length
!scene_CLT.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
scene_pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
scene_CLT.getErsCorrection(), // final GeometryCorrection geometryCorrection,
clt_parameters.imp.disparity_corr, // 04/07/2023 // 0.0, // final double disparity_corr,
-1, // 0, // margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
ImageDtt.THREADS_MAX); // final int threadsMax) // maximal number of threads to launch
scene_CLT.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
ImageDtt image_dtt = new ImageDtt(
scene_CLT.getNumSensors(),
clt_parameters.transform_size,
clt_parameters.img_dtt,
scene_CLT.isAux(),
scene_CLT.isMonochrome(),
scene_CLT.isLwir(),
clt_parameters.getScaleStrength(scene_CLT.isAux()),
scene_CLT.getGPU());
boolean use_reference = false;
int [] wh = null;
final boolean show_nan = true;
int erase_clt = show_nan ? 1:0;
image_dtt.preSetReferenceTD( // do not run execConvertDirect, exit after updating tasks
clt_parameters.img_dtt, // , // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tp_tasks[0], // final TpTask[] tp_tasks,
false, // final boolean keep_tiles_offsets, // keep per-sensors offsets in tp_tasks
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
debugLevel); // final int globalDebugLevel)
// invert tasks here **************************************************
String dbg_title=center_CLT.getImageName()+"-PREINVERT-MARGIN_"+invert_margins;
tp_tasks[0] = invertTask(
clt_parameters,//CLTParameters clt_parameters,
center_CLT, // final QuadCLT center_CLT,
tp_tasks[0], // TpTask [] direct_tasks,
center_CLT.getNumSensors(), // final int numSensors,
center_CLT.getTilesX(), // final int tilesX,
center_CLT.getTilesY(), // final int tilesY,
center_CLT.getTileSize(), // final int tileSize,
invert_margins, // final int margins, //
invert_gap2, // final int invert_gap2, // 10 // Maximal dual gap size for inversion (depends on scanning radius in tiles)
invert_iters, // final int num_iter,
invert_tolerance, // final double invert_tolerance,
dbg_title); // String dbg_title)
image_dtt.preSetReferenceTD( // do not run execConvertDirect, exit after updating tasks
clt_parameters.img_dtt, // , // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tp_tasks[0], // final TpTask[] tp_tasks,
true, // final boolean keep_tiles_offsets, // keep per-sensors offsets in tp_tasks
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
debugLevel); // final int globalDebugLevel)
image_dtt.execConvertDirect(
use_reference, // boolean use_reference_buffer,
wh, // int [] wh,
erase_clt); //int erase_clt) {
double [][][] result = scene_CLT.renderDoubleFromTD (
wh, // null, // int [] wh,
use_reference); // boolean use_reference
image_dtt.getGPU().updateTasks(
tp_tasks[0],
false);
if (cuas_debug) {
String [] titles_top = {"src_images","back"};
double [][][] src_img = scene_CLT.getImageData();
double [][][] dbg_data = new double [titles_top.length][result.length][result[0][0].length];
String title = scene_CLT.getImageName()+"-BACKPROPAGATE";
String [] titles = new String[result.length];
for (int i = 0; i < result.length; i++) {
titles[i] = "SENS-"+i;
dbg_data[0][i] = src_img[i][0];
dbg_data[1][i] = result[i][0];
}
ImagePlus imp= ShowDoubleFloatArrays.showArraysHyperstack(
dbg_data, // double[][][] pixels,
width, // int width,
title, // String title, "time_derivs-rt"+diff_time_rt+"-rxy"+diff_time_rxy,
titles, // String [] titles, // all slices*frames titles or just slice titles or null
titles_top, // String [] frame_titles, // frame titles or null
!batch_run); // boolean show)
if ((imp != null) && !batch_run) {
//refCLT
String suffix ="-BACKPROPAGATE";
center_CLT.saveImagePlusInModelDirectory(
suffix, // String suffix, // null - use title from the imp
imp); // ImagePlus imp)
}
}
scene_CLT.setImageCenter(null); // disable back-propagate mode
return result;
}
public static double [][][] backPropagate_dbg(
public static double [][][] backPropagate(
CLTParameters clt_parameters,
final int discard_border,
final double max_fold,
final int min_in_row_col, // Minimal number of defined tiles in a row/column
final int invert_margins, // 1 Expand image each side when inverting tasks
final int invert_margins, // 0 Expand image each side when inverting tasks
final int invert_gap2, // 10 // Maximal dual gap size for inversion (depends on scanning radius in tiles)
final int invert_iters, // 4 Enhance inversion iterations
final double invert_tolerance,// 0.001 Finish enhancing when last change was lower than
......@@ -660,8 +558,6 @@ public class CorrectionFPN {
final QuadCLT scene_CLT,
final double [] disparity_center,
final int debugLevel) {
//CorrectionFPN.class mb_en == false;
// boolean condition_dsi = true;
scene_CLT.setImageCenter(center_CLT.getImageCenter()); // set back-propagate mode
boolean batch_run =clt_parameters.batch_run; // may be modified for debug
int width=center_CLT.getTilesX()*center_CLT.getTileSize();
......@@ -747,6 +643,7 @@ public class CorrectionFPN {
ImageDtt.THREADS_MAX); // final int threadsMax) // maximal number of threads to launch
scene_CLT.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
// maybe no need to reload same image?
ImageDtt image_dtt = new ImageDtt(
scene_CLT.getNumSensors(),
clt_parameters.transform_size,
......@@ -771,11 +668,8 @@ public class CorrectionFPN {
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
debugLevel); // final int globalDebugLevel)
// invert tasks here **************************************************
String dbg_title=center_CLT.getImageName()+"-PREINVERT-MARGIN_"+invert_margins;
boolean do_invert = true;
String dbg_title =cuas_debug ? (center_CLT.getImageName()+"-PREINVERT-MARGIN_"+invert_margins): null;
if (do_invert) {
tp_tasks[0] = invertTask(
clt_parameters,//CLTParameters clt_parameters,
center_CLT, // final QuadCLT center_CLT,
......@@ -789,7 +683,6 @@ public class CorrectionFPN {
invert_iters, // final int num_iter,
invert_tolerance, // final double invert_tolerance,
dbg_title); // String dbg_title)
}
image_dtt.preSetReferenceTD( // do not run execConvertDirect, exit after updating tasks
clt_parameters.img_dtt, // , // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
......@@ -800,67 +693,126 @@ public class CorrectionFPN {
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
debugLevel); // final int globalDebugLevel)
// quadCLTs[scene_index].setImageCenter(center_CLT.getImageCenter());
double [][] image_center = scene_CLT.getImageCenter(); // backup
//center_CLT
if (!do_invert) {
scene_CLT.setImageCenter(null);
scene_CLT.no_kernels = true; // fooling execConvertDirect to skip kernels use
// scene_CLT.getGPU().rectilinear = true;
// use source images, not average combo
//double [][] getImageCenter()
}
image_dtt.execConvertDirect(
use_reference, // boolean use_reference_buffer,
wh, // int [] wh,
erase_clt); //int erase_clt) {
if (!do_invert) {
scene_CLT.no_kernels = false;
}
erase_clt, // int erase_clt) {
true, // boolean no_kernels)
true); // boolean use_center_image)
double [][][] result = scene_CLT.renderDoubleFromTD (
wh, // null, // int [] wh,
use_reference); // boolean use_reference
/*
image_dtt.getGPU().updateTasks(
tp_tasks[0],
false);
*/
int order = 2;
scene_CLT.setImageCenter(null); // disable back-propagate mode
double [][] coeffs = photometric(
scene_CLT.getImageData(), // inal double [][][] raw_img,
result, // final double [][][] synth_img,
width, // final int width,
order, // final int order,
debugLevel); // final int debugLevel){
final double um_sigma = 5;
final int good_margins = 12;
final double threshold= 20; // 5;
final double w_blur = 2.5; // 5;
double [][] weights = getFPNWeights(
result, // final double [][][] synth_img,
um_sigma, // final double um_sigma,
good_margins, // final int good_margins,
threshold, // final double threshold,
w_blur, // final double w_blur,
width); // final int width
if (debugLevel > -3) {
for (int nsens = 0; nsens < coeffs.length; nsens++) {
System.out.println (String.format("%2d: C=%8f B=%8f A=%8f", nsens, coeffs[nsens][0], coeffs[nsens][1], coeffs[nsens][2]));
}
}
double [][][] source_img = scene_CLT.getImageData();
double [][] diff_src_synth = new double [source_img.length][source_img[0][0].length];
for (int nsens = 0; nsens < coeffs.length; nsens++) {
diff_src_synth[nsens] = new double[diff_src_synth[nsens].length]; // {corr_img[nsens][0].clone()};
for (int i = 0; i < diff_src_synth[nsens].length; i++) {
double d = result[nsens][0][i]; // src_img[nsens][0][i];
diff_src_synth[nsens][i] = source_img[nsens][0][i] - (coeffs[nsens][0] + coeffs[nsens][1] * d + coeffs[nsens][2]*d*d);
}
}
if (cuas_debug) {
String [] titles_top = {"src_images","back"};
String [] titles_top = {"src_images","back", "diff", "weights"};
double [][][] src_img = scene_CLT.getImageData();
double [][][] dbg_data = new double [titles_top.length][result.length][result[0][0].length];
String title = scene_CLT.getImageName()+"-BACKPROPAGATE";
String title = scene_CLT.getImageName()+"-BACKPROPAGATE-DIFF";
String [] titles = new String[result.length];
for (int i = 0; i < result.length; i++) {
titles[i] = "SENS-"+i;
dbg_data[0][i] = src_img[i][0];
dbg_data[1][i] = result[i][0];
dbg_data[2][i] = diff_src_synth[i];
dbg_data[3][i] = weights[i];
}
ImagePlus imp= ShowDoubleFloatArrays.showArraysHyperstack(
dbg_data, // double[][][] pixels,
width, // int width,
title, // String title, "time_derivs-rt"+diff_time_rt+"-rxy"+diff_time_rxy,
titles, // String [] titles, // all slices*frames titles or just slice titles or null
titles_top, // String [] frame_titles, // frame titles or null
!batch_run); // boolean show)
if ((imp != null) && !batch_run) {
//refCLT
String suffix ="-BACKPROPAGATE";
center_CLT.saveImagePlusInModelDirectory(
suffix, // String suffix, // null - use title from the imp
imp); // ImagePlus imp)
ImagePlus imp= ShowDoubleFloatArrays.showArraysHyperstack(
dbg_data, // double[][][] pixels,
width, // int width,
title, // String title, "time_derivs-rt"+diff_time_rt+"-rxy"+diff_time_rxy,
titles, // String [] titles, // all slices*frames titles or just slice titles or null
titles_top, // String [] frame_titles, // frame titles or null
!batch_run); // boolean show)
if ((imp != null) && !batch_run) {
//refCLT
String suffix ="-BACKPROPAGATE-DIFF-WEIGHTS";
center_CLT.saveImagePlusInModelDirectory(
suffix, // String suffix, // null - use title from the imp
imp); // ImagePlus imp)
}
}
}
scene_CLT.setImageCenter(null); // disable back-propagate mode
return result;
return new double [][][] {diff_src_synth, weights}; // result;
}
public static ImagePlus saveShowFPNWeights(
		double [][][] fpn_weights,
		QuadCLT center_CLT,
		String suffix,
		boolean save,
		boolean show) {
	// Renders the {FPN, FPN-WEIGHTS} frame pair as a hyperstack (one slice
	// per sensor) and optionally saves it into the model directory.
	final int num_sens = center_CLT.getNumSensors();
	final int width = center_CLT.getTilesX() * center_CLT.getTileSize();
	final String [] frame_titles = {"FPN","FPN-WEIGHTS"};
	final String [] slice_titles = new String [num_sens];
	for (int nsens = 0; nsens < num_sens; nsens++) {
		slice_titles[nsens] = "SENSOR-" + nsens;
	}
	final String title = center_CLT.getImageName() + suffix;
	ImagePlus imp = ShowDoubleFloatArrays.showArraysHyperstack(
			fpn_weights,  // double[][][] pixels,
			width,        // int width,
			title,        // String title,
			slice_titles, // String [] titles, // all slices*frames titles or just slice titles or null
			frame_titles, // String [] frame_titles, // frame titles or null
			show);        // boolean show)
	if (save) {
		center_CLT.saveImagePlusInModelDirectory(
				suffix, // String suffix, // null - use title from the imp
				imp);   // ImagePlus imp)
	}
	return imp;
}
public static TpTask [] invertTask(
CLTParameters clt_parameters,
final QuadCLT center_CLT,
......@@ -891,7 +843,6 @@ public class CorrectionFPN {
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
// PolynomialApproximation pa = new PolynomialApproximation();
for (int nTile = ai.getAndIncrement(); nTile < direct_tasks.length; nTile = ai.getAndIncrement()){
TpTask tile = direct_tasks[nTile];
int tx = tile.getTileX();
......@@ -1127,7 +1078,7 @@ public class CorrectionFPN {
dbg_data[indx_inverse + indx_Y][nsens][ntile_marg] = inverse_data[nsens][1][ntile_marg];
}
}
ImagePlus imp = ShowDoubleFloatArrays.showArraysHyperstack(
ShowDoubleFloatArrays.showArraysHyperstack(
dbg_data, // double[][][] pixels,
tilesX_marg, // int width,
dbg_title, // String title, "time_derivs-rt"+diff_time_rt+"-rxy"+diff_time_rxy,
......@@ -1153,12 +1104,8 @@ public class CorrectionFPN {
final int num_tiles = tilesX * tilesY;
final int width = tilesX * tileSize;
final int height = tilesY * tileSize;
// final int tilex_marg_dbg = 25;
// final int tiley_marg_dbg = 61;
final int tile_marg_dbg = 5027;
final int tile_marg_dbg = -5027;
final double invert_tolerance2 = invert_tolerance * invert_tolerance;
// Rectangle full = new Rectangle(0,0,tilesX,tilesY);
// Rectangle inner = new Rectangle(margin,margin,tilesX-2* margin,tilesY-2* margin);
final double [][][] iapprox = new double [num_sensors][2][3];
for (int nsens = 0; nsens < num_sensors; nsens++) {
double det = approx[nsens][0][0]*approx[nsens][1][1]- approx[nsens][0][1]*approx[nsens][1][0];
......@@ -1315,56 +1262,49 @@ public class CorrectionFPN {
}
return xy;
}
public static TpTask[] setInverseTasks(
final double [][][] xy, // new double [tiles][sensors][2]. Last [16] sensor (if xy.length == 17) - centerXY. use disparity=0 - it will not be used
final int tilesX, //
final int num_sensors)//
{
final int task_code = (1 << GPUTileProcessor.TASK_CORR_EN) | (1 << GPUTileProcessor.TASK_INTER_EN);
final int tiles = xy.length;
final Thread[] threads = ImageDtt.newThreadArray();
final AtomicInteger ai = new AtomicInteger(00);
final AtomicInteger aTiles = new AtomicInteger(0);
final TpTask[] tp_tasks = new TpTask[tiles]; // aTiles.get()];
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) if (xy[nTile] != null){
int tileY = nTile / tilesX;
int tileX = nTile % tilesX;
TpTask tp_task = new TpTask(num_sensors, tileX, tileY);
tp_task.task = task_code;
double disparity = 0;
tp_task.target_disparity = (float) disparity; // will it be used?
int indx_center = (xy[nTile].length > num_sensors) ? num_sensors : 0;
double [] centerXY = xy[nTile][indx_center];
tp_task.setCenterXY(centerXY); // this pair of coordinates will be used by GPU to set tp_task.xy and task.disp_dist!
tp_task.xy = new float [num_sensors][2];
for (int nsens = 0; nsens < num_sensors; nsens++) {
// if (nsens<8) {
tp_task.xy[nsens][0] = (float) xy[nTile][nsens][0];
tp_task.xy[nsens][1] = (float) xy[nTile][nsens][1];
// } else {
// double px = tileX * 8 + 4;
// double py = tileY * 8 + 4;
// tp_task.xy[nsens][0] = (float) px;
// tp_task.xy[nsens][1] = (float) py;
// }
}
tp_tasks[aTiles.getAndIncrement()] = tp_task;
}
}
};
}
ImageDtt.startAndJoin(threads);
final TpTask[] tp_tasks_out = new TpTask[aTiles.get()];
System.arraycopy(tp_tasks, 0, tp_tasks_out, 0, tp_tasks_out.length);
return tp_tasks_out;
}
/**
 * Build GPU tile-processor tasks from precomputed per-sensor pixel
 * coordinates (used for the inverted / back-propagated geometry).
 * Tiles with a null xy entry are skipped; the returned array is compacted
 * to contain only the created tasks.
 *
 * @param xy          [tiles][sensors][2] per-sensor pixel coordinates; if a
 *                    tile row has num_sensors+1 entries, the extra last entry
 *                    is used as the tile's centerXY
 * @param tilesX      number of tiles per row (converts linear tile index to X/Y)
 * @param num_sensors number of sensors per tile
 * @return compacted array of TpTask, one per non-null tile
 */
public static TpTask[] setInverseTasks(
		final double [][][] xy, // new double [tiles][sensors][2]. Last [16] sensor (if xy.length == 17) - centerXY. use disparity=0 - it will not be used
		final int tilesX, //
		final int num_sensors)//
{
	final int task_code = (1 << GPUTileProcessor.TASK_CORR_EN) | (1 << GPUTileProcessor.TASK_INTER_EN);
	final int tiles = xy.length;
	final Thread[] threads = ImageDtt.newThreadArray();
	final AtomicInteger ai = new AtomicInteger(0); // was "00" - an octal literal; same value, clearer as decimal
	final AtomicInteger aTiles = new AtomicInteger(0);
	final TpTask[] tp_tasks = new TpTask[tiles]; // upper bound, compacted to aTiles.get() below
	for (int ithread = 0; ithread < threads.length; ithread++) {
		threads[ithread] = new Thread() {
			@Override
			public void run() {
				for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) if (xy[nTile] != null){
					int tileY = nTile / tilesX;
					int tileX = nTile % tilesX;
					TpTask tp_task = new TpTask(num_sensors, tileX, tileY);
					tp_task.task = task_code;
					tp_task.target_disparity = 0.0f; // not used by inverse tasks
					// Use the dedicated centerXY entry when present, sensor 0 otherwise
					int indx_center = (xy[nTile].length > num_sensors) ? num_sensors : 0;
					// This pair of coordinates will be used by GPU to set tp_task.xy and task.disp_dist!
					tp_task.setCenterXY(xy[nTile][indx_center]);
					tp_task.xy = new float [num_sensors][2];
					for (int nsens = 0; nsens < num_sensors; nsens++) {
						tp_task.xy[nsens][0] = (float) xy[nTile][nsens][0];
						tp_task.xy[nsens][1] = (float) xy[nTile][nsens][1];
					}
					tp_tasks[aTiles.getAndIncrement()] = tp_task;
				}
			}
		};
	}
	ImageDtt.startAndJoin(threads);
	final TpTask[] tp_tasks_out = new TpTask[aTiles.get()];
	System.arraycopy(tp_tasks, 0, tp_tasks_out, 0, tp_tasks_out.length);
	return tp_tasks_out;
}
public static int getImageHeight(
double [][][] image_data,
int width) {
......@@ -1571,9 +1511,6 @@ public class CorrectionFPN {
row, // double [][] row,
col); // double [][] col)
scenes[nscene].applyRowCol();
// scenes[nscene], // final QuadCLT scene,
// row, // final double [][] row,
// col); // final double [][] col )
}
return;
}
......@@ -1787,5 +1724,127 @@ public class CorrectionFPN {
return imp;
}
/**
 * Per-sensor photometric calibration of raw images against synthetic ones.
 * Only uses color plane [0] of both inputs.
 *
 * For each sensor, fits raw ~ c + b*synth + a*synth^2 over all pixels where
 * both images are defined (non-NaN), using
 * PolynomialApproximation.polynomialApproximation1d. For order <= 0 only the
 * average offset (raw - synth) is stored in element [0].
 *
 * @param raw_img    [sensor][color][pixel] measured images (color 0 used)
 * @param synth_img  [sensor][color][pixel] rendered/synthetic images
 * @param width      image width in pixels (not used by the fit itself)
 * @param order      polynomial order: > 0 - polynomial fit, <= 0 - average offset
 * @param debugLevel verbosity (currently unused in this method)
 * @return [num_sensors][3] correction coefficients {c, b, a} (a == 0 for order < 2)
 */
public static double [][] photometric(
		final double [][][] raw_img,
		final double [][][] synth_img,
		final int width,
		final int order,
		final int debugLevel){
	final int num_sensors = raw_img.length;
	final double [][] lwir_corr = new double [num_sensors][3];
	final int num_pix = raw_img[0][0].length;
	final Thread[] threads = ImageDtt.newThreadArray();
	final AtomicInteger ai = new AtomicInteger(0);
	final int poly_debug = 0;
	for (int ithread = 0; ithread < threads.length; ithread++) {
		threads[ithread] = new Thread() {
			public void run() {
				double [][] pa_data = new double [num_pix][]; // reused between sensors
				for (int nSens = ai.getAndIncrement(); nSens < num_sensors; nSens = ai.getAndIncrement()) {
					Arrays.fill(pa_data, null);
					int num_good = 0;
					double avg = 0;
					for (int npix = 0; npix < num_pix; npix++) {
						double raw_d = raw_img[nSens][0][npix];
						double synth_d = synth_img[nSens][0][npix];
						if (!Double.isNaN(raw_d) && !Double.isNaN(synth_d)) {
							pa_data[npix] = new double [] {synth_d, raw_d};
							num_good++;
							avg += (raw_d - synth_d);
						}
					}
					// Guard against 0/0 -> NaN when a sensor has no defined pixels
					avg = (num_good > 0) ? (avg / num_good) : 0.0;
					if (order > 0) {
						double [] pa_coeff = (new PolynomialApproximation(poly_debug)).polynomialApproximation1d(pa_data, order);
						// Store the raw polynomial coefficients: raw ~ c + b*d + a*d^2.
						// (Removed dead re-parameterization into A/B/C via the quadratic
						// root: its results were discarded and it could take sqrt of a
						// negative discriminant.)
						lwir_corr[nSens][0] = pa_coeff[0];                               // c
						lwir_corr[nSens][1] = pa_coeff[1];                               // b
						lwir_corr[nSens][2] = (pa_coeff.length > 2) ? pa_coeff[2] : 0.0; // a
					} else {
						lwir_corr[nSens][0] = avg;
					}
				}
			}
		};
	}
	ImageDtt.startAndJoin(threads);
	return lwir_corr;
}
/**
 * Build per-sensor, per-pixel weight maps for FPN (fixed-pattern noise)
 * accumulation from the synthetic (rendered) images.
 *
 * Per sensor, operating in place on the weights buffer:
 *  1. Copy synthetic pixels (NaN replaced with 0) and Gaussian-blur them
 *     with um_sigma to obtain a low-pass version.
 *  2. Unsharp mask: d = (pixel - blurred)/threshold; weight = max(1 - d*d, 0),
 *     so high-contrast (detail/edge) areas get low weight.
 *  3. Override: weight 0 where the synthetic pixel is NaN (undefined),
 *     weight 1.0 in the good_margins-wide border frame (outside rborder).
 *  4. Smooth the resulting weight map with a second Gaussian blur (w_blur).
 *
 * @param synth_img    [sensor][color][pixel] synthetic images; only color 0 is used
 * @param um_sigma     Gaussian sigma of the unsharp-mask low-pass
 * @param good_margins border width (pixels) forced to weight 1.0
 * @param threshold    high-pass magnitude at which weight falls to 0
 * @param w_blur       sigma of the final weight-smoothing blur
 * @param width        image width in pixels (height derived as num_pix/width)
 * @return [num_sensors][num_pix] weight maps (approximately in [0..1])
 */
public static double [][] getFPNWeights(
		final double [][][] synth_img,
		final double um_sigma,
		final int good_margins,
		final double threshold,
		final double w_blur,
		final int width) {
	final int num_sensors = synth_img.length;
	final int num_pix = synth_img[0][0].length;
	final int height = num_pix/width;
	final double [][] weights = new double [num_sensors][num_pix];
	// Inner rectangle; pixels OUTSIDE it (the border frame) are forced to weight 1.0
	final Rectangle rborder = new Rectangle(good_margins,good_margins,width - 2*good_margins, height - 2 * good_margins);
	final Thread[] threads = ImageDtt.newThreadArray();
	final AtomicInteger ai = new AtomicInteger(0);
	// unsharp max
	for (int ithread = 0; ithread < threads.length; ithread++) {
		threads[ithread] = new Thread() {
			public void run() {
				DoubleGaussianBlur gb = new DoubleGaussianBlur(); // one instance per thread
				for (int nSens = ai.getAndIncrement(); nSens < num_sensors; nSens = ai.getAndIncrement()) {
					double [] pixels = synth_img[nSens][0];
					double [] w = weights[nSens];
					// Start from a NaN-free copy of the synthetic image
					System.arraycopy(pixels,0,w,0,num_pix);
					for (int npix = 0; npix < num_pix; npix++) if (Double.isNaN(pixels[npix])){
						// bad_pix[npix] = true;
						w[npix] = 0;
					}
					// Low-pass version for the unsharp mask (in place)
					gb.blurDouble(
							w, //
							width, // terrain woi
							height,
							um_sigma, // double sigmaX,
							um_sigma, // double sigmaY,
							0.01); // double accuracy)
					int npix=0;
					for (int y = 0; y < height; y++) {
						for (int x = 0; x < width; x++) {
							// Normalized high-pass response; quadratic falloff to 0 at |d| >= 1
							double d = (pixels[npix] - w[npix])/threshold;
							w[npix] = Math.max(1.0 - d * d, 0);
							if (Double.isNaN(pixels[npix])) {
								w[npix] = 0.0; // bad
							} else if (!rborder.contains(x, y)) {
								w[npix] = 1.0; // good
							}
							npix++;
						}
					}
					// Final smoothing of the weight map
					gb.blurDouble(
							w, //
							width, // terrain woi
							height,
							w_blur, // double sigmaX,
							w_blur, // double sigmaY,
							0.01); // double accuracy)
				}
			}
		};
	}
	ImageDtt.startAndJoin(threads);
	return weights;
}
}
......@@ -697,16 +697,20 @@ public class Cuas {
final int num_colors = refCLT.getNumColors();
final int tilesX = refCLT.getTilesX();
final int tilesY = refCLT.getTilesY();
final double[][] tile_diffs = (debug_pxpyd && (cuasData != null)) ? new double [quadCLTs.length][tilesX*tilesY]:null;
// final double[][] tile_diffs = (debug_pxpyd && (cuasData != null)) ? new double [quadCLTs.length][tilesX*tilesY]:null;
final double[][] tile_diffs = (debug_pxpyd ) ? new double [quadCLTs.length][tilesX*tilesY]:null;
if (tile_diffs != null) {
for (int i = 0; i < tile_diffs.length; i++) {
Arrays.fill(tile_diffs[i], Double.NaN);
}
}
final double [][] dnum_vars = (debug_pxpyd && (cuasData != null)) ? new double [quadCLTs.length][tilesX*tilesY]:null;
// final double [][] dnum_vars = (debug_pxpyd && (cuasData != null)) ? new double [quadCLTs.length][tilesX*tilesY]:null;
final double [][] dnum_vars = (debug_pxpyd ) ? new double [quadCLTs.length][tilesX*tilesY]:null;
double [][][] dbg_PxPyD = debug_pxpyd? (new double [dbg_slices][quadCLTs.length][]):null;
double [][][] dbg_PxPyD_slice = debug_pxpyd? (new double [1][][]):null;
final float [][] dbg_fclt = debug_pxpyd ? new float [quadCLTs.length][] : null;
int dbg_scene = -95;
if (ref_pXpYD == null) {
ref_pXpYD = OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene
......@@ -732,7 +736,6 @@ public class Cuas {
}
}
double [] stereo_atr = (stereo_atr_in != null)? stereo_atr_in: OpticalFlow.ZERO3; // maybe later play with rotated camera
boolean mode_cuas = (stereo_atr[0] != 0) || (stereo_atr[1] != 0) || (stereo_atr[2] != 0);
......@@ -758,7 +761,7 @@ public class Cuas {
num_colors, // int num_colors,
tilesX, // int width, // should be multiple of width
tilesY); // int height) {
boolean show_src = false; // cuas_debug;
final Thread[] threads = ImageDtt.newThreadArray(ImageDtt.THREADS_MAX);
final AtomicInteger ai = new AtomicInteger(0);
for (int nscene = 0; nscene < quadCLTs.length ; nscene++) if (quadCLTs[nscene] != null){
......@@ -766,6 +769,23 @@ public class Cuas {
if (nscene== dbg_scene) {
System.out.println("renderSceneSequence(): nscene = "+nscene);
}
if (show_src) {
int tile_size = refCLT.getTileSize();
String title = quadCLTs[nscene].getImageName()+"-SRC";
double [][] dbg_data = new double [quadCLTs[nscene].getImageData().length][];
String [] titles = new String [dbg_data.length];
for (int i = 0; i < dbg_data.length; i++) {
titles[i] = "SENSOR-"+i;
dbg_data[i] = quadCLTs[nscene].getImageData()[i][0];
}
ShowDoubleFloatArrays.showArrays(
dbg_data,
tilesX * tile_size,
tilesY * tile_size,
true,
title,
titles);
}
String ts = quadCLTs[nscene].getImageName();
final double dts = quadCLTs[nscene].getTimeStamp();
double [] scene_xyz = OpticalFlow.ZERO3;
......@@ -873,6 +893,9 @@ public class Cuas {
}
}
final float [] ffclt = fclt[0];
if (dbg_fclt != null) {
dbg_fclt[nscene] = ffclt;
}
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
......@@ -984,6 +1007,35 @@ public class Cuas {
}
ImageDtt.startAndJoin(threads);
*/
if (dbg_fclt != null) {
double [][] dbg_data = new double [quadCLTs.length][];
int tile_size = refCLT.getTileSize();
String suffix ="-DBG_FCLT_SEQ";
String [] titles = new String [quadCLTs.length];
String title = refCLT.getImageName() + suffix;
for (int nscene = 0; nscene < titles.length; nscene++) {
titles[nscene] = quadCLTs[nscene].getImageName();
dbg_data[nscene] = refCLT. convertCenterClt(
new float[][] {dbg_fclt[nscene]})[0];//float [][] fclt)
}
ImagePlus imp = ShowDoubleFloatArrays.makeArrays(
dbg_data, // double[][] pixels,
tilesX*tile_size,// int width,
tilesY*tile_size,// int height,
title, // String title,
titles); // String [] titles)
if (imp != null) {
if (!batch_run) {
imp.show();
}
refCLT.saveImagePlusInModelDirectory(
suffix, // String suffix, // null - use title from the imp
imp); // ImagePlus imp)
}
}
if (dbg_PxPyD != null) {
String [] debug_frame_titles = {"pX","pY","Disparity"};
String [] debug_titles = new String[quadCLTs.length];
......@@ -1054,6 +1106,7 @@ public class Cuas {
imp); // ImagePlus imp)
}
}
return newCuasData; // new float [][][] {sumFclt,sum_weights};
}
......
......@@ -1274,13 +1274,24 @@ public class GpuQuad{ // quad camera description
* Copy a set of images to the GPU (if they are new)
* @param force set even if there is no new Bayer data available
*/
public void setBayerImages(
boolean force) {
boolean force) {
setBayerImages(
force, // boolean force,
false); // boolean center)
}
public void setBayerImages(
boolean force,
boolean center) {
if (!force && this.gpuTileProcessor.bayer_set && !quadCLT.hasNewImageData()) {
return;
}
double [][] bayer_center = quadCLT.getImageCenter();
if (bayer_center != null) {
// if (bayer_center != null) {
if (center) {
quadCLT.getResetImageCenter();
setBayerImage(
bayer_center,
......@@ -1909,6 +1920,7 @@ public class GpuQuad{ // quad camera description
}
}
/**
* Direct CLT conversion and aberration correction
* Convert and save TD representation in either normal or reference scene. Reference scene TD representation
......@@ -1922,6 +1934,31 @@ public class GpuQuad{ // quad camera description
boolean ref_scene,
int [] wh,
int erase_clt) {
execConvertDirect(
ref_scene, // boolean ref_scene,
wh, // int [] wh,
erase_clt, // int erase_clt,
false, // boolean no_kernels)
false); // boolean use_center_image)
}
/**
* Direct CLT conversion and aberration correction
* Convert and save TD representation in either normal or reference scene. Reference scene TD representation
* is used for interscene correlation (for "IMU")
* @param ref_scene save result into a separate buffer for interscene correlation when true.
* @param wh window width, height (or null)
* @param erase_clt erase CLT data. Only needed before execImcltRbgAll() if not all the
* tiles are converted. <0 - do not erase, 0 - erase to 0, 1 - erase to NaN
* @param no_kernels skip deconvolution
* @param use_center_image use a single center image for all sensors (back propagation mode)
*/
public void execConvertDirect(
boolean ref_scene,
int [] wh,
int erase_clt,
boolean no_kernels,
boolean use_center_image) {
if (this.gpuTileProcessor.GPU_CONVERT_DIRECT_kernel == null)
{
IJ.showMessage("Error", "No GPU kernel: GPU_CONVERT_DIRECT_kernel");
......@@ -1932,14 +1969,18 @@ public class GpuQuad{ // quad camera description
IJ.showMessage("Error", "No GPU kernel: GPU_ERASE_CLT_TILES_kernel");
return;
}
boolean skip_kernels = rectilinear || (quadCLT == null) || no_kernels;
if (!rectilinear) {
// if ((quadCLT == null) || (quadCLT.getImageCenter() != null)) { // ????????????????
if ((quadCLT != null) && (quadCLT.getImageCenter() == null)) {
// if ((quadCLT != null) && (quadCLT.getImageCenter() == null)) {
if (!skip_kernels) {
setConvolutionKernels(false); // set kernels if they are not set already
}
setBayerImages(false); // set Bayer images if this.quadCLT instance has new ones
setBayerImages( // set Bayer images if this.quadCLT instance has new ones
false, // boolean force,
use_center_image); // boolean center)
}
boolean skip_kernels = rectilinear || (quadCLT == null) || (quadCLT.getImageCenter() != null) || !this.gpuTileProcessor.kernels_set || (quadCLT.no_kernels);
// boolean skip_kernels = rectilinear || (quadCLT == null) || (quadCLT.getImageCenter() != null) || !this.gpuTileProcessor.kernels_set || (quadCLT.no_kernels);
int [] wh1 = handleWH(
wh, // int [] wh_in,
......@@ -1976,31 +2017,6 @@ public class GpuQuad{ // quad camera description
}
}
Pointer kernelParameters;
// boolean rectilinear_or_back = rectilinear || (quadCLT == null) || (quadCLT.getImageCenter() != null);
/* if (rectilinear) {
kernelParameters = Pointer.to(
Pointer.to(new int[] { num_cams}), // int num_cams,
Pointer.to(new int[] { num_colors}), // int num_colors,
Pointer.to(gpu_bayer), // gpu_kernel_offsets), // just a legal pointer to gpu memory, will not be used
Pointer.to(gpu_bayer), // gpu_kernels), // just a legal pointer to gpu memory, will not be used
Pointer.to(gpu_bayer),
Pointer.to(gpu_ftasks),
Pointer.to(gpu_clt_selected), // gpu_clt), // select which one
Pointer.to(new int[] { mclt_stride }), // should be input image stride (in floats), not mclt!
Pointer.to(new int[] { num_task_tiles }),
// move lpf to 4-image generator kernel - DONE
Pointer.to(new int[] { 0 }), // lpf_mask
Pointer.to(new int[] { wh[0]}), // img_width}), // int woi_width,
Pointer.to(new int[] { wh[1]}), // img_height}), // int woi_height,
Pointer.to(new int[] { 0}), // int kernels_hor,
Pointer.to(new int[] { 0}), // int kernels_vert);
Pointer.to(gpu_active_tiles),
Pointer.to(gpu_num_active_tiles),
Pointer.to(new int[] { tilesX })
);
} else {// !rectilinear (normal way)
*/
kernelParameters = Pointer.to(
Pointer.to(new int[] { num_cams}), // int num_cams,
Pointer.to(new int[] { num_colors}), // int num_colors,
......
......@@ -1447,6 +1447,14 @@ public class ImageDtt extends ImageDttCPU {
int erase_clt) {
gpuQuad.execConvertDirect(use_reference_buffer, wh, erase_clt); // put results into a "reference" buffer
}
/**
 * Direct CLT conversion and aberration correction — thin delegate to the GPU
 * implementation; puts results into a "reference" buffer when requested.
 * @param use_reference_buffer save result into a separate buffer for interscene correlation when true
 * @param wh                   window width, height (or null)
 * @param erase_clt            erase CLT data: &lt;0 - do not erase, 0 - erase to 0, 1 - erase to NaN
 * @param no_kernels           skip deconvolution kernels
 * @param use_center_image     use a single center image for all sensors (back propagation mode)
 */
public void execConvertDirect(
		boolean use_reference_buffer,
		int [] wh,
		int erase_clt,
		boolean no_kernels,
		boolean use_center_image){
	gpuQuad.execConvertDirect( // put results into a "reference" buffer
			use_reference_buffer, // boolean ref_scene,
			wh,                   // int [] wh,
			erase_clt,            // int erase_clt,
			no_kernels,           // boolean no_kernels,
			use_center_image);    // boolean use_center_image
}
public void preSetReferenceTD( // do not run execConvertDirect, exit after updating tasks
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
......
......@@ -674,7 +674,7 @@ min_str_neib_fpn 0.35
// CUAS row/column averaging and task-inversion tuning parameters.
public double  cuas_max_abs_rowcol   = 100.0; // consider pixels where abs(UM difference) does not exceed this value
public double  cuas_outliers_rowcol  = 0.001; // scale weight of the outliers with high difference (to prevent undefined values)
public boolean cuas_reset_first      = false; // Reset average in first scene (for large difference in unfinished row/col)
// Diff artifact removed: the field appeared twice (old value 1, committed value 0); keep the committed value.
public int     cuas_invert_margins   = 0;     // Expand image on each side when inverting tasks
public int     cuas_invert_iters     = 4;     // Enhance inversion iterations
public double  cuas_invert_tolerance = 0.001; // Finish enhancing when the last change was lower than this
public int     cuas_invert_gap2      = 10;    // Maximal dual gap size for inversion (depends on scanning radius in tiles); <0 = use maximal possible
......
......@@ -5186,11 +5186,11 @@ public class OpticalFlow {
}
}
} // while (blue_sky == null)
boolean early_try_back = true;
boolean early_try_back = false; // true;
if ((center_CLT != null) && center_CLT.hasCenterClt()) { // float [] fclt
int fpn_width = center_CLT.getTilesX() * center_CLT.getTileSize(); // see if center_CLT can be used
double [][][] fpn = null;
boolean condition_dsi = true;
boolean condition_dsi = false; // true;
boolean show_fpn = cuas_debug && !clt_parameters.batch_run; //
if (cuas_subtract_fpn) {
int discard_border = clt_parameters.imp.cuas_discard_border;
......@@ -5225,7 +5225,7 @@ public class OpticalFlow {
}
quadCLTs[scene_index].setImageCenter(center_CLT.getImageCenter());
if (early_try_back) {
double [][][] back_prop = CorrectionFPN.backPropagate_dbg(
double [][][] back_prop = CorrectionFPN.backPropagate(
clt_parameters, // CLTParameters clt_parameters,
discard_border, // final int discard_border,
max_fold, // final double max_fold,
......@@ -5241,6 +5241,8 @@ public class OpticalFlow {
}
}
fpn = center_CLT.getCorrectionFPN().readImageFPN ( -1); // int sens_mask);
boolean created_fpn = false;
double [][] fpn_weights = null;
if ((fpn == null) || cuas_calc_fpn) {
if (debugLevel >-3) {
System.out.println("Calculating FPN.");
......@@ -5248,13 +5250,74 @@ public class OpticalFlow {
int num_scenes = quadCLTs.length;
int rot_periods = (int) Math.floor(num_scenes/cuas_rot_period);
int rot_scenes = (int) Math.floor(rot_periods *cuas_rot_period);
int [] rot_range = {0, rot_scenes-1};
// Two full camera rotations to equalize contributions of different offsets
int [] rot_range = {0, rot_scenes-1}; // {0,174}; //{175,349}; // {0, 149}; // {0+140, rot_scenes-1+140};
/*
fpn = CorrectionFPN.calculateFPN(
quadCLTs, // final QuadCLT [] quadCLTs,
rot_range, // final int [] range, // required
-1, // final int sensor_mask,
debugLevel); // final int debugLevel)
*/
double [][][] fpn_and_weights = CorrectionFPN.backPropagate(
clt_parameters, // CLTParameters clt_parameters,
discard_border, // final int discard_border,
max_fold, // final double max_fold,
min_in_row_col, // final int min_in_row_col, // Minimal number of defined tiles in a row/column
cuas_invert_margins, // final int invert_margins, // 1 Expand image each side when inverting tasks
cuas_invert_gap2, // final int invert_gap2, // 10 // Maximal dual gap size for inversion (depends on scanning radius in tiles)
cuas_invert_iters, // final int invert_iters, // 4 Enhance inversion iterations
cuas_invert_tolerance, // final double invert_tolerance,// 0.001 Finish enhancing when last change was lower than
center_CLT, // final QuadCLT center_CLT,
quadCLTs, // final QuadCLT[] quadCLTs,
rot_range[0], // final int first_index,
rot_range[1], // final int last_index,
disparity_center, // double [] disparity_center
debugLevel); // final int debugLevel)
fpn = new double [fpn_and_weights[0].length][1][];
for (int nsens = 0; nsens < fpn.length; nsens++) {
fpn[nsens][0] = fpn_and_weights[0][nsens];
}
// remove later - here just to safe save.
center_CLT.getCorrectionFPN().saveShowFPN(
fpn,// double [][][] fpn,
fpn_width, // int width,
true, // boolean save,
show_fpn, // boolean show) {
QuadCLT.CENTER_FPN_SUFFIX+"-ORIG"); // String suffix)
double [][] image_row_avg = CorrectionFPN.getRowAvgMulti(
fpn, // final double [][][] image_data,
fpn_width, // final int width,
cuas_max_abs_rowcol, // final double max_abs, // only average within +/- max_abs
cuas_outliers_rowcol); // final double weight_outlier)
double [][] image_col_avg = CorrectionFPN.getColAvgMulti(
fpn, // final double [][][] image_data,
fpn_width, // final int width,
cuas_max_abs_rowcol, // final double max_abs, // only average within +/- max_abs
cuas_outliers_rowcol); // final double weight_outlier)
double [][][] fpn_out = CorrectionFPN.applyRowCol(
fpn, // final double [][][] image_data,
image_row_avg, // final double [][] image_row_avg,
image_col_avg, // final double [][] image_col_avg,
false); // final boolean inplace )
fpn = fpn_out;
center_CLT.getCorrectionFPN().saveShowFPN(
fpn,// double [][][] fpn,
fpn_width, // int width,
true, // boolean save,
show_fpn, // boolean show) {
QuadCLT.CENTER_FPN_SUFFIX+"-ROWCOL_RANGE"+rot_range[0]+"-"+rot_range[1]); // String suffix)
String fpn_weights_suffix="-FPN-WEIGHTS";
CorrectionFPN.saveShowFPNWeights(
fpn_and_weights, // double [][][] fpn_weights,
center_CLT, // QuadCLT center_CLT,
fpn_weights_suffix, // String suffix,
true, // boolean save,
show_fpn); // boolean show) {
created_fpn = true;
int dbg_sens = -12; // disable testing
if (cuas_debug && (dbg_sens >= 0)) {
center_CLT.getCorrectionFPN().debugFPN(
......@@ -5274,11 +5337,13 @@ public class OpticalFlow {
}
}
// center_CLT.setImageData(fpn); // included in center_CLT.setApplyFPN(). // setting FPN images to the virtual (center) scene
center_CLT.getCorrectionFPN().saveShowFPN(
fpn,// double [][][] fpn,
fpn_width, // int width,
true, // boolean save,
show_fpn); // boolean show) {
if (created_fpn || show_fpn) {
center_CLT.getCorrectionFPN().saveShowFPN(
fpn,// double [][][] fpn,
fpn_width, // int width,
created_fpn, // boolean save,
show_fpn); // boolean show) {
}
center_CLT.getCorrectionFPN().setApplyFPN(
quadCLTs, // QuadCLT [] quadCLTs,
fpn);// double [][][] fpn)
......
......@@ -6315,6 +6315,7 @@ public class QuadCLTCPU {
if ((imp == null) || (imp.getTitle() == null) || (imp.getTitle().equals(""))) {
return null;
}
System.out.println ("Read "+(imp.getStack().getSize())+" slices from "+file_path);
return readFloatArray(
imp, // ImagePlus imp,
num_slices, //int num_slices, // (0 - all)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment