Commit 3ca96c86 authored by Andrey Filippov's avatar Andrey Filippov

Converted conditioning to multithreaded, started noise comparison

parent 5b919b27
......@@ -41,6 +41,7 @@ import com.elphel.imagej.common.ShowDoubleFloatArrays;
import com.elphel.imagej.jp4.JP46_Reader_camera;
import com.elphel.imagej.readers.EyesisTiff;
import com.elphel.imagej.readers.ImagejJp4Tiff;
import com.elphel.imagej.tileprocessor.ImageDtt;
import ij.CompositeImage;
import ij.IJ;
......@@ -59,7 +60,7 @@ import loci.formats.FormatException;
public class EyesisCorrections {
public JP46_Reader_camera JP4_INSTANCE= new JP46_Reader_camera(false);
public ImagejJp4Tiff imagejJp4Tiff = new ImagejJp4Tiff();
// public ImagejJp4Tiff imagejJp4Tiff = new ImagejJp4Tiff();
ShowDoubleFloatArrays SDFA_INSTANCE= new ShowDoubleFloatArrays();
DebayerScissorsClass debayerScissors=null;
......@@ -253,7 +254,7 @@ public class EyesisCorrections {
int src_channel = correctionsParameters.getChannelFromSourceTiff(path);
int sub_camera = src_channel - correctionsParameters.firstSubCamera;
int subchannel= pixelMapping.getSubChannelSilent(sub_camera); // only used for demux
ImagejJp4Tiff imagejJp4Tiff = new ImagejJp4Tiff(); // override global
ImagePlus imp = null;
try {
imp = imagejJp4Tiff.readTiffJp4(
......@@ -602,8 +603,8 @@ public class EyesisCorrections {
}
public void createChannelVignetting(
boolean correct_vignetting){
final boolean correct_vignetting){
final int threadsMax = 100;
/// this.channelWidthHeight=new int [this.usedChannels.length][];
this.channelVignettingCorrection=new float [this.usedChannels.length][];
this.defectsXY=new int [this.usedChannels.length][][];
......@@ -616,17 +617,25 @@ public class EyesisCorrections {
this.defectsDiff[nChn]=null;
}
int [][] bayer={{1,0},{2,1}}; // GR/BG
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
ImagePlus imp=null,imp_composite=null;
for (int nFile=0;nFile<correctionsParameters.getSourcePaths().length;nFile++){
ImagejJp4Tiff imagejJp4Tiff = new ImagejJp4Tiff(); // override instance-global
//for (int nFile=0;nFile<correctionsParameters.getSourcePaths().length;nFile++){
for (int nFile = ai.getAndIncrement(); nFile < correctionsParameters.getSourcePaths().length; nFile = ai.getAndIncrement()) {
int [] channels={correctionsParameters.getChannelFromSourceTiff(correctionsParameters.getSourcePaths()[nFile])};
if (!this.pixelMapping.subcamerasUsed()) {
channels = this.pixelMapping.channelsForSubCamera(channels[0]-correctionsParameters.firstSubCameraConfig); // index in calibration files matching this source
if (!pixelMapping.subcamerasUsed()) {
channels = pixelMapping.channelsForSubCamera(channels[0]-correctionsParameters.firstSubCameraConfig); // index in calibration files matching this source
} else if (correctionsParameters.isJP4()){
int subCamera= channels[0]- correctionsParameters.firstSubCamera; // to match those in the sensor files
channels=this.pixelMapping.channelsForSubCamera(subCamera);
channels=pixelMapping.channelsForSubCamera(subCamera);
}
if (this.pixelMapping.isChannelAvailable(channels)) { //channels!=null) {
if (pixelMapping.isChannelAvailable(channels)) { //channels!=null) {
imp=null;
imp_composite=null;
......@@ -640,17 +649,17 @@ public class EyesisCorrections {
} // throws IOException, FormatException { // std - include non-elphel properties with prefix std
if ((imp==null) && (imp_composite==null)) {
if (this.debugLevel>0) System.out.println("createChannelVignetting(): can not open "+correctionsParameters.getSourcePaths()[nFile]+
if (debugLevel>0) System.out.println("createChannelVignetting(): can not open "+correctionsParameters.getSourcePaths()[nFile]+
" as "+(correctionsParameters.isJP4()?"JP4":"TIFF")+" file");
continue;
}
for (int chn=0;chn<channels.length;chn++) {
int srcChannel=channels[chn];
/// if ((this.channelWidthHeight[srcChannel]==null) && this.pixelMapping.isChannelAvailable(srcChannel)){
if (this.pixelMapping.isChannelAvailable(srcChannel)){
int subChannel=this.pixelMapping.getSubChannel(srcChannel);
/// if ((channelWidthHeight[srcChannel]==null) && pixelMapping.isChannelAvailable(srcChannel)){
if (pixelMapping.isChannelAvailable(srcChannel)){
int subChannel=pixelMapping.getSubChannel(srcChannel);
if (correct_vignetting) {
if (this.correctionsParameters.swapSubchannels01) {
if (correctionsParameters.swapSubchannels01) {
switch (subChannel){
case 0: subChannel=1; break;
case 1: subChannel=0; break;
......@@ -664,8 +673,8 @@ public class EyesisCorrections {
" channels.length="+channels.length);
for (int i=0;i<channels.length;i++) System.out.print(" "+channels[i]);
System.out.println();
for (int i=0;i<this.usedChannels.length;i++) if (this.usedChannels[i]) {
System.out.println(i+": subCamera="+this.pixelMapping.sensors[i].subcamera);
for (int i=0;i<usedChannels.length;i++) if (usedChannels[i]) {
System.out.println(i+": subCamera="+pixelMapping.sensors[i].subcamera);
}
}
......@@ -679,46 +688,46 @@ public class EyesisCorrections {
imp, // ImagePlus imp_src,
pixelMapping.sensors[srcChannel].getSensorWH(),
true); // boolean replicate);
this.channelVignettingCorrection[srcChannel]=this.pixelMapping.getBayerFlatFieldFloat(
channelVignettingCorrection[srcChannel]=pixelMapping.getBayerFlatFieldFloat(
srcChannel,
bayer,
1.5); // TODO: Make range configurable, improve FF interpolation in calibration
} else { // no vignetting correction
int [] wh = this.pixelMapping.getSensorWH(srcChannel);
this.channelVignettingCorrection[srcChannel] = new float [wh[0]*wh[1]];
for (int i = 0; i < this.channelVignettingCorrection[srcChannel].length; i++) {
this.channelVignettingCorrection[srcChannel][i] = 1.0f;
int [] wh = pixelMapping.getSensorWH(srcChannel);
channelVignettingCorrection[srcChannel] = new float [wh[0]*wh[1]];
for (int i = 0; i < channelVignettingCorrection[srcChannel].length; i++) {
channelVignettingCorrection[srcChannel][i] = 1.0f;
}
}
if (this.debugLevel>0){
if (debugLevel>0){
SDFA_INSTANCE.showArrays(
this.channelVignettingCorrection[srcChannel],
this.pixelMapping.sensors[srcChannel].pixelCorrectionWidth,
this.pixelMapping.sensors[srcChannel].pixelCorrectionHeight,
channelVignettingCorrection[srcChannel],
pixelMapping.sensors[srcChannel].pixelCorrectionWidth,
pixelMapping.sensors[srcChannel].pixelCorrectionHeight,
"Vingetting-"+srcChannel
);
}
if (this.debugLevel>0){
if (debugLevel>0){
System.out.println("Created vignetting info for channel "+srcChannel+
" subchannel="+subChannel+" ("+
correctionsParameters.getSourcePaths()[nFile]+")");
/// System.out.println("imageWidth= "+this.channelWidthHeight[srcChannel][0]+" imageHeight="+this.channelWidthHeight[srcChannel][1]);
/// System.out.println("imageWidth= "+channelWidthHeight[srcChannel][0]+" imageHeight="+channelWidthHeight[srcChannel][1]);
}
this.defectsXY[srcChannel]=this.pixelMapping.getDefectsXY(srcChannel);
this.defectsDiff[srcChannel]=this.pixelMapping.getDefectsDiff(srcChannel);
if (this.debugLevel>0){
if (this.defectsXY[srcChannel]==null){
defectsXY[srcChannel]=pixelMapping.getDefectsXY(srcChannel);
defectsDiff[srcChannel]=pixelMapping.getDefectsDiff(srcChannel);
if (debugLevel>0){
if (defectsXY[srcChannel]==null){
System.out.println("No pixel defects info is available for channel "+srcChannel);
} else {
System.out.println("Extracted "+this.defectsXY[srcChannel].length+" pixel outliers for channel "+srcChannel+
System.out.println("Extracted "+defectsXY[srcChannel].length+" pixel outliers for channel "+srcChannel+
" (x:y:difference");
int numInLine=8;
for (int i=0;i<this.defectsXY[srcChannel].length;i++){
System.out.print(this.defectsXY[srcChannel][0]+":"+this.defectsXY[srcChannel][1]);
if ((this.defectsDiff[srcChannel]!=null) && (this.defectsDiff[srcChannel].length>i)){
System.out.print(":"+IJ.d2s(this.defectsDiff[srcChannel][i],3)+" ");
for (int i=0;i<defectsXY[srcChannel].length;i++){
System.out.print(defectsXY[srcChannel][0]+":"+defectsXY[srcChannel][1]);
if ((defectsDiff[srcChannel]!=null) && (defectsDiff[srcChannel].length>i)){
System.out.print(":"+IJ.d2s(defectsDiff[srcChannel][i],3)+" ");
}
if (((i%numInLine)==(numInLine-1)) || (i == (this.defectsXY[srcChannel].length-1))) System.out.println();
if (((i%numInLine)==(numInLine-1)) || (i == (defectsXY[srcChannel].length-1))) System.out.println();
}
}
}
......@@ -727,6 +736,10 @@ public class EyesisCorrections {
}
}
}
};
}
ImageDtt.startAndJoin(threads);
}
boolean [] usedChannels(String [] paths){
return usedChannels(paths, false);
......
......@@ -720,8 +720,10 @@ private Panel panel1,
addButton("Inter Accumulate", panelClt5, color_process);
addButton("Aux Inter Accumulate", panelClt5, color_process);
addButton("Inter Noise", panelClt5, color_process);
addButton("Inter Noise Aux", panelClt5, color_process);
addButton("Inter Debug Noise", panelClt5, color_report);
addButton("Noise Stats", panelClt5, color_process);
addButton("Noise Stats Aux", panelClt5, color_process);
addButton("Test 1D", panelClt5, color_process);
addButton("Colorize Depth", panelClt5, color_process);
plugInFrame.add(panelClt5);
......@@ -5203,15 +5205,23 @@ private Panel panel1,
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
CLT_PARAMETERS.batch_run = true;
intersceneNoise(false); // boolean bayer_artifacts_debug);
intersceneNoise(false, false); // boolean bayer_artifacts_debug);
return;
/* ======================================================================== */
} else if (label.equals("Inter Noise Aux")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
CLT_PARAMETERS.batch_run = true;
intersceneNoise(true, false); // boolean bayer_artifacts_debug);
return;
/* ======================================================================== */
} else if (label.equals("Inter Debug Noise")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
CLT_PARAMETERS.batch_run = true;
intersceneNoise(true); // boolean bayer_artifacts_debug);
intersceneNoise(false, true); // boolean bayer_artifacts_debug);
return;
/* ======================================================================== */
......@@ -5219,8 +5229,17 @@ private Panel panel1,
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
CLT_PARAMETERS.batch_run = true;
intersceneNoiseStats();
intersceneNoiseStats(false);
return;
/* ======================================================================== */
} else if (label.equals("Noise Stats Aux")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
CLT_PARAMETERS.batch_run = true;
intersceneNoiseStats(true);
return;
/* ======================================================================== */
} else if (label.equals("Colorize Depth")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
......@@ -7095,7 +7114,9 @@ private Panel panel1,
return true;
}
public boolean intersceneNoise(boolean bayer_artifacts_debug) {
public boolean intersceneNoise(
boolean use_aux,
boolean bayer_artifacts_debug) {
long startTime=System.nanoTime();
// load needed sensor and kernels files
if (!prepareRigImages()) return false;
......@@ -7105,7 +7126,7 @@ private Panel panel1,
if (DEBUG_LEVEL > -2){
System.out.println("++++++++++++++ Testing Interscene processing ++++++++++++++");
}
/*
if (CLT_PARAMETERS.useGPU()) { // only init GPU instances if it is used
if (GPU_TILE_PROCESSOR == null) {
try {
......@@ -7132,15 +7153,62 @@ private Panel panel1,
QUAD_CLT.setGPU(GPU_QUAD);
}
}
*/
if (CLT_PARAMETERS.useGPU()) { // only init GPU instances if it is used
if (GPU_TILE_PROCESSOR == null) {
try {
GPU_TILE_PROCESSOR = new GPUTileProcessor(CORRECTION_PARAMETERS.tile_processor_gpu);
} catch (Exception e) {
System.out.println("Failed to initialize GPU class");
// TODO Auto-generated catch block
e.printStackTrace();
return false;
} //final int debugLevel);
}
if (use_aux) {
if (CLT_PARAMETERS.useGPU(true) && (QUAD_CLT_AUX != null) && (GPU_QUAD_AUX == null)) { // if GPU AUX is needed
try {
GPU_QUAD_AUX = new GpuQuad(//
GPU_TILE_PROCESSOR, QUAD_CLT_AUX,
4,
3);
} catch (Exception e) {
System.out.println("Failed to initialize GpuQuad class");
// TODO Auto-generated catch block
e.printStackTrace();
return false;
} //final int debugLevel);
QUAD_CLT_AUX.setGPU(GPU_QUAD_AUX);
}
} else {
if (CLT_PARAMETERS.useGPU(false) && (QUAD_CLT != null) && (GPU_QUAD == null)) { // if GPU main is needed
try {
GPU_QUAD = new GpuQuad(
GPU_TILE_PROCESSOR, QUAD_CLT,
4,
3);
} catch (Exception e) {
System.out.println("Failed to initialize GpuQuad class");
// TODO Auto-generated catch block
e.printStackTrace();
return false;
} //final int debugLevel);
QUAD_CLT.setGPU(GPU_QUAD);
}
}
}
QuadCLT quadCLT = use_aux ? QUAD_CLT_AUX : QUAD_CLT;
ColorProcParameters colorProcParameters = use_aux ? COLOR_PROC_PARAMETERS_AUX : COLOR_PROC_PARAMETERS;
try {
TWO_QUAD_CLT.intersceneNoise(
QUAD_CLT, // QuadCLT quadCLT_main,
quadCLT, // QuadCLT quadCLT_main,
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CHANNEL_GAINS_PARAMETERS, //CorrectionColorProc.ColorGainsParameters channelGainParameters,
RGB_PARAMETERS, //EyesisCorrectionParameters.RGBParameters rgbParameters,
DEBAYER_PARAMETERS, // EyesisCorrectionParameters.DebayerParameters debayerParameters,
colorProcParameters, // EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CHANNEL_GAINS_PARAMETERS, // CorrectionColorProc.ColorGainsParameters channelGainParameters,
RGB_PARAMETERS, // EyesisCorrectionParameters.RGBParameters rgbParameters,
EQUIRECTANGULAR_PARAMETERS, // EyesisCorrectionParameters.EquirectangularParameters equirectangularParameters,
PROPERTIES, // Properties properties,
bayer_artifacts_debug, // boolean bayer_artifacts_debug
......@@ -7164,7 +7232,7 @@ private Panel panel1,
return true;
}
public boolean intersceneNoiseStats() {
public boolean intersceneNoiseStats(boolean use_aux) {
long startTime=System.nanoTime();
// load needed sensor and kernels files
if (!prepareRigImages()) return false;
......@@ -7174,7 +7242,7 @@ private Panel panel1,
if (DEBUG_LEVEL > -2){
System.out.println("++++++++++++++ Testing Interscene processing ++++++++++++++");
}
/*
if (CLT_PARAMETERS.useGPU()) { // only init GPU instances if it is used
if (GPU_TILE_PROCESSOR == null) {
try {
......@@ -7201,13 +7269,60 @@ private Panel panel1,
QUAD_CLT.setGPU(GPU_QUAD);
}
}
*/
if (CLT_PARAMETERS.useGPU()) { // only init GPU instances if it is used
if (GPU_TILE_PROCESSOR == null) {
try {
GPU_TILE_PROCESSOR = new GPUTileProcessor(CORRECTION_PARAMETERS.tile_processor_gpu);
} catch (Exception e) {
System.out.println("Failed to initialize GPU class");
// TODO Auto-generated catch block
e.printStackTrace();
return false;
} //final int debugLevel);
}
if (use_aux) {
if (CLT_PARAMETERS.useGPU(true) && (QUAD_CLT_AUX != null) && (GPU_QUAD_AUX == null)) { // if GPU AUX is needed
try {
GPU_QUAD_AUX = new GpuQuad(//
GPU_TILE_PROCESSOR, QUAD_CLT_AUX,
4,
3);
} catch (Exception e) {
System.out.println("Failed to initialize GpuQuad class");
// TODO Auto-generated catch block
e.printStackTrace();
return false;
} //final int debugLevel);
QUAD_CLT_AUX.setGPU(GPU_QUAD_AUX);
}
} else {
if (CLT_PARAMETERS.useGPU(false) && (QUAD_CLT != null) && (GPU_QUAD == null)) { // if GPU main is needed
try {
GPU_QUAD = new GpuQuad(
GPU_TILE_PROCESSOR, QUAD_CLT,
4,
3);
} catch (Exception e) {
System.out.println("Failed to initialize GpuQuad class");
// TODO Auto-generated catch block
e.printStackTrace();
return false;
} //final int debugLevel);
QUAD_CLT.setGPU(GPU_QUAD);
}
}
}
QuadCLT quadCLT = use_aux ? QUAD_CLT_AUX : QUAD_CLT;
ColorProcParameters colorProcParameters = use_aux ? COLOR_PROC_PARAMETERS_AUX : COLOR_PROC_PARAMETERS;
try {
TWO_QUAD_CLT.intersceneNoiseStats(
QUAD_CLT, // QuadCLT quadCLT_main,
quadCLT, // QUAD_CLT, // QuadCLT quadCLT_main,
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
colorProcParameters, // COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CHANNEL_GAINS_PARAMETERS, //CorrectionColorProc.ColorGainsParameters channelGainParameters,
RGB_PARAMETERS, //EyesisCorrectionParameters.RGBParameters rgbParameters,
EQUIRECTANGULAR_PARAMETERS, // EyesisCorrectionParameters.EquirectangularParameters equirectangularParameters,
......
......@@ -16693,8 +16693,6 @@ public class ImageDttCPU {
final int width,
final TpTask [] tp_tasks,
final TpTask [] tp_tasks_target, // null or wider array to provide target disparity for neighbors
// final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
// final double disparity_corr, // apply to disparity array data only, tp_tasks are already corrected
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
// dcorr_td should be either null, or double [tp_tasks.length][][];
final double [][][][] dcorr_td, // [tile][pair][4][64] sparse by pair transform domain representation of corr pairs
......@@ -16703,9 +16701,6 @@ public class ImageDttCPU {
final GeometryCorrection geometryCorrection,
final int kernel_step,
final int window_type,
// final double disparity_corr, should be applied to tp_tasks already!
final double corr_red,
final double corr_blue,
// related to tilt
......
......@@ -27,6 +27,7 @@ import java.awt.Rectangle;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.DoubleAccumulator;
......@@ -3180,16 +3181,13 @@ public class OpticalFlow {
}
String [] scene_names = scenes_list.toArray(new String [0]);
// get list of all other scenes
// int num_scenes = sts.length + 1;
int num_scenes = scene_names.length + 1;
int indx_ref = num_scenes - 1;
QuadCLT [] scenes = new QuadCLT [num_scenes];
scenes[indx_ref] = ref_scene;
// for (int i = 0; i < sts.length; i++) {
for (int i = 0; i < scene_names.length; i++) {
scenes[i] = ref_scene.spawnQuadCLTWithNoise( // spawnQuadCLT(
// sts[i],
scene_names[i],
clt_parameters,
colorProcParameters, //
......@@ -3208,6 +3206,17 @@ public class OpticalFlow {
scenes, // final QuadCLT [] scenes,
indx_ref, // final int indx_ref,
debug_level-2); // final int debug_level);
// re-read ref's INTER if possible
if (ref_scene.restoreDSI( // if there is already calculated with interscene - use it
"-DSI_INTER",
true // silent
) >=0 ) {
System.out.println("IntersceneAccumulate(): Using previously calculated interscene DSI (*-DSI_INTER) as initial DSI");
combo_dsn[0] = ref_scene.dsi[ref_scene.is_aux?TwoQuadCLT.DSI_DISPARITY_AUX:TwoQuadCLT.DSI_DISPARITY_MAIN];
combo_dsn[1] = ref_scene.dsi[ref_scene.is_aux?TwoQuadCLT.DSI_STRENGTH_AUX:TwoQuadCLT.DSI_STRENGTH_MAIN];
}
final double [][] combo_dsn_change = new double [combo_dsn.length+1][];
for (int i = 0; i < combo_dsn.length; i++) {
combo_dsn_change[i] = combo_dsn[i];
......@@ -3254,14 +3263,13 @@ public class OpticalFlow {
refine_titles[last_initial_slices + i * max_refines + nrefine ] = combo_dsn_titles[i]+"-"+nrefine;
}
}
double [][] disparity_map = null;
for (int nrefine = 0; nrefine < max_refines; nrefine++) {
Runtime.getRuntime().gc();
System.out.println("--- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
double [][] disparity_map =
// double [][][][][] clt_corr_partial =
correlateInterscene(
disparity_map = correlateInterscene(
clt_parameters, // final CLTParameters clt_parameters,
scenes, // final QuadCLT [] scenes,
indx_ref, // final int indx_ref,
......@@ -3323,6 +3331,22 @@ public class OpticalFlow {
tilesX, // int width,
tilesY); // int height)
// save _DSI_INTER - same format, as _DSI_MAIN, it will be used instead of _DSI_MAIN next time
double [][] dsi = new double [TwoQuadCLT.DSI_SLICES.length][];
dsi[ref_scene.is_aux?TwoQuadCLT.DSI_DISPARITY_AUX:TwoQuadCLT.DSI_DISPARITY_MAIN] = combo_dsn_change[0];
dsi[ref_scene.is_aux?TwoQuadCLT.DSI_STRENGTH_AUX:TwoQuadCLT.DSI_STRENGTH_MAIN] = combo_dsn_change[1];
double [] disp_lma = disparity_map[ImageDtt.DISPARITY_INDEX_POLY];
if (disp_lma != null) {
int indx_lma = ref_scene.is_aux?TwoQuadCLT.DSI_DISPARITY_AUX_LMA:TwoQuadCLT.DSI_DISPARITY_MAIN_LMA;
dsi[indx_lma] = combo_dsn_change[0].clone();
for (int i = 0; i < disp_lma.length; i++) {
if (Double.isNaN(disp_lma[i])) {
dsi[indx_lma][i] = Double.NaN;
}
}
}
ref_scene.saveDSIAll ( "-DSI_INTER",dsi); // delete/rename this file to start iterations from reference lma
// save combo_dsn_change to model directory
if (debug_level >-100) {
return;
......@@ -5463,7 +5487,6 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
final ErsCorrection ers_reference = ref_scene.getErsCorrection();
final int tilesX = ref_scene.getTileProcessor().getTilesX();
final int tilesY = ref_scene.getTileProcessor().getTilesY();
// final int tiles =tilesX * tilesY;
final int num_pairs = Correlation2d.getNumPairs(ref_scene.getNumSensors());
final double [][][][][] dcorr_td_acc = new double[num_pairs][][][][];
final float [][][][] fcorr_td_acc = new float [tilesY][tilesX][][];
......@@ -5488,8 +5511,6 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
clt_parameters.getScaleStrength(ref_scene.isAux()),
ref_scene.getGPU());
// init Correlation2d
image_dtt.getCorrelation2d(); // initiate image_dtt.correlation2d, needed if disparity_map != null
double[][] disparity_map = new double [image_dtt.getDisparityTitles().length][];
......@@ -5609,8 +5630,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
//Correlation2d correlation2d =
image_dtt.getCorrelation2d();
int mcorr_sel = ImageDtt.corrSelEncode(clt_parameters.img_dtt,scenes[nscene].getNumSensors());
double [][][][][] dcorr_td = new double[num_pairs][][][][]; // correlation2d.getCorrTitles().length][][][][]; // [pair][tilesY][tilesX][4][64] sparse transform domain representation of corr pairs
//num_pairs
double [][][][] dcorr_td = new double[tp_tasks.length][][][]; // [tile][pair][4][64] sparse by pair transform domain representation of corr pairs
image_dtt.quadCorrTD(
scenes[nscene].getImageData(), // final double [][][] image_data, // first index - number of image in a quad
scenes[nscene].getErsCorrection().getSensorWH()[0], // final int width,
......@@ -5631,8 +5651,9 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
debug_level); //final int globalDebugLevel)
accumulateCorrelations(
tp_tasks, // final TpTask [] tp_tasks,
num_acc, // final int [][][] num_acc, // number of accumulated tiles [tilesY][tilesX][pair]
dcorr_td, // final double [][][][][] dcorr_td, // [pair][tilesY][tilesX][4][64] sparse transform domain representation of corr pairs
dcorr_td, // final double [][][][][] dcorr_td, // [tile][pair][4][64] sparse transform domain representation of corr pairs
dcorr_td_acc); // final double [][][][][] dcorr_td_acc // [pair][tilesY][tilesX][4][64] sparse transform domain representation of corr pairs
if ((nscene == indx_ref) && show_reference_correlations) { // prepare 2d correlations for visualization, double/CPU mode
......@@ -5642,18 +5663,21 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
tp_tasks_ref, // final TpTask [] tp_tasks, // data from the reference frame - will be applied to LMW for the integrated correlations
// only listed tiles will be processed
ref_scene.getErsCorrection().getRXY(false), // final double [][] rXY, // from geometryCorrection
tilesX, // final int tilesX, // tp_tasks may lack maximal tileX, tileY
tilesY, // final int tilesY,
// no fcorr_combo_td here both arrays should have same non-null tiles
dcorr_td, // final double [][][][][] dcorr_td, // [pair][tilesY][tilesX][4][64] sparse transform domain representation of corr pairs
dcorr_td, // final double [][][][] dcorr_td, // [tile][pair][4][64] sparse by pair transform domain representation of corr pairs
null, // final double [] dcorr_weight, // [tile] weighted number of tiles averaged (divide squared fat zero by this)
// next both can be nulls
null, // final double [][][][] clt_corr_out, // sparse (by the first index) [type][tilesY][tilesX][(2*transform_size-1)*(2*transform_size-1)] or null
null, // final double [][][][] clt_combo_out, // sparse (by the first index) [>=1][tilesY][tilesX][(combo_tile_size] or null
// to be converted to float
dcorr_tiles, // final double [][][] dcorr_tiles, // [tile][pair][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
// When clt_mismatch is non-zero, no far objects extraction will be attempted
//optional, may be null
disparity_map, // final double [][] disparity_map, // [8][tilesY][tilesX], only [6][] is needed on input or null - do not calculate
true, // final boolean run_lma, // calculate LMA, false - CM only
// last 2 - contrast, avg/ "geometric average)
scaled_fat_zero, // clt_parameters.getGpuFatZero(ref_scene.isMonochrome()), // final double afat_zero2, // gpu_fat_zero ==30? clt_parameters.getGpuFatZero(is_mono); absolute fat zero, same units as components squared values
clt_parameters.getGpuFatZero(ref_scene.isMonochrome()), // clt_parameters.getGpuFatZero(ref_scene.isMonochrome()), // final double afat_zero2, // gpu_fat_zero ==30? clt_parameters.getGpuFatZero(is_mono); absolute fat zero, same units as components squared values
clt_parameters.gpu_sigma_m, // final double corr_sigma, //
// define combining of all 2D correlation pairs for CM (LMA does not use them)
......@@ -5667,12 +5691,9 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
clt_parameters.tileY, // final int debug_tileY,
threadsMax, // final int threadsMax, // maximal number of threads to launch
debug_level -1 ); // final int globalDebugLevel)
image_dtt.convertFcltCorr(
dcorr_tiles, // double [][][] dcorr_tiles,// [tile][sparse, correlation pair][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
fclt_corr); // float [][][] fclt_corr) // new float [tilesX * tilesY][][] or null
}
}
......@@ -5688,6 +5709,14 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
threadsMax, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel)
String [] titles = new String [dbg_corr_rslt_partial.length]; // dcorr_tiles[0].length];
int ind_length = image_dtt.getCorrelation2d().getCorrTitles().length;
System.arraycopy(image_dtt.getCorrelation2d().getCorrTitles(), 0, titles, 0, ind_length);
for (int i = ind_length; i < titles.length; i++) {
titles[i] = "combo-"+(i - ind_length);
}
// titles.length = 15, corr_rslt_partial.length=16!
(new ShowDoubleFloatArrays()).showArrays( // out of boundary 15
dbg_corr_rslt_partial,
......@@ -5695,7 +5724,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
tilesY*(2*image_dtt.transform_size),
true,
ref_scene.getImageName()+"-CORR-REFSCENE-"+nrefine,
image_dtt.getCorrelation2d().getCorrTitles()); //CORR_TITLES);
titles); // image_dtt.getCorrelation2d().getCorrTitles()); //CORR_TITLES);
}
......@@ -5715,9 +5744,6 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
dcorr_td_acc); // final double [][][][][] dcorr_td_acc // [pair][tilesY][tilesX][4][64] sparse transform domain representation of corr pairs
}
// final float [][][] fclt_corr = new float [tilesX * tilesY][][]; // not all used
if (ref_scene.hasGPU()) {
image_dtt.clt_process_tl_correlations_GPU( // convert to pixel domain and process correlations already prepared in fcorr_td and/or fcorr_combo_td
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
......@@ -5746,26 +5772,41 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
debug_level -1 );
} else {
// double [][][][][] dcorr_td = image_dtt.convertCorrTd( // may return null if nothing to convert
// fcorr_td); // float [][][][] fcorr_td);
double [][][][] dcorr_td = new double[tp_tasks_ref.length][][][]; // [tile][pair][4][64] sparse transform domain representation of corr pairs
double [] dcorr_weight = new double[tp_tasks_ref.length];
for (int iTile = 0; iTile < dcorr_td.length; iTile++) {
TpTask task = tp_tasks_ref[iTile];
int tileY = task.getTileY(); // tilesX;
int tileX = task.getTileX(); // nTile % tilesX;
dcorr_td[iTile] = new double [num_pairs][][];
for (int npair = 0; npair < num_pairs; npair++) if (dcorr_td_acc[npair] != null){
dcorr_td[iTile][npair] = dcorr_td_acc[npair][tileY][tileX];
dcorr_weight[iTile] = num_acc[tileY][tileX][npair]; // number of accumulated tiles [tilesY][tilesX][pair]
}
}
double [][][] dcorr_tiles = (fclt_corr != null)? (new double [tp_tasks_ref.length][][]):null;
image_dtt.clt_process_tl_correlations( // convert to pixel domain and process correlations already prepared in fcorr_td and/or fcorr_combo_td
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tp_tasks_ref, // final TpTask [] tp_tasks, // data from the reference frame - will be applied to LMW for the integrated correlations
// only listed tiles will be processed
ref_scene.getErsCorrection().getRXY(false), // final double [][] rXY, // from geometryCorrection
tilesX, // final int tilesX, // tp_tasks may lack maximal tileX, tileY
tilesY, // final int tilesY,
// no fcorr_combo_td here both arrays should have same non-null tiles
dcorr_td_acc, // final double [][][][][] dcorr_td, // [pair][tilesY][tilesX][4][64] sparse transform domain representation of corr pairs
dcorr_td, // final double [][][][] dcorr_td, // [tile][pair][4][64] sparse by pair transform domain representation of corr pairs
dcorr_weight, // final double [] dcorr_weight, // [tile] weighted number of tiles averaged (divide squared fat zero by this)
// next both can be nulls
null, // final double [][][][] clt_corr_out, // sparse (by the first index) [type][tilesY][tilesX][(2*transform_size-1)*(2*transform_size-1)] or null
null, // final double [][][][] clt_combo_out, // sparse (by the first index) [>=1][tilesY][tilesX][(combo_tile_size] or null
// to be converted to float
dcorr_tiles, // final double [][][] dcorr_tiles, // [tile][pair][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
// When clt_mismatch is non-zero, no far objects extraction will be attempted
//optional, may be null
disparity_map, // final double [][] disparity_map, // [8][tilesY][tilesX], only [6][] is needed on input or null - do not calculate
true, // final boolean run_lma, // calculate LMA, false - CM only
// last 2 - contrast, avg/ "geometric average)
scaled_fat_zero, // clt_parameters.getGpuFatZero(ref_scene.isMonochrome()), // final double afat_zero2, // gpu_fat_zero ==30? clt_parameters.getGpuFatZero(is_mono); absolute fat zero, same units as components squared values
clt_parameters.getGpuFatZero(ref_scene.isMonochrome()), // clt_parameters.getGpuFatZero(ref_scene.isMonochrome()), // final double afat_zero2, // gpu_fat_zero ==30? clt_parameters.getGpuFatZero(is_mono); absolute fat zero, same units as components squared values
clt_parameters.gpu_sigma_m, // final double corr_sigma, //
// define combining of all 2D correlation pairs for CM (LMA does not use them)
......@@ -5786,7 +5827,6 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
}
Runtime.getRuntime().gc();
System.out.println("--- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
if (show_accumulated_correlations){ // -1
......@@ -5800,19 +5840,25 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
clt_parameters.corr_border_contrast, // final double border_contrast,
threadsMax, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel)
String [] titles = new String [dbg_corr_rslt_partial.length]; // dcorr_tiles[0].length];
int ind_length = image_dtt.getCorrelation2d().getCorrTitles().length;
// titles.length = 15, corr_rslt_partial.length=16!
System.arraycopy(image_dtt.getCorrelation2d().getCorrTitles(), 0, titles, 0, ind_length);
for (int i = ind_length; i < titles.length; i++) {
titles[i] = "combo-"+(i - ind_length);
}
(new ShowDoubleFloatArrays()).showArrays( // out of boundary 15
dbg_corr_rslt_partial,
tilesX*(2*image_dtt.transform_size),
tilesY*(2*image_dtt.transform_size),
true,
ref_scene.getImageName()+"-CORR-ACCUM"+num_scenes+"-"+nrefine,
image_dtt.getCorrelation2d().getCorrTitles()); //CORR_TITLES);
titles); // image_dtt.getCorrelation2d().getCorrTitles()); //CORR_TITLES);
}
return disparity_map; // disparity_map
}
@Deprecated
public void accumulateCorrelations(
final int [][][] num_acc, // number of accumulated tiles [tilesY][tilesX][pair]
final double [][][][][] dcorr_td, // [pair][tilesY][tilesX][4][64] sparse transform domain representation of corr pairs
......@@ -5868,6 +5914,67 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
ImageDtt.startAndJoin(threads);
}
/**
 * Accumulate per-tile transform-domain 2D correlation data (CPU/double version)
 * into a per-pair accumulator, using two multithreaded passes:
 *   pass 1 - discover which correlation pairs are present in any tile;
 *   pass 2 - allocate (lazily) and sum each present tile into dcorr_td_acc,
 *            counting accumulations in num_acc.
 * Threads are created via ImageDtt.newThreadArray() and joined with
 * ImageDtt.startAndJoin(); tiles are distributed with an AtomicInteger counter.
 *
 * NOTE(review): num_acc[tileY][tileX][pair]++ and the lazy allocation of
 * dcorr_td_acc[pair][tileY][tileX] are only thread-safe if each (tileY,tileX)
 * appears in at most one tp_tasks entry — presumably true for task lists
 * produced upstream; confirm with the task generator.
 * NOTE(review): threadsMax is not a parameter here — it is resolved from the
 * enclosing class scope (not visible in this chunk).
 *
 * @param tp_tasks      tile processing tasks; entries may be null or have
 *                      getTask()==0 (skipped). Supplies tileY/tileX per tile.
 * @param num_acc       in/out count of accumulated tiles [tilesY][tilesX][pair];
 *                      also defines the tilesY/tilesX/num_pairs dimensions.
 * @param dcorr_td      per-tile sparse input [tile][pair][quadrant][coeffs];
 *                      null tile or null pair entries are skipped.
 * @param dcorr_td_acc  in/out accumulator [pair][tilesY][tilesX][quadrant][coeffs];
 *                      a pair plane is allocated here only if that pair occurs
 *                      in the input and the plane is not already allocated.
 */
public void accumulateCorrelations(
final TpTask [] tp_tasks,
final int [][][] num_acc, // number of accumulated tiles [tilesY][tilesX][pair]
final double [][][][] dcorr_td, // [tile][pair][4][64] sparse transform domain representation of corr pairs
final double [][][][][] dcorr_td_acc // [pair][tilesY][tilesX][4][64] sparse transform domain representation of corr pairs
) {
// Dimensions are taken from num_acc, which must be pre-allocated dense.
final int tilesY = num_acc.length;
final int tilesX = num_acc[0].length;
final int num_pairs = num_acc[0][0].length;
// One "pair is present somewhere" flag per correlation pair (pass 1 result).
final AtomicBoolean [] acorrs = new AtomicBoolean[num_pairs];
for (int i = 0; i < acorrs.length; i++) {
acorrs[i] = new AtomicBoolean();
}
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
// Pass 1 (parallel): scan all tiles, mark every pair that has data.
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int iTile = ai.getAndIncrement(); iTile < tp_tasks.length; iTile = ai.getAndIncrement()) if (dcorr_td[iTile] != null) {
for (int pair = 0; pair < num_pairs; pair++) if (dcorr_td[iTile][pair] != null){
acorrs[pair].set(true);
}
}
}
};
}
ImageDtt.startAndJoin(threads);
// Single-threaded: allocate accumulator planes for newly-seen pairs only,
// so pre-existing accumulated data from earlier calls is preserved.
for (int pair = 0; pair < acorrs.length; pair++) if ((dcorr_td_acc[pair] == null) && acorrs[pair].get()) {
dcorr_td_acc[pair] = new double[tilesY][tilesX][][];
}
ai.set(0);
// Pass 2 (parallel): for each valid task, clone-or-add its correlation data
// into the accumulator at the task's tile coordinates and bump num_acc.
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int iTile = ai.getAndIncrement(); iTile < tp_tasks.length; iTile = ai.getAndIncrement()) if ((tp_tasks[iTile] != null) && (tp_tasks[iTile].getTask() != 0)) {
int tileY = tp_tasks[iTile].getTileY(); // tilesX;
int tileX = tp_tasks[iTile].getTileX(); // nTile % tilesX;
for (int pair = 0; pair < num_pairs; pair++) if ((dcorr_td[iTile] != null) && (dcorr_td[iTile][pair] != null)){
if (dcorr_td_acc[pair][tileY][tileX] == null) {
// First contribution for this (pair, tile): deep-copy quadrants.
dcorr_td_acc[pair][tileY][tileX] = new double [dcorr_td[iTile][pair].length][]; // 4
for (int q = 0; q < dcorr_td_acc[pair][tileY][tileX].length; q++) {
dcorr_td_acc[pair][tileY][tileX][q] = dcorr_td[iTile][pair][q].clone();
}
} else {
// Subsequent contribution: element-wise sum into existing quadrants.
for (int q = 0; q < dcorr_td_acc[pair][tileY][tileX].length; q++) {
for (int i = 0; i < dcorr_td_acc[pair][tileY][tileX][q].length; i++) {
dcorr_td_acc[pair][tileY][tileX][q][i] += dcorr_td[iTile][pair][q][i];
}
}
}
num_acc[tileY][tileX][pair]++;
}
}
}
};
}
ImageDtt.startAndJoin(threads);
}
// GPU (float) version
public void accumulateCorrelations(
final int [][][] num_acc, // number of accumulated tiles [tilesY][tilesX][pair]
......
......@@ -2017,7 +2017,7 @@ public class QuadCLT extends QuadCLTCPU {
true); //newAllowed, // save
String file_name = image_name + suffix;
String file_path = x3d_path + Prefs.getFileSeparator() + file_name + ".tiff";
if (getGPU().getQuadCLT() != this) {
if ((getGPU() != null) && (getGPU().getQuadCLT() != this)) {
getGPU().updateQuadCLT(this); // to re-load new set of Bayer images to the GPU
}
......@@ -2034,8 +2034,8 @@ public class QuadCLT extends QuadCLTCPU {
threadsMax, // final int threadsMax, // maximal number of threads to launch
false, // final boolean updateStatus,
debugLevel); // final int debugLevel);
FileSaver fs=new FileSaver(img_noise);
fs.saveAsTiff(file_path);
// FileSaver fs=new FileSaver(img_noise); // is null, will be saved inside to /home/elphel/lwir16-proc/proc1/results_cuda/1626032208_613623-AUX-SHIFTED-D0.0
// fs.saveAsTiff(file_path);
}
public ImagePlus processCLTQuadCorrGPU(
......@@ -2054,7 +2054,7 @@ public class QuadCLT extends QuadCLTCPU {
if (gpuQuad == null) {
System.out.println("GPU instance is not initialized, using CPU mode");
processCLTQuadCorrCPU(
imp_quad, // ImagePlus [] imp_quad, // should have properties "name"(base for saving results), "channel","path"
// imp_quad, // ImagePlus [] imp_quad, // should have properties "name"(base for saving results), "channel","path"
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null // Not needed use this.saturation_imp
clt_parameters, // CLTParameters clt_parameters,
debayerParameters, // EyesisCorrectionParameters.DebayerParameters debayerParameters,
......
......@@ -47,7 +47,9 @@ import java.util.List;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.DoubleAccumulator;
import org.checkerframework.checker.units.qual.m;
......@@ -295,14 +297,21 @@ public class QuadCLTCPU {
return image_name;
}
public int restoreDSI(String suffix) // "-DSI_COMBO", "-DSI_MAIN" (DSI_COMBO_SUFFIX, DSI_MAIN_SUFFIX)
public int restoreDSI(
String suffix,
boolean silent) // "-DSI_COMBO", "-DSI_MAIN" (DSI_COMBO_SUFFIX, DSI_MAIN_SUFFIX)
{
this.dsi = new double [TwoQuadCLT.DSI_SLICES.length][];
return restoreDSI(suffix,dsi);
return restoreDSI(
suffix,
dsi,
silent);
}
public int restoreDSI(String suffix, // "-DSI_COMBO", "-DSI_MAIN" (DSI_COMBO_SUFFIX, DSI_MAIN_SUFFIX)
double [][] dsi) {
public int restoreDSI(
String suffix, // "-DSI_COMBO", "-DSI_MAIN" (DSI_COMBO_SUFFIX, DSI_MAIN_SUFFIX)
double [][] dsi,
boolean silent) {
String x3d_path= correctionsParameters.selectX3dDirectory( // for x3d and obj
correctionsParameters.getModelName(image_name), // quad timestamp. Will be ignored if correctionsParameters.use_x3d_subdirs is false
correctionsParameters.x3dModelVersion,
......@@ -313,12 +322,22 @@ public class QuadCLTCPU {
try {
imp = new ImagePlus(file_path);
} catch (Exception e) {
if (!silent) {
System.out.println ("Failed to open "+file_path);
}
return -1;
}
if (imp.getWidth()==0) { // file not found
if (!silent) {
System.out.println ("Failed to open "+file_path);
}
return -1;
}
System.out.println("restoreDSI(): got "+imp.getStackSize()+" slices from file: "+file_path);
if (imp.getStackSize() < 2) {
if (!silent) {
System.out.println ("Failed to read "+file_path);
}
return -1;
}
int num_slices_read = 0;
......@@ -601,7 +620,6 @@ public class QuadCLTCPU {
{
final int debugLevelInner=clt_parameters.batch_run? -2: debugLevel;
// String set_name = image_name; // prevent from being overwritten?
String jp4_copy_path= correctionsParameters.selectX3dDirectory(
this.image_name, // quad timestamp. Will be ignored if correctionsParameters.use_x3d_subdirs is false
correctionsParameters.jp4SubDir,
......@@ -640,7 +658,15 @@ public class QuadCLTCPU {
threadsMax,
1); // debugLevel); // final int debug_level)
}
restoreDSI("-DSI_MAIN"); // "-DSI_COMBO", "-DSI_MAIN" (DSI_COMBO_SUFFIX, DSI_MAIN_SUFFIX)
// try to restore DSI generated from interscene if available, if not use single-scene -DSI_MAIN
if (restoreDSI(
"-DSI_INTER",
true // silent
) < 0) {
restoreDSI(
"-DSI_MAIN", // "-DSI_COMBO", "-DSI_MAIN" (DSI_COMBO_SUFFIX, DSI_MAIN_SUFFIX)
false); // silent
}
restoreInterProperties( // restore properties for interscene processing (extrinsics, ers, ...)
null, // String path, // full name with extension or null to use x3d directory
false, // boolean all_properties,// null, // Properties properties, // if null - will only save extrinsics)
......@@ -717,6 +743,29 @@ public class QuadCLTCPU {
}
ImageDtt.startAndJoin(threads);
}
if (isLwir()) {
for (int q = 0; q < num_cams; q++) {
final int fq = q;
double s1 = 0.0, s2 = 0.0;
for (int i =0; i < image_data[q][0].length; i++) {
s1 += image_data[q][0][i];
s2 += image_data[q][0][i] * image_data[q][0][i];
}
double s0 = image_data[q][0].length;
final double sb = 2.0 * Math.sqrt(s2*s0 - s1*s1) / s0; // 2.0 - to match calculation for RGB (average value)
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int i = ai.getAndIncrement(); i < noise[0].length; i = ai.getAndIncrement()) {
noise[fq][i] *= sb;
}
}
};
}
ImageDtt.startAndJoin(threads);
}
} else {
for (int q = 0; q < num_cams; q++) {
final int fq = q;
double [] sc = new double [num_cols];
......@@ -725,6 +774,21 @@ public class QuadCLTCPU {
sc[c] += image_data[q][c][i];
}
}
if (isMonochrome()) {
final double sb = sc[0]/num_pix;
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int i = ai.getAndIncrement(); i < noise[0].length; i = ai.getAndIncrement()) {
noise[fq][i] *= sb;
}
}
};
}
ImageDtt.startAndJoin(threads);
} else {
final double [][] sb = {
{sc[2] * 2.0 / num_pix, sc[0] * 4.0 / num_pix},
{sc[1] * 4.0 / num_pix, sc[2] * 2.0 / num_pix}};
......@@ -742,6 +806,8 @@ public class QuadCLTCPU {
}
ImageDtt.startAndJoin(threads);
}
}
}
imp = saveDoubleArrayInModelDirectory(
noise_suffix, // String suffix,
null, // String [] labels, // or null
......@@ -909,6 +975,7 @@ public class QuadCLTCPU {
}
public void saveDSIAll(
String suffix, // "-DSI_MAIN"
double [][] dsi) // DSI_SLICES.length
{
String x3d_path= correctionsParameters.selectX3dDirectory( // for x3d and obj
......@@ -916,11 +983,7 @@ public class QuadCLTCPU {
correctionsParameters.x3dModelVersion,
true, // smart,
true); //newAllowed, // save
String title = image_name+"-DSI_MAIN";
// String [] titles = {TwoQuadCLT.DSI_SLICES[TwoQuadCLT.DSI_DISPARITY_MAIN], TwoQuadCLT.DSI_SLICES[TwoQuadCLT.DSI_STRENGTH_MAIN]};
// double [][] dsi_main = {dsi[TwoQuadCLT.DSI_DISPARITY_MAIN], dsi[TwoQuadCLT.DSI_STRENGTH_MAIN]};
// ImagePlus imp = (new ShowDoubleFloatArrays()).makeArrays(dsi_main, tp.getTilesX(), tp.getTilesY(), title, titles);
String title = image_name+suffix; // "-DSI_MAIN";
ImagePlus imp = (new ShowDoubleFloatArrays()).makeArrays(dsi, tp.getTilesX(), tp.getTilesY(), title, TwoQuadCLT.DSI_SLICES);
eyesisCorrections.saveAndShow(
imp, // ImagePlus imp,
......@@ -4314,13 +4377,21 @@ public class QuadCLTCPU {
boolean lwir_subtract_dc = colorProcParameters.lwir_subtract_dc;
boolean lwir_eq_chn = colorProcParameters.lwir_eq_chn;
boolean correct_vignetting = colorProcParameters.correct_vignetting;
// this.is_mono = isMonochrome(); // is_lwir; // maybe add other monochrome?
for (int srcChannel=0; srcChannel < channelFiles.length; srcChannel++){
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final AtomicBoolean aReturnNull = new AtomicBoolean(false);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
// for (int srcChannel=0; srcChannel < channelFiles.length; srcChannel++){
for (int srcChannel = ai.getAndIncrement(); srcChannel < channelFiles.length; srcChannel = ai.getAndIncrement()) {
int nFile=channelFiles[srcChannel]; // channelFiles[srcChannel];
imp_srcs[srcChannel]=null;
if (nFile >=0){
imp_srcs[srcChannel] = eyesisCorrections.getJp4Tiff(sourceFiles[nFile], this.geometryCorrection.woi_tops, this.geometryCorrection.camera_heights);
imp_srcs[srcChannel] = eyesisCorrections.getJp4Tiff(sourceFiles[nFile], geometryCorrection.woi_tops, geometryCorrection.camera_heights);
scaleExposures[srcChannel] = 1.0;
if (!(referenceExposures == null) && !Double.isNaN(referenceExposures[nFile]) && (imp_srcs[srcChannel].getProperty("EXPOSURE")!=null)){
......@@ -4338,7 +4409,7 @@ public class QuadCLTCPU {
imp_srcs[srcChannel].setProperty("channel", srcChannel); // it may already have channel
imp_srcs[srcChannel].setProperty("path", sourceFiles[nFile]); // it may already have channel
if (this.correctionsParameters.pixelDefects && (eyesisCorrections.defectsXY!=null)&& (eyesisCorrections.defectsXY[srcChannel]!=null)){
if (correctionsParameters.pixelDefects && (eyesisCorrections.defectsXY!=null)&& (eyesisCorrections.defectsXY[srcChannel]!=null)){
// apply pixel correction
int numApplied= eyesisCorrections.correctDefects( // not used in lwir
imp_srcs[srcChannel],
......@@ -4353,9 +4424,9 @@ public class QuadCLTCPU {
int height = imp_srcs[srcChannel].getHeight();
if ((debugLevel > -1) && (!isMonochrome())) {
double [] max_pix= {0.0, 0.0, 0.0, 0.0};
// for (int y = 0; y < height-1; y+=2){
// for (int y = 0; y < height-1; y+=2){
for (int y = 0; (y < 499) && (y < height); y+=2){
// for (int x = 0; x < width-1; x+=2){
// for (int x = 0; x < width-1; x+=2){
for (int x = width/2; x < width-1; x+=2){
if (pixels[y*width+x ] > max_pix[0]) max_pix[0] = pixels[y*width+x ];
if (pixels[y*width+x+ 1] > max_pix[1]) max_pix[1] = pixels[y*width+x+ 1];
......@@ -4398,18 +4469,19 @@ public class QuadCLTCPU {
}
if (!is_lwir) { // no vigneting correction and no color scaling
if (this.correctionsParameters.vignetting && correct_vignetting){
if (correctionsParameters.vignetting && correct_vignetting){
if ((eyesisCorrections.channelVignettingCorrection==null) || (srcChannel<0) || (srcChannel>=eyesisCorrections.channelVignettingCorrection.length) || (eyesisCorrections.channelVignettingCorrection[srcChannel]==null)){
if (debugLevel > -3) {
System.out.println("No vignetting data for channel "+srcChannel);
}
return null; // not used in lwir
aReturnNull.set(true);
continue; // return null;
}
/// float [] pixels=(float []) imp_srcs[srcChannel].getProcessor().getPixels();
float [] vign_pixels = eyesisCorrections.channelVignettingCorrection[srcChannel];
if (pixels.length!=vign_pixels.length){
// System.out.println("Vignetting data for channel "+srcChannel+" has "+vign_pixels.length+" pixels, image "+sourceFiles[nFile]+" has "+pixels.length);
// System.out.println("Vignetting data for channel "+srcChannel+" has "+vign_pixels.length+" pixels, image "+sourceFiles[nFile]+" has "+pixels.length);
int woi_width = Integer.parseInt((String) imp_srcs[srcChannel].getProperty("WOI_WIDTH"));
int woi_height = Integer.parseInt((String) imp_srcs[srcChannel].getProperty("WOI_HEIGHT"));
int woi_top = Integer.parseInt((String) imp_srcs[srcChannel].getProperty("WOI_TOP"));
......@@ -4426,17 +4498,20 @@ public class QuadCLTCPU {
if (vign_width < (woi_left + woi_width)) {
System.out.println("Vignetting data for channel "+srcChannel+
" has width + left ("+(woi_left+woi_width)+") > vign_width ("+vign_width+")");
return null;
aReturnNull.set(true);
continue; // return null;
}
if (vign_height < (woi_top + woi_height)) {
System.out.println("Vignetting data for channel "+srcChannel+
" has height + top ("+(woi_top+woi_height)+") > vign_height ("+vign_width+")");
return null;
aReturnNull.set(true);
continue; // return null;
}
if (pixels.length != woi_width * woi_height){
System.out.println("Vignetting data for channel "+srcChannel+" has "+vign_pixels.length+" pixels, < "+
sourceFiles[nFile]+" has "+pixels.length);
return null;
aReturnNull.set(true);
continue; // return null;
}
vign_pixels = new float[woi_width * woi_height];
for (int row = 0; row < woi_height; row++) {
......@@ -4501,12 +4576,26 @@ public class QuadCLTCPU {
}
}
}
} // (int srcChannel=0; srcChannel < channelFiles.length; srcChannel++){
}
};
}
ImageDtt.startAndJoin(threads);
if (aReturnNull.get()) {
return null;
};
// temporary applying scaleExposures[srcChannel] here, setting it to all 1.0
if (debugLevel > -2) {
System.out.println("Temporarily applying scaleExposures[] here - 1" );
}
for (int srcChannel=0; srcChannel<channelFiles.length; srcChannel++){
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
// for (int srcChannel=0; srcChannel<channelFiles.length; srcChannel++){
for (int srcChannel = ai.getAndIncrement(); srcChannel < channelFiles.length; srcChannel = ai.getAndIncrement()) {
if (!is_lwir) {
float [] pixels=(float []) imp_srcs[srcChannel].getProcessor().getPixels();
for (int i = 0; i < pixels.length; i++){
......@@ -4514,8 +4603,12 @@ public class QuadCLTCPU {
}
}
scaleExposures[srcChannel] = 1.0;
}
}
};
}
ImageDtt.startAndJoin(threads);
if ((debugLevel > -1) && (saturation_imp != null) && !is_lwir){
String [] titles = {"chn0","chn1","chn2","chn3"};
......@@ -4560,7 +4653,7 @@ public class QuadCLTCPU {
// once per quad here
// may need to equalize gains between channels
if (!is_lwir && (clt_parameters.gain_equalize || clt_parameters.colors_equalize)){ // false, true
channelGainsEqualize(
channelGainsEqualize( // TODO: not multithreaded - convert
clt_parameters.gain_equalize, //false
clt_parameters.colors_equalize, // true
clt_parameters.nosat_equalize, // boolean nosat_equalize, // true
......@@ -4576,6 +4669,7 @@ public class QuadCLTCPU {
imp_srcs,
lwir_subtract_dc, // boolean remove_dc,
set_name, // just for debug messages == setNames.get(nSet)
threadsMax,
debugLevel);
int num_avg = 0;
this.lwir_offset = 0.0;
......@@ -4594,20 +4688,29 @@ public class QuadCLTCPU {
this.saturation_imp = saturation_imp;
image_data = new double [imp_srcs.length][][];
this.new_image_data = true;
for (int i = 0; i < image_data.length; i++){
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
//for (int i = 0; i < image_data.length; i++){
for (int i = ai.getAndIncrement(); i < image_data.length; i = ai.getAndIncrement()) {
image_data[i] = eyesisCorrections.bayerToDoubleStack(
imp_srcs[i], // source Bayer image, linearized, 32-bit (float))
null, // no margins, no oversample
isMonochrome()); // is_mono);
// TODO: Scale greens here ?
// if (!is_mono && (image_data[i].length > 2)) {
// TODO: Scale greens here ?
// if (!is_mono && (image_data[i].length > 2)) {
if (!isMonochrome() && (image_data[i].length > 2)) {
for (int j =0 ; j < image_data[i][0].length; j++){
image_data[i][2][j]*=0.5; // Scale green 0.5 to compensate more pixels than R,B
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
setTiles (imp_srcs[0], // set global tp.tilesX, tp.tilesY
getNumSensors(), // tp.getNumSensors(),
clt_parameters,
......@@ -4657,7 +4760,8 @@ public class QuadCLTCPU {
boolean [][] saturation_imp = (clt_parameters.sat_level > 0.0)? new boolean[channelFiles.length][] : null;
double [] scaleExposures = new double[channelFiles.length];
ImagePlus [] imp_srcs = conditionImageSet(
// ImagePlus [] imp_srcs =
conditionImageSet(
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
colorProcParameters,
sourceFiles, // String [] sourceFiles,
......@@ -4672,7 +4776,7 @@ public class QuadCLTCPU {
// once per quad here
processCLTQuadCorrCPU( // returns ImagePlus, but it already should be saved/shown
imp_srcs, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
// imp_srcs, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
debayerParameters,
......@@ -5077,13 +5181,28 @@ public class QuadCLTCPU {
ImagePlus [] imp_srcs,
boolean remove_dc,
String setName, // just for debug messages == setNames.get(nSet)
final int threadsMax,
int debugLevel){
double [] offsets = new double [channelFiles.length];
double [][] avr_pix = new double [channelFiles.length][2]; // val/weight
double [] wnd_x = {};
double [] wnd_y = {};
double total_s = 0.0, total_w = 0.0;
for (int srcChannel=0; srcChannel < channelFiles.length; srcChannel++){
// double [] wnd_x = {};
// double [] wnd_y = {};
// double total_s = 0.0, total_w = 0.0;
DoubleAccumulator atotal_s = new DoubleAccumulator(Double::sum, 0L);
DoubleAccumulator atotal_w = new DoubleAccumulator(Double::sum, 0L);
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
double [] wnd_x;
double [] wnd_y;
// for (int srcChannel=0; srcChannel<channelFiles.length; srcChannel++){
for (int srcChannel = ai.getAndIncrement(); srcChannel < channelFiles.length; srcChannel = ai.getAndIncrement()) {
// for (int srcChannel=0; srcChannel < channelFiles.length; srcChannel++){
int nFile=channelFiles[srcChannel];
if (nFile >=0){
avr_pix[srcChannel][0] = 0.0;
......@@ -5091,18 +5210,18 @@ public class QuadCLTCPU {
float [] pixels=(float []) imp_srcs[srcChannel].getProcessor().getPixels();
int width = imp_srcs[srcChannel].getWidth();
int height = imp_srcs[srcChannel].getHeight();
if (wnd_x.length != width) {
// if (wnd_x.length != width) {
wnd_x = new double[width];
for (int i = 0; i < width; i++) {
wnd_x[i] = 0.5 - 0.5*Math.cos(2*Math.PI * (i+1) / (width + 1));
}
}
if (wnd_y.length != height) {
// }
// if (wnd_y.length != height) {
wnd_y = new double[height];
for (int i = 0; i < height; i++) {
wnd_y[i] = 0.5 - 0.5*Math.cos(2*Math.PI * (i+1) / (height + 1));
}
}
// }
int indx = 0;
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
......@@ -5111,22 +5230,36 @@ public class QuadCLTCPU {
avr_pix[srcChannel][1] += w;
}
}
total_s += avr_pix[srcChannel][0];
total_w += avr_pix[srcChannel][1];
// total_s += avr_pix[srcChannel][0];
// total_w += avr_pix[srcChannel][1];
atotal_s.accumulate(avr_pix[srcChannel][0]);
atotal_w.accumulate(avr_pix[srcChannel][1]);
avr_pix[srcChannel][0]/=avr_pix[srcChannel][1]; // weighted average
}
}
double avg = total_s/total_w;
}
};
}
ImageDtt.startAndJoin(threads);
// double avg = total_s/total_w;
double avg = atotal_s.get()/atotal_w.get();
if (!remove_dc) { // not used in lwir
for (int srcChannel=0; srcChannel < channelFiles.length; srcChannel++) if (channelFiles[srcChannel] >=0){
avr_pix[srcChannel][0] -= avg;
}
}
for (int srcChannel=0; srcChannel < channelFiles.length; srcChannel++){
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
// for (int srcChannel=0; srcChannel < channelFiles.length; srcChannel++){
for (int srcChannel = ai.getAndIncrement(); srcChannel < channelFiles.length; srcChannel = ai.getAndIncrement()) {
int nFile=channelFiles[srcChannel];
if (nFile >=0) {
// offsets[srcChannel]= (avr_pix[srcChannel][0] - (remove_dc ? 0.0: avg));
// offsets[srcChannel]= (avr_pix[srcChannel][0] - (remove_dc ? 0.0: avg));
offsets[srcChannel]= avr_pix[srcChannel][0];
float fd = (float)offsets[srcChannel];
float [] pixels = (float []) imp_srcs[srcChannel].getProcessor().getPixels();
......@@ -5135,12 +5268,16 @@ public class QuadCLTCPU {
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
return offsets;
}
public ImagePlus [] processCLTQuadCorrCPU( // USED in lwir
ImagePlus [] imp_quad, // should have properties "name"(base for saving results), "channel","path"
// public ImagePlus [] processCLTQuadCorrCPU( // USED in lwir
public void processCLTQuadCorrCPU( // USED in lwir
// ImagePlus [] imp_quad, // should have properties "name"(base for saving results), "channel","path"
boolean [][] saturation_imp, // (near) saturated pixels or null // Not needed use this.saturation_imp
CLTParameters clt_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
......@@ -5159,12 +5296,13 @@ public class QuadCLTCPU {
ShowDoubleFloatArrays sdfa_instance = new ShowDoubleFloatArrays(); // just for debugging?
// may use this.StartTime to report intermediate steps execution times
/*
ImagePlus [] results = new ImagePlus[imp_quad.length];
for (int i = 0; i < results.length; i++) {
results[i] = imp_quad[i];
results[i].setTitle(results[i].getTitle()+"RAW");
}
*/
if (debugLevel>1) System.out.println("processing: "+image_path);
ImageDtt image_dtt = new ImageDtt(
getNumSensors(),
......@@ -5262,7 +5400,7 @@ public class QuadCLTCPU {
clt_combo_dbg, // final double [][][][] clt_combo_dbg, // generate sparse partial rotated/scaled pairs
disparity_map, // [2][tp.tilesY * tp.tilesX]
texture_tiles, // [tp.tilesY][tp.tilesX]["RGBA".length()][];
imp_quad[0].getWidth(), // final int width,
geometryCorrection.getSensorWH()[0], // imp_quad[0].getWidth(), // final int width,
clt_parameters.getFatZero(isMonochrome()), // add to denominator to modify phase correlation (same units as data1, data2). <0 - pure sum
clt_parameters.corr_sym,
clt_parameters.corr_offset,
......@@ -5603,7 +5741,7 @@ public class QuadCLTCPU {
debugLevel);
for (int ii = 0; ii < clt_set.length; ii++) clt[chn*4+ii] = clt_set[ii];
}
/*
if (debugLevel > 0){
sdfa_instance.showArrays(clt,
tilesX*image_dtt.transform_size,
......@@ -5611,6 +5749,7 @@ public class QuadCLTCPU {
true,
results[iQuad].getTitle()+"-CLT-D"+clt_parameters.disparity);
}
*/
}
iclt_data[iQuad] = new double [clt_data[iQuad].length][];
......@@ -5624,12 +5763,14 @@ public class QuadCLTCPU {
debugLevel);
}
/*
if (clt_parameters.gen_chn_stacks) sdfa_instance.showArrays(
iclt_data[iQuad],
(tilesX + 0) * image_dtt.transform_size,
(tilesY + 0) * image_dtt.transform_size,
true,
results[iQuad].getTitle()+"-ICLT-RGB-D"+clt_parameters.disparity);
*/
} // end of generating shifted channel images
......@@ -5674,7 +5815,7 @@ public class QuadCLTCPU {
iclt_data[iQuad],
tilesX * image_dtt.transform_size,
tilesY * image_dtt.transform_size,
scaleExposures[iQuad], // double scaleExposure, // is it needed?
(scaleExposures == null) ? 1.0 : scaleExposures[iQuad], // double scaleExposure, // is it needed?
debugLevel );
}
......@@ -5695,7 +5836,7 @@ public class QuadCLTCPU {
if (imps_RGB[slice_seq[i]] != null) {
array_stack.addSlice("port_"+slice_seq[i], imps_RGB[slice_seq[i]].getProcessor().getPixels());
} else { // not used in lwir
array_stack.addSlice("port_"+slice_seq[i], results[slice_seq[i]].getProcessor().getPixels());
/// array_stack.addSlice("port_"+slice_seq[i], results[slice_seq[i]].getProcessor().getPixels());
}
}
ImagePlus imp_stack = new ImagePlus(image_name+sAux()+"-SHIFTED-D"+clt_parameters.disparity, array_stack);
......@@ -5814,8 +5955,7 @@ public class QuadCLTCPU {
}
}
}
return results;
// return results;
}
public ImagePlus [] processCLTQuadCorrTestERS(
......@@ -13278,7 +13418,7 @@ public class QuadCLTCPU {
}
if (correctionsParameters.clt_batch_4img){ // not used in lwir
processCLTQuadCorrCPU( // returns ImagePlus, but it already should be saved/shown
imp_srcs, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
// imp_srcs, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
saturation_imp, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
debayerParameters,
......
......@@ -9329,7 +9329,7 @@ if (debugLevel > -100) return true; // temporarily !
if (updateStatus) IJ.showStatus("CPU: Rendering 4 image set (disparity = 0) for "+quadCLT_main.image_name+ "and a thumb nail");
quadCLT_main.processCLTQuadCorrCPU( // returns ImagePlus, but it already should be saved/shown
imp_srcs_main, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
// imp_srcs_main, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
saturation_imp_main, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
debayerParameters,
......@@ -9469,8 +9469,10 @@ if (debugLevel > -100) return true; // temporarily !
} else { // if (quadCLT_main.correctionsParameters.clt_batch_explore) {
int num_restored = 0;
try {
num_restored = quadCLT_main.restoreDSI(DSI_MAIN_SUFFIX, // "-DSI_COMBO", "-DSI_MAIN"
dsi);
num_restored = quadCLT_main.restoreDSI(
DSI_MAIN_SUFFIX, // "-DSI_COMBO", "-DSI_MAIN"
dsi,
false);
} catch (Exception e) {
......@@ -9622,7 +9624,7 @@ if (debugLevel > -100) return true; // temporarily !
if (updateStatus) IJ.showStatus("Rendering 4 AUX image set (disparity = 0) for "+quadCLT_aux.image_name);
quadCLT_aux.processCLTQuadCorrCPU( // returns ImagePlus, but it already should be saved/shown
imp_srcs_aux, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
// imp_srcs_aux, // [srcChannel], // should have properties "name"(base for saving results), "channel","path"
saturation_imp_aux, // boolean [][] saturation_imp, // (near) saturated pixels or null
clt_parameters,
debayerParameters,
......@@ -9674,7 +9676,9 @@ if (debugLevel > -100) return true; // temporarily !
dsi[DSI_DISPARITY_AUX_LMA] = aux_last_scan[2];
// quadCLT_main.saveDSIMain (dsi);
quadCLT_aux.saveDSIAll (dsi);
quadCLT_aux.saveDSIAll (
"-DSI_MAIN", // String suffix, // "-DSI_MAIN"
dsi);
if (clt_parameters.rig.ml_copyJP4) {
copyJP4src(
set_name, // String set_name
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment