Commit 3d083c29 authored by Andrey Filippov's avatar Andrey Filippov

Added infrastructure for testing with GPU code (currently implemented:

DCT/DST-II/IV)
parent 6ad9d157
......@@ -155,6 +155,7 @@ private Panel panel1,
public static QuadCLT QUAD_CLT = null;
public static QuadCLT QUAD_CLT_AUX = null;
public static TwoQuadCLT TWO_QUAD_CLT = null;
public static GPUTileProcessor GPU_TILE_PROCESSOR = null;
public static EyesisCorrectionParameters.DebayerParameters DEBAYER_PARAMETERS = new EyesisCorrectionParameters.DebayerParameters(
64, // size //128;
......@@ -638,6 +639,7 @@ private Panel panel1,
panelClt_GPU.setLayout(new GridLayout(1, 0, 5, 5)); // rows, columns, vgap, hgap
addButton("JCUDA TEST", panelClt_GPU);
addButton("TF TEST", panelClt_GPU);
addButton("Rig8 gpu", panelClt_GPU, color_conf_process);
add(panelClt_GPU);
}
......@@ -4575,6 +4577,12 @@ private Panel panel1,
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
getPairImages2();
return;
/* ======================================================================== */
} else if (label.equals("Rig8 gpu")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
getPairImages2Gpu();
return;
/* ======================================================================== */
} else if (label.equals("RIG extrinsics")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
......@@ -4818,32 +4826,6 @@ private Panel panel1,
CHANNEL_GAINS_PARAMETERS_AUX = new CorrectionColorProc.ColorGainsParameters();
}
CHANNEL_GAINS_PARAMETERS_AUX.getProperties("CHANNEL_GAINS_PARAMETERS.", aux_properties);
/*
* public void copyPropertiesFrom(Properties other_properties, String other_prefix, String this_prefix){ // save
TODO: add some other parameters, like geometry
MASTER_DEBUG_LEVEL = Integer.parseInt(properties.getProperty("MASTER_DEBUG_LEVEL"));
UPDATE_STATUS= Boolean.parseBoolean(properties.getProperty("UPDATE_STATUS"));
SPLIT_PARAMETERS.getProperties("SPLIT_PARAMETERS.", properties);
DCT_PARAMETERS.getProperties("DCT_PARAMETERS.", properties);
CLT_PARAMETERS.getProperties("CLT_PARAMETERS.", properties);
DEBAYER_PARAMETERS.getProperties("DEBAYER_PARAMETERS.", properties);
// NONLIN_PARAMETERS.getProperties("NONLIN_PARAMETERS.", properties);
COLOR_PROC_PARAMETERS.getProperties("COLOR_PROC_PARAMETERS.", properties);
// COLOR_CALIB_PARAMETERS.getProperties("COLOR_CALIB_PARAMETERS.", properties);
RGB_PARAMETERS.getProperties("RGB_PARAMETERS.", properties);
PROCESS_PARAMETERS.getProperties("PROCESS_PARAMETERS.", properties);
CORRECTION_PARAMETERS.getProperties("CORRECTION_PARAMETERS.", properties);
CHANNEL_GAINS_PARAMETERS.getProperties("CHANNEL_GAINS_PARAMETERS.", properties);
EQUIRECTANGULAR_PARAMETERS.getProperties("EQUIRECTANGULAR_PARAMETERS.", properties);
POST_PROCESSING.getProperties("", properties);
CONVOLVE_FFT_SIZE=Integer.parseInt(properties.getProperty("CONVOLVE_FFT_SIZE"));
THREADS_MAX=Integer.parseInt(properties.getProperty("THREADS_MAX"));
GAUSS_WIDTH=Double.parseDouble(properties.getProperty("GAUSS_WIDTH"));
PSF_SUBPIXEL_SHOULD_BE_4=Integer.parseInt(properties.getProperty("PSF_SUBPIXEL_SHOULD_BE_4"));
if (QUAD_CLT != null) QUAD_CLT.getProperties();
*/
if (DEBUG_LEVEL > -1) System.out.println("Importing auxiliary camera parameters for its configuration (where it is main): "+path);
} else {
if (DEBUG_LEVEL > -10) System.out.println("Failed to import auxiliary configuration parameters");
......@@ -5143,6 +5125,65 @@ private Panel panel1,
return true;
}
public boolean getPairImages2Gpu() {
if (!prepareRigImages()) return false;
String configPath=getSaveCongigPath();
if (configPath.equals("ABORT")) return false;
if (DEBUG_LEVEL > -2){
System.out.println("++++++++++++++ Calculating combined correlations ++++++++++++++");
}
// reset if run after the 3D model to save memory
if (QUAD_CLT.tp != null) {
QUAD_CLT.tp.clt_3d_passes = null; // resetCLTPasses();
}
if (QUAD_CLT_AUX.tp != null) {
QUAD_CLT_AUX.tp.clt_3d_passes = null; // resetCLTPasses();
}
if (GPU_TILE_PROCESSOR == null) {
try {
GPU_TILE_PROCESSOR = new GPUTileProcessor();
} catch (Exception e) {
System.out.println("Failed to initialize GPU class");
// TODO Auto-generated catch block
e.printStackTrace();
} //final int debugLevel);
}
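// If initialization failed above, GPU_TILE_PROCESSOR stays null and the GPU call below will throw (and be caught by the try/catch that follows).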
try {
TWO_QUAD_CLT.processCLTQuadCorrPairsGpu(
GPU_TILE_PROCESSOR,
QUAD_CLT, // QuadCLT quadCLT_main,
QUAD_CLT_AUX, // QuadCLT quadCLT_aux,
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
// CHANNEL_GAINS_PARAMETERS, //CorrectionColorProc.ColorGainsParameters channelGainParameters,
// CHANNEL_GAINS_PARAMETERS_AUX, //CorrectionColorProc.ColorGainsParameters channelGainParameters_aux,
RGB_PARAMETERS, //EyesisCorrectionParameters.RGBParameters rgbParameters,
THREADS_MAX, //final int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, //final boolean updateStatus,
DEBUG_LEVEL);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
} //final int debugLevel);
QUAD_CLT.tp.clt_3d_passes = null; // resetCLTPasses(); // so running "Ground truth" after would be OK
QUAD_CLT_AUX.tp.clt_3d_passes = null; //.resetCLTPasses();
if (configPath!=null) {
saveTimestampedProperties( // save config again
configPath, // full path or null
null, // use as default directory if path==null
true,
PROPERTIES);
}
return true;
}
public boolean rigPlanes() {
......
/**
** -----------------------------------------------------------------------------**
** GPUTileProcessor.java
**
** GPU acceleration for the Tile Processor
**
**
** Copyright (C) 2018 Elphel, Inc.
**
** -----------------------------------------------------------------------------**
**
** GPUTileProcessor.java is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program. If not, see <http://www.gnu.org/licenses/>.
** -----------------------------------------------------------------------------**
**
*/
// Uses code by Marco Hutter - http://www.jcuda.org
import static jcuda.driver.JCudaDriver.cuCtxCreate;
import static jcuda.driver.JCudaDriver.cuCtxSynchronize;
import static jcuda.driver.JCudaDriver.cuDeviceGet;
import static jcuda.driver.JCudaDriver.cuInit;
import static jcuda.driver.JCudaDriver.cuLaunchKernel;
import static jcuda.driver.JCudaDriver.cuMemAllocPitch;
import static jcuda.driver.JCudaDriver.cuMemFree;
import static jcuda.driver.JCudaDriver.cuMemcpy2D;
import static jcuda.driver.JCudaDriver.cuModuleGetFunction;
import static jcuda.driver.JCudaDriver.cuModuleLoadData;
import static jcuda.nvrtc.JNvrtc.nvrtcCompileProgram;
import static jcuda.nvrtc.JNvrtc.nvrtcCreateProgram;
import static jcuda.nvrtc.JNvrtc.nvrtcDestroyProgram;
import static jcuda.nvrtc.JNvrtc.nvrtcGetPTX;
import static jcuda.nvrtc.JNvrtc.nvrtcGetProgramLog;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import ij.IJ;
import jcuda.Pointer;
import jcuda.Sizeof;
import jcuda.driver.CUDA_MEMCPY2D;
import jcuda.driver.CUcontext;
import jcuda.driver.CUdevice;
import jcuda.driver.CUdeviceptr;
import jcuda.driver.CUfunction;
import jcuda.driver.CUmemorytype;
import jcuda.driver.CUmodule;
import jcuda.driver.JCudaDriver;
import jcuda.nvrtc.JNvrtc;
import jcuda.nvrtc.nvrtcProgram;
public class GPUTileProcessor {
static String GPU_DTT24_NAME = "GPU_DTT24_DRV"; // this.kernelFunction = createFunction(sourceCode, "GPU_DTT24_DRV"); // "invert");
int DTTTEST_BLOCK_WIDTH = 32; // may be read from the source code
int DTTTEST_BLOCK_HEIGHT = 16; // may be read from the source code
int DTT_SIZE = 8; // may be read from the source code
private CUfunction GPU_DTT24_kernel = null;
public GPUTileProcessor() throws IOException
{
int su = setup();
if (su < 0) {
throw new IllegalArgumentException ("setup() returned "+su);
}
}
// run kernel with dttx
public void exec_dtt24(float src_pixels[],float dst_pixels[], int width, int dtt_mode)
{
if (GPU_DTT24_kernel == null)
{
IJ.showMessage("Error", "No GPU kernel");
return;
}
int height = src_pixels.length / width;
long width_in_bytes = width * Sizeof.FLOAT;
CUdeviceptr src_dpointer = new CUdeviceptr();
CUdeviceptr dst_dpointer = new CUdeviceptr();
long [] device_stride = new long [1];
cuMemAllocPitch (
src_dpointer, // CUdeviceptr dptr,
device_stride, // long[] pPitch,
width_in_bytes, // long WidthInBytes,
height, // long Height,
Sizeof.FLOAT); // int ElementSizeBytes)
int pitchInElements = (int)(device_stride[0] / Sizeof.FLOAT);
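// cuMemAllocPitch() may pad each row for alignment: device_stride[0] is the actual row pitch in bytes,
// converted here to a stride in float elements for the kernel (the second, identically sized allocation below is assumed to get the same pitch).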
cuMemAllocPitch (
dst_dpointer, // CUdeviceptr dptr,
device_stride, // long[] pPitch,
width_in_bytes, // long WidthInBytes,
height, // long Height,
Sizeof.FLOAT); // int ElementSizeBytes)
CUDA_MEMCPY2D copyH2D = new CUDA_MEMCPY2D();
copyH2D.srcMemoryType = CUmemorytype.CU_MEMORYTYPE_HOST;
copyH2D.srcHost = Pointer.to(src_pixels);
copyH2D.srcPitch = width_in_bytes;
copyH2D.dstMemoryType = CUmemorytype.CU_MEMORYTYPE_DEVICE;
copyH2D.dstDevice = src_dpointer;
copyH2D.dstPitch = device_stride[0];
copyH2D.WidthInBytes = width_in_bytes;
copyH2D.Height = height; // /4;
// for copying results back to host
CUDA_MEMCPY2D copyD2H = new CUDA_MEMCPY2D();
copyD2H.srcMemoryType = CUmemorytype.CU_MEMORYTYPE_DEVICE;
copyD2H.srcDevice = dst_dpointer; // ((test & 1) ==0) ? src_dpointer : dst_dpointer; // copy same data
copyD2H.srcPitch = device_stride[0];
copyD2H.dstMemoryType = CUmemorytype.CU_MEMORYTYPE_HOST;
copyD2H.dstHost = Pointer.to(dst_pixels);
copyD2H.dstPitch = width_in_bytes;
copyD2H.WidthInBytes = width_in_bytes;
copyD2H.Height = height; // /2;
// Set up the kernel parameters: A pointer to an array
// of pointers which point to the actual values.
Pointer kernelParameters = Pointer.to(
Pointer.to(dst_dpointer),
Pointer.to(src_dpointer),
Pointer.to(new int[] { pitchInElements }),
Pointer.to(new int[] { dtt_mode })
);
int [] GridFullWarps = {width / DTTTEST_BLOCK_WIDTH, height / DTTTEST_BLOCK_HEIGHT, 1};
int [] ThreadsFullWarps = {DTT_SIZE, DTTTEST_BLOCK_WIDTH/DTT_SIZE, DTTTEST_BLOCK_HEIGHT/DTT_SIZE};
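// Launch geometry: each thread block covers a DTTTEST_BLOCK_WIDTH x DTTTEST_BLOCK_HEIGHT (32x16) pixel area,
// i.e. a 4x2 group of 8x8 tiles, using 8*4*2 = 64 threads; width and height are assumed to be exact multiples
// of the block dimensions (the integer division above drops any remainder).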
// Actual work starts here:
cuMemcpy2D(copyH2D);
cuCtxSynchronize();
// Call the kernel function
cuLaunchKernel(GPU_DTT24_kernel,
GridFullWarps[0], GridFullWarps[1], GridFullWarps[2], // Grid dimension
ThreadsFullWarps[0], ThreadsFullWarps[1],ThreadsFullWarps[2],// Block dimension
0, null, // Shared memory size and stream (shared - only dynamic, static is in code)
kernelParameters, null); // Kernel- and extra parameters
// Copy the data from the device back to the host
cuMemcpy2D(copyD2H);
// clean up
cuMemFree(src_dpointer);
cuMemFree(dst_dpointer);
}
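// Hedged usage sketch (illustrative only; "demo_dtt24" and the buffer handling are assumptions, not part of this commit):
// public static float [] demo_dtt24(float [] src_pixels, int width, int dtt_mode) throws IOException {
//     GPUTileProcessor gtp = new GPUTileProcessor();           // compiles the kernel source and creates the CUDA context
//     float [] dst_pixels = new float [src_pixels.length];
//     gtp.exec_dtt24(src_pixels, dst_pixels, width, dtt_mode); // per-tile 8x8 DTT; dtt_mode presumably selects the DCT/DST-II/IV variant
//     return dst_pixels;
// }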
public int setup() throws IOException // String arg, ImagePlus imagePlus)
{
// Enable exceptions and omit all subsequent error checks
JCudaDriver.setExceptionsEnabled(true);
JNvrtc.setExceptionsEnabled(true);
// Initialize the driver and create a context for the first device.
cuInit(0);
CUdevice device = new CUdevice();
cuDeviceGet(device, 0);
CUcontext context = new CUcontext();
cuCtxCreate(context, 0, device);
// Obtain the CUDA source code from the CUDA file
String cuFileName = "/home/eyesis/workspace-python3/nvidia_dct8x8/src/dtt8x8.cuh";// "dtt8x8.cuh";
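// NOTE: absolute path into the developer's workspace; on another machine this would have to be changed (or the commented-out resource-based loader used instead).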
String sourceCode = readFileAsString(cuFileName); // readResourceAsString(cuFileName);
if (sourceCode == null)
{
IJ.showMessage("Error",
"Could not read the kernel source code");
return -1;
}
// Create the kernel function
this.GPU_DTT24_kernel = createFunction(sourceCode, GPU_DTT24_NAME); // "invert");
return 0;
}
/**
* Create the CUDA function object for the kernel function with the
* given name that is contained in the given source code
*
* @param sourceCode The source code
* @param kernelName The kernel function name
* @return The CUfunction object for the compiled kernel
* @throws IOException
*/
private static CUfunction createFunction(
String sourceCode, String kernelName) throws IOException
{
boolean OK = false;
// Use the NVRTC to create a program by compiling the source code
nvrtcProgram program = new nvrtcProgram();
nvrtcCreateProgram(
program, sourceCode, null, 0, null, null);
try {
nvrtcCompileProgram(program, 0, null);
OK = true;
} catch (Exception e) {
System.out.println("nvrtcCompileProgram() FAILED");
}
// Compilation log with errors/warnings
String programLog[] = new String[1];
nvrtcGetProgramLog(program, programLog);
String log = programLog[0].trim();
if (!log.isEmpty())
{
System.err.println("Program compilation log:\n" + log);
}
if (!OK) {
throw new IOException("Could not compile program");
}
// Get the PTX code of the compiled program (not the binary)
String[] ptx = new String[1];
nvrtcGetPTX(program, ptx);
nvrtcDestroyProgram(program);
// Create a CUDA module from the PTX code
CUmodule module = new CUmodule();
cuModuleLoadData(module, ptx[0]);
// Find the function in the source by name, get its pointer
CUfunction function = new CUfunction();
cuModuleGetFunction(function, module, kernelName);
return function;
}
static String readFileAsString(String path)
{
byte[] encoded;
try {
encoded = Files.readAllBytes(Paths.get(path));
} catch (IOException e) {
return null;
}
return new String(encoded, StandardCharsets.UTF_8);
}
}
......@@ -319,8 +319,109 @@ public class TwoQuadCLT {
}
public void processCLTQuadCorrPairsGpu(
GPUTileProcessor gPUTileProcessor,
QuadCLT quadCLT_main,
QuadCLT quadCLT_aux,
EyesisCorrectionParameters.CLTParameters clt_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
// CorrectionColorProc.ColorGainsParameters channelGainParameters_main,
// CorrectionColorProc.ColorGainsParameters channelGainParameters_aux,
EyesisCorrectionParameters.RGBParameters rgbParameters,
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel) throws Exception
{
this.startTime=System.nanoTime();
String [] sourceFiles=quadCLT_main.correctionsParameters.getSourcePaths();
QuadCLT.SetChannels [] set_channels_main = quadCLT_main.setChannels(debugLevel);
QuadCLT.SetChannels [] set_channels_aux = quadCLT_aux.setChannels(debugLevel);
if ((set_channels_main == null) || (set_channels_main.length==0) || (set_channels_aux == null) || (set_channels_aux.length==0)) {
System.out.println("No files to process (of "+sourceFiles.length+")");
return;
}
double [] referenceExposures_main = quadCLT_main.eyesisCorrections.calcReferenceExposures(debugLevel); // multiply each image by this and divide by individual (if not NaN)
double [] referenceExposures_aux = quadCLT_aux.eyesisCorrections.calcReferenceExposures(debugLevel); // multiply each image by this and divide by individual (if not NaN)
for (int nSet = 0; nSet < set_channels_main.length; nSet++){
// check it is the same set for both cameras
if (set_channels_aux.length <= nSet ) {
throw new Exception ("Set names for cameras do not match: main camera: '"+set_channels_main[nSet].name()+"', aux. camera: nothing");
}
if (!set_channels_main[nSet].name().equals(set_channels_aux[nSet].name())) {
throw new Exception ("Set names for cameras do not match: main camera: '"+set_channels_main[nSet].name()+"', aux. camera: '"+set_channels_main[nSet].name()+"'");
}
int [] channelFiles_main = set_channels_main[nSet].fileNumber();
int [] channelFiles_aux = set_channels_aux[nSet].fileNumber();
boolean [][] saturation_imp_main = (clt_parameters.sat_level > 0.0)? new boolean[channelFiles_main.length][] : null;
boolean [][] saturation_imp_aux = (clt_parameters.sat_level > 0.0)? new boolean[channelFiles_main.length][] : null;
double [] scaleExposures_main = new double[channelFiles_main.length];
double [] scaleExposures_aux = new double[channelFiles_main.length];
ImagePlus [] imp_srcs_main = quadCLT_main.conditionImageSet(
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
sourceFiles, // String [] sourceFiles,
set_channels_main[nSet].name(), // String set_name,
referenceExposures_main, // double [] referenceExposures,
channelFiles_main, // int [] channelFiles,
scaleExposures_main, //output // double [] scaleExposures
saturation_imp_main, //output // boolean [][] saturation_imp,
debugLevel); // int debugLevel);
ImagePlus [] imp_srcs_aux = quadCLT_aux.conditionImageSet(
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
sourceFiles, // String [] sourceFiles,
set_channels_aux[nSet].name(), // String set_name,
referenceExposures_aux, // double [] referenceExposures,
channelFiles_aux, // int [] channelFiles,
scaleExposures_aux, //output // double [] scaleExposures
saturation_imp_aux, //output // boolean [][] saturation_imp,
debugLevel); // int debugLevel);
// Temporarily processing individually with the old code
processCLTQuadCorrPairGpu(
gPUTileProcessor, // GPUTileProcessor gPUTileProcessor,
quadCLT_main, // QuadCLT quadCLT_main,
quadCLT_aux, // QuadCLT quadCLT_aux,
imp_srcs_main, // ImagePlus [] imp_quad_main,
imp_srcs_aux, // ImagePlus [] imp_quad_aux,
saturation_imp_main, // boolean [][] saturation_main, // (near) saturated pixels or null
saturation_imp_aux, // boolean [][] saturation_aux, // (near) saturated pixels or null
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
debayerParameters, // EyesisCorrectionParameters.DebayerParameters debayerParameters,
colorProcParameters, // EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
// channelGainParameters_main, // CorrectionColorProc.ColorGainsParameters channelGainParameters_main,
// channelGainParameters_aux, // CorrectionColorProc.ColorGainsParameters channelGainParameters_aux,
rgbParameters, // EyesisCorrectionParameters.RGBParameters rgbParameters,
scaleExposures_main, // double [] scaleExposures_main, // probably not needed here - restores brightness of the final image
scaleExposures_aux, // double [] scaleExposures_aux, // probably not needed here - restores brightness of the final image
false, // final boolean notch_mode, // use notch filter for inter-camera correlation to detect poles
// averages measurements
clt_parameters.rig.lt_avg_radius,// final int lt_rad, // low texture mode - inter-correlation is averaged between the neighbors before argmax-ing, using
// final boolean apply_corr, // calculate and apply additional fine geometry correction
// final boolean infinity_corr, // calculate and apply geometry correction at infinity
threadsMax, // final int threadsMax, // maximal number of threads to launch
updateStatus, // final boolean updateStatus,
debugLevel); // final int debugLevel);
Runtime.getRuntime().gc();
if (debugLevel >-1) System.out.println("Processing set "+(nSet+1)+" (of "+set_channels_aux.length+") finished at "+
IJ.d2s(0.000000001*(System.nanoTime()-this.startTime),3)+" sec, --- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
if (quadCLT_aux.eyesisCorrections.stopRequested.get()>0) {
System.out.println("User requested stop");
System.out.println("Processing "+(nSet + 1)+" file sets (of "+set_channels_main.length+") finished at "+
IJ.d2s(0.000000001*(System.nanoTime()-this.startTime),3)+" sec, --- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
return;
}
}
System.out.println("processCLTQuadCorrs(): processing "+(quadCLT_main.getTotalFiles(set_channels_main)+quadCLT_aux.getTotalFiles(set_channels_aux))+" files ("+set_channels_main.length+" file sets) finished at "+
IJ.d2s(0.000000001*(System.nanoTime()-this.startTime),3)+" sec, --- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
}
public ImagePlus [] processCLTQuadCorrPair(
QuadCLT quadCLT_main,
......@@ -740,6 +841,529 @@ public class TwoQuadCLT {
if (clt_parameters.gen_chn_img) {
// combine to a sliced color image
// assuming total number of images to be multiple of 4
// int [] slice_seq = {0,1,3,2}; //clockwise
int [] slice_seq = new int[results.length];
for (int i = 0; i < slice_seq.length; i++) {
slice_seq[i] = i ^ ((i >> 1) & 1); // 0,1,3,2,4,5,7,6, ...
}
int width = imps_RGB[0].getWidth();
int height = imps_RGB[0].getHeight();
ImageStack array_stack=new ImageStack(width,height);
for (int i = 0; i<slice_seq.length; i++){
if (imps_RGB[slice_seq[i]] != null) {
array_stack.addSlice("port_"+slice_seq[i], imps_RGB[slice_seq[i]].getProcessor().getPixels());
} else {
array_stack.addSlice("port_"+slice_seq[i], results[slice_seq[i]].getProcessor().getPixels());
}
}
ImagePlus imp_stack = new ImagePlus(name+"-SHIFTED-D"+clt_parameters.disparity, array_stack);
imp_stack.getProcessor().resetMinAndMax();
if (!batch_mode) {
imp_stack.updateAndDraw();
}
//imp_stack.getProcessor().resetMinAndMax();
//imp_stack.show();
// eyesisCorrections.saveAndShow(imp_stack, this.correctionsParameters);
quadCLT_main.eyesisCorrections.saveAndShowEnable(
imp_stack, // ImagePlus imp,
quadCLT_main.correctionsParameters, // EyesisCorrectionParameters.CorrectionParameters correctionsParameters,
true, // boolean enableSave,
!batch_mode) ;// boolean enableShow);
}
if (clt_parameters.gen_4_img) {
// Save as individual JPEG images in the model directory
String x3d_path= quadCLT_main.correctionsParameters.selectX3dDirectory(
name, // quad timestamp. Will be ignored if correctionsParameters.use_x3d_subdirs is false
quadCLT_main.correctionsParameters.x3dModelVersion,
true, // smart,
true); //newAllowed, // save
for (int sub_img = 0; sub_img < imps_RGB.length; sub_img++){
quadCLT_main.eyesisCorrections.saveAndShow(
imps_RGB[sub_img],
x3d_path,
quadCLT_main.correctionsParameters.png && !clt_parameters.black_back,
!batch_mode && clt_parameters.show_textures,
quadCLT_main.correctionsParameters.JPEG_quality, // jpegQuality); // jpegQuality){// <0 - keep current, 0 - force Tiff, >0 use for JPEG
(debugLevel > 0) ? debugLevel : 1); // int debugLevel (print what it saves)
}
String model_path= quadCLT_main.correctionsParameters.selectX3dDirectory(
name, // quad timestamp. Will be ignored if correctionsParameters.use_x3d_subdirs is false
null,
true, // smart,
true); //newAllowed, // save
quadCLT_main.createThumbNailImage(
imps_RGB[0],
model_path,
"thumb",
debugLevel);
}
}
return results;
}
public ImagePlus [] processCLTQuadCorrPairGpu(
GPUTileProcessor gPUTileProcessor,
QuadCLT quadCLT_main,
QuadCLT quadCLT_aux,
ImagePlus [] imp_quad_main,
ImagePlus [] imp_quad_aux,
boolean [][] saturation_main, // (near) saturated pixels or null
boolean [][] saturation_aux, // (near) saturated pixels or null
EyesisCorrectionParameters.CLTParameters clt_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
double [] scaleExposures_main, // probably not needed here - restores brightness of the final image
double [] scaleExposures_aux, // probably not needed here - restores brightness of the final image
boolean notch_mode, // use pole-detection mode for inter-camera correlation
final int lt_rad, // low texture mode - inter-correlation is averaged between the neighbors before argmax-ing, using
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel){
final boolean batch_mode = clt_parameters.batch_run; //disable any debug images
final boolean get_ers = !batch_mode;
// boolean batch_mode = false;
boolean infinity_corr = false;
double [][] scaleExposures= {scaleExposures_main, scaleExposures_aux};
boolean toRGB= quadCLT_main.correctionsParameters.toRGB;
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging? - TODO - move where it belongs
// may use this.StartTime to report intermediate steps execution times
String name=quadCLT_main.correctionsParameters.getModelName((String) imp_quad_main[0].getProperty("name"));
String path= (String) imp_quad_main[0].getProperty("path"); // Only for debug output
ImagePlus [] results = new ImagePlus[imp_quad_main.length + imp_quad_main.length];
for (int i = 0; i < results.length; i++) {
if (i< imp_quad_main.length) {
results[i] = imp_quad_main[i];
} else {
results[i] = imp_quad_main[i-imp_quad_main.length];
}
results[i].setTitle(results[i].getTitle()+"RAW");
}
if (debugLevel>1) System.out.println("processing: "+path);
getRigImageStacks(
clt_parameters, // EyesisCorrectionParameters.CLTParameters clt_parameters,
quadCLT_main, // QuadCLT quadCLT_main,
quadCLT_aux, // QuadCLT quadCLT_aux,
imp_quad_main, // ImagePlus [] imp_quad_main,
imp_quad_aux, // ImagePlus [] imp_quad_aux,
saturation_main, // boolean [][] saturation_main, // (near) saturated pixels or null
saturation_aux, // boolean [][] saturation_aux, // (near) saturated pixels or null
threadsMax, // maximal number of threads to launch
debugLevel); // final int debugLevel);
// temporarily setting up a tile task array (one integer per tile, bitmask);
// for testing it is defined for a window; later the tiles to process will be calculated based on previous passes' results
int [][] tile_op_main = quadCLT_main.tp.setSameTileOp(clt_parameters, clt_parameters.tile_task_op, debugLevel);
// int [][] tile_op_aux = quadCLT_aux.tp.setSameTileOp (clt_parameters, clt_parameters.tile_task_op, debugLevel);
double [][] disparity_array_main = quadCLT_main.tp.setSameDisparity(clt_parameters.disparity); // [tp.tilesY][tp.tilesX] - individual per-tile expected disparity
//TODO: Add array of default disparity - use for combining images in force disparity mode (no correlation), when disparity is predicted from other tiles
// start with all old arrays, remove some later
double [][][][] clt_corr_combo = null;
double [][][][] texture_tiles_main = null; // [tp.tilesY][tp.tilesX]["RGBA".length()][]; // tiles will be 16x16, 2 visualization mode full 16 or overlapped
double [][][][] texture_tiles_aux = null; // [tp.tilesY][tp.tilesX]["RGBA".length()][]; // tiles will be 16x16, 2 visualization mode full 16 or overlapped
// undecided, so 2 modes of combining alpha - same as rgb, or use center tile only
// Assuming same size of both image sets
final int tilesX = quadCLT_main.tp.getTilesX();
final int tilesY = quadCLT_main.tp.getTilesY();
final boolean keep_lt_corr_combo = clt_parameters.corr_keep;
// FIXME: Remove those huge arrays when not needed!
if (clt_parameters.correlate){
if (keep_lt_corr_combo) clt_corr_combo = new double [ImageDtt.TCORR_TITLES.length][tilesY][tilesX][];
texture_tiles_main = new double [tilesY][tilesX][][]; // ["RGBA".length()][];
texture_tiles_aux = new double [tilesY][tilesX][][]; // ["RGBA".length()][];
for (int i = 0; i < tilesY; i++){
for (int j = 0; j < tilesX; j++){
if (keep_lt_corr_combo) for (int k = 0; k<clt_corr_combo.length; k++) clt_corr_combo[k][i][j] = null;
texture_tiles_main[i][j] = null;
texture_tiles_aux[i][j] = null;
}
}
}
double [][] disparity_bimap = new double [ImageDtt.BIDISPARITY_TITLES.length][]; //[0] -residual disparity, [1] - orthogonal (just for debugging) last 4 - max pixel differences
ImageDtt image_dtt = new ImageDtt();
double [][] ml_data = null;
// int [][] woi_tops = {quadCLT_main.woi_tops,quadCLT_aux.woi_tops};
double [][][] ers_delay = get_ers?(new double [2][][]):null;
double [][][][][][] clt_kernels_main = quadCLT_main.getCLTKernels(); // [4][3][123][164]{[64],[64],[64],[64],[8]}
double [][][][][][] clt_kernels_aux = quadCLT_aux.getCLTKernels();
double [][] dbg_kern = clt_kernels_main[0][0][0][0];
// here all data is ready (images, kernels) to try GPU code
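// Minimal GPU round-trip test (debug sketch): for each of the 4 sensor channels the three color planes are summed
// into one monochrome image (stored shifted by one channel index, apparently to make the channel mapping visible),
// dst_bayer is pre-filled with a ramp and then overwritten by the per-tile 8x8 DTT of the matching source plane;
// source/result pairs are displayed side by side below.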
float [][] main_bayer = new float [quadCLT_main.image_data.length][quadCLT_main.image_data[0][0].length];
float [][] dst_bayer = new float [quadCLT_main.image_data.length][quadCLT_main.image_data[0][0].length];
for (int nc = 0; nc < main_bayer.length; nc++) {
int nc1 = (nc +1) % 4;
for (int i = 0; i < main_bayer[nc].length; i++) {
main_bayer[nc1][i] = (float) (quadCLT_main.image_data[nc][0][i] + quadCLT_main.image_data[nc][1][i] + quadCLT_main.image_data[nc][2][i]);
dst_bayer[nc][i]= nc*main_bayer[nc].length + i;
}
}
int iwidth = imp_quad_main[0].getWidth();
String [] dbg_titles= {"src0","dst0","src1","dst1","src2","dst2","src3","dst3"};
for (int nc = 0; nc < main_bayer.length; nc++) {
gPUTileProcessor.exec_dtt24(
main_bayer[nc], // float src_pixels[],
dst_bayer[nc], // float dst_pixels[],
iwidth, // int width,
0); // int dtt_mode);
}
float [][] both_bayer = {main_bayer[0],dst_bayer[0],main_bayer[1],dst_bayer[1],main_bayer[2],dst_bayer[2],main_bayer[3],dst_bayer[3]};
(new showDoubleFloatArrays()).showArrays(
both_bayer,
iwidth,
main_bayer[0].length / iwidth,
true,
"converted",
dbg_titles);
if (debugLevel < 1000) {
return null;
}
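// Everything below runs only when debugLevel >= 1000 - the early return above normally ends the GPU test here.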
final double [][][][][][][] clt_bidata = // new double[2][quad][nChn][tilesY][tilesX][][]; // first index - main/aux
image_dtt.clt_bi_quad (
clt_parameters, // final EyesisCorrectionParameters.CLTParameters clt_parameters,
clt_parameters.fat_zero, // final double fatzero, // May use correlation fat zero from 2 different parameters - fat_zero and rig.ml_fatzero
notch_mode, // final boolean notch_mode, // use notch filter for inter-camera correlation to detect poles
lt_rad, // final int lt_rad, // low texture mode - inter-correlation is averaged between the neighbors before argmax-ing, using
// first measurement - use default setting
clt_parameters.rig.no_int_x0, // boolean no_int_x0, // do not offset window to integer maximum - used when averaging low textures to avoid "jumps" for very wide
tile_op_main, // final int [][] tile_op_main, // [tilesY][tilesX] - what to do - 0 - nothing for this tile
disparity_array_main, // final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
quadCLT_main.image_data, // final double [][][] image_data_main, // first index - number of image in a quad
quadCLT_aux.image_data, // final double [][][] image_data_aux, // first index - number of image in a quad
quadCLT_main.saturation_imp, // final boolean [][] saturation_main, // (near) saturated pixels or null
quadCLT_aux.saturation_imp, // final boolean [][] saturation_aux, // (near) saturated pixels or null
// correlation results - combo will be for the correlation between two quad cameras
clt_corr_combo, // final double [][][][] clt_corr_combo, // [type][tilesY][tilesX][(2*transform_size-1)*(2*transform_size-1)] // if null - will not calculate
// [type][tilesY][tilesX] should be set by caller
// types: 0 - selected correlation (product+offset), 1 - sum
disparity_bimap, // final double [][] disparity_bimap, // [23][tilesY][tilesX]
ml_data, // final double [][] ml_data, // data for ML - 10 layers - 4 center areas (3x3, 5x5,..) per camera-per direction, 1 - composite, and 1 with just 1 data (target disparity)
texture_tiles_main, // final double [][][][] texture_tiles_main, // [tilesY][tilesX]["RGBA".length()][]; null - will skip images combining
texture_tiles_aux, // final double [][][][] texture_tiles_aux, // [tilesY][tilesX]["RGBA".length()][]; null - will skip images combining
imp_quad_main[0].getWidth(), // final int width,
quadCLT_main.getGeometryCorrection(), // final GeometryCorrection geometryCorrection_main,
quadCLT_aux.getGeometryCorrection(), // final GeometryCorrection geometryCorrection_aux,
quadCLT_main.getCLTKernels(), // final double [][][][][][] clt_kernels_main, // [channel_in_quad][color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
quadCLT_aux.getCLTKernels(), // final double [][][][][][] clt_kernels_aux, // [channel_in_quad][color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
clt_parameters.corr_magic_scale, // final double corr_magic_scale, // still not understood coefficient that reduces reported disparity value. Seems to be around 0.85
true, // final boolean keep_clt_data,
// woi_tops, // final int [][] woi_tops,
ers_delay, // final double [][][] ers_delay, // if not null - fill with tile center acquisition delay
threadsMax, // final int threadsMax, // maximal number of threads to launch
debugLevel); // final int globalDebugLevel);
if (ers_delay !=null) {
showERSDelay(ers_delay);
}
double [][] texture_nonoverlap_main = null;
double [][] texture_nonoverlap_aux = null;
double [][] texture_overlap_main = null;
double [][] texture_overlap_aux = null;
String [] rgba_titles = {"red","blue","green","alpha"};
String [] rgba_weights_titles = {"red","blue","green","alpha","port0","port1","port2","port3","r-rms","b-rms","g-rms","w-rms"};
if ((texture_tiles_main != null) && (texture_tiles_aux != null)){
if (clt_parameters.show_nonoverlap){
texture_nonoverlap_main = image_dtt.combineRGBATiles(
texture_tiles_main, // array [tp.tilesY][tp.tilesX][4][4*transform_size] or [tp.tilesY][tp.tilesX]{null}
clt_parameters.transform_size,
false, // when false - output each tile as 16x16, true - overlap to make 8x8
clt_parameters.sharp_alpha, // combining mode for alpha channel: false - treat as RGB, true - apply center 8x8 only
threadsMax, // maximal number of threads to launch
debugLevel);
sdfa_instance.showArrays(
texture_nonoverlap_main,
tilesX * (2 * clt_parameters.transform_size),
tilesY * (2 * clt_parameters.transform_size),
true,
name + "-TXTNOL-D"+clt_parameters.disparity+"-MAIN",
(clt_parameters.keep_weights?rgba_weights_titles:rgba_titles));
texture_nonoverlap_aux = image_dtt.combineRGBATiles(
texture_tiles_aux, // array [tp.tilesY][tp.tilesX][4][4*transform_size] or [tp.tilesY][tp.tilesX]{null}
clt_parameters.transform_size,
false, // when false - output each tile as 16x16, true - overlap to make 8x8
clt_parameters.sharp_alpha, // combining mode for alpha channel: false - treat as RGB, true - apply center 8x8 only
threadsMax, // maximal number of threads to launch
debugLevel);
sdfa_instance.showArrays(
texture_nonoverlap_aux,
tilesX * (2 * clt_parameters.transform_size),
tilesY * (2 * clt_parameters.transform_size),
true,
name + "-TXTNOL-D"+clt_parameters.disparity+"-AUX",
(clt_parameters.keep_weights?rgba_weights_titles:rgba_titles));
}
if (!infinity_corr && (clt_parameters.show_overlap || clt_parameters.show_rgba_color)){
int alpha_index = 3;
texture_overlap_main = image_dtt.combineRGBATiles(
texture_tiles_main, // array [tp.tilesY][tp.tilesX][4][4*transform_size] or [tp.tilesY][tp.tilesX]{null}
clt_parameters.transform_size,
true, // when false - output each tile as 16x16, true - overlap to make 8x8
clt_parameters.sharp_alpha, // combining mode for alpha channel: false - treat as RGB, true - apply center 8x8 only
threadsMax, // maximal number of threads to launch
debugLevel);
if (clt_parameters.alpha1 > 0){ // negative or 0 - keep alpha as it was
double scale = (clt_parameters.alpha1 > clt_parameters.alpha0) ? (1.0/(clt_parameters.alpha1 - clt_parameters.alpha0)) : 0.0;
for (int i = 0; i < texture_overlap_main[alpha_index].length; i++){
double d = texture_overlap_main[alpha_index][i];
if (d >=clt_parameters.alpha1) d = 1.0;
else if (d <=clt_parameters.alpha0) d = 0.0;
else d = scale * (d- clt_parameters.alpha0);
texture_overlap_main[alpha_index][i] = d;
}
}
texture_overlap_aux = image_dtt.combineRGBATiles(
texture_tiles_aux, // array [tp.tilesY][tp.tilesX][4][4*transform_size] or [tp.tilesY][tp.tilesX]{null}
clt_parameters.transform_size,
true, // when false - output each tile as 16x16, true - overlap to make 8x8
clt_parameters.sharp_alpha, // combining mode for alpha channel: false - treat as RGB, true - apply center 8x8 only
threadsMax, // maximal number of threads to launch
debugLevel);
if (clt_parameters.alpha1 > 0){ // negative or 0 - keep alpha as it was
double scale = (clt_parameters.alpha1 > clt_parameters.alpha0) ? (1.0/(clt_parameters.alpha1 - clt_parameters.alpha0)) : 0.0;
for (int i = 0; i < texture_overlap_aux[alpha_index].length; i++){
double d = texture_overlap_aux[alpha_index][i];
if (d >=clt_parameters.alpha1) d = 1.0;
else if (d <=clt_parameters.alpha0) d = 0.0;
else d = scale * (d- clt_parameters.alpha0);
texture_overlap_aux[alpha_index][i] = d;
}
}
if (!batch_mode && clt_parameters.show_overlap) {
sdfa_instance.showArrays(
texture_overlap_main,
tilesX * clt_parameters.transform_size,
tilesY * clt_parameters.transform_size,
true,
name + "-TXTOL-D"+clt_parameters.disparity+"-MAIN",
(clt_parameters.keep_weights?rgba_weights_titles:rgba_titles));
}
if (!batch_mode && clt_parameters.show_overlap) {
sdfa_instance.showArrays(
texture_overlap_aux,
tilesX * clt_parameters.transform_size,
tilesY * clt_parameters.transform_size,
true,
name + "-TXTOL-D"+clt_parameters.disparity+"-AUX",
(clt_parameters.keep_weights?rgba_weights_titles:rgba_titles));
}
if (!batch_mode && clt_parameters.show_rgba_color) {
// for now - use just RGB. Later add option for RGBA
double [][] texture_rgb_main = {texture_overlap_main[0],texture_overlap_main[1],texture_overlap_main[2]};
double [][] texture_rgba_main = {texture_overlap_main[0],texture_overlap_main[1],texture_overlap_main[2],texture_overlap_main[3]};
double [][] texture_rgb_aux = {texture_overlap_aux[0],texture_overlap_aux[1],texture_overlap_aux[2]};
double [][] texture_rgba_aux = {texture_overlap_aux[0],texture_overlap_aux[1],texture_overlap_aux[2],texture_overlap_aux[3]};
ImagePlus imp_texture_main = quadCLT_main.linearStackToColor(
clt_parameters,
colorProcParameters,
rgbParameters,
name+"-texture", // String name,
"-D"+clt_parameters.disparity+"-MAIN", //String suffix, // such as disparity=...
toRGB,
!quadCLT_main.correctionsParameters.jpeg, // boolean bpp16, // 16-bit per channel color mode for result
false, // true, // boolean saveShowIntermediate, // save/show if set globally
false, // true, // boolean saveShowFinal, // save/show result (color image?)
((clt_parameters.alpha1 > 0)? texture_rgba_main: texture_rgb_main),
tilesX * clt_parameters.transform_size,
tilesY * clt_parameters.transform_size,
1.0, // double scaleExposure, // is it needed?
debugLevel );
ImagePlus imp_texture_aux = quadCLT_aux.linearStackToColor(
clt_parameters,
colorProcParameters,
rgbParameters,
name+"-texture", // String name,
"-D"+clt_parameters.disparity+"-AUX", //String suffix, // such as disparity=...
toRGB,
!quadCLT_aux.correctionsParameters.jpeg, // boolean bpp16, // 16-bit per channel color mode for result
false, // true, // boolean saveShowIntermediate, // save/show if set globally
false, // true, // boolean saveShowFinal, // save/show result (color image?)
((clt_parameters.alpha1 > 0)? texture_rgba_aux: texture_rgb_aux),
tilesX * clt_parameters.transform_size,
tilesY * clt_parameters.transform_size,
1.0, // double scaleExposure, // is it needed?
debugLevel );
int width = imp_texture_main.getWidth();
int height =imp_texture_main.getHeight();
ImageStack texture_stack=new ImageStack(width,height);
texture_stack.addSlice("main", imp_texture_main.getProcessor().getPixels());
texture_stack.addSlice("auxiliary", imp_texture_aux. getProcessor().getPixels());
ImagePlus imp_texture_stack = new ImagePlus(name+"-TEXTURES-D"+clt_parameters.disparity, texture_stack);
imp_texture_stack.getProcessor().resetMinAndMax();
// imp_texture_stack.updateAndDraw();
imp_texture_stack.show();
}
}
}
// visualize correlation results (will be for inter-pair correlation)
if (clt_corr_combo!=null){
if (disparity_bimap != null){
if (!batch_mode && clt_parameters.show_map && (debugLevel > -2)){
sdfa_instance.showArrays(
disparity_bimap,
tilesX,
tilesY,
true,
name+"-DISP_MAP-D"+clt_parameters.disparity,
ImageDtt.BIDISPARITY_TITLES);
boolean [] trusted = getTrustedDisparity(
quadCLT_main, // QuadCLT quadCLT_main, // tiles should be set
quadCLT_aux, // QuadCLT quadCLT_aux,
true, // boolean use_individual,
0.14, // double min_combo_strength, // check correlation strength combined for all 3 correlations
clt_parameters.grow_disp_trust, // double max_trusted_disparity, // 4.0 -> change to rig_trust
1.0, // double trusted_tolerance,
null, // boolean [] was_trusted,
disparity_bimap ); // double [][] bimap // current state of measurements
for (int layer = 0; layer < disparity_bimap.length; layer ++) if (disparity_bimap[layer] != null){
for (int nTile = 0; nTile < disparity_bimap[layer].length; nTile++) {
if (!trusted[nTile]) disparity_bimap[layer][nTile] = Double.NaN;
}
}
sdfa_instance.showArrays(
disparity_bimap,
tilesX,
tilesY,
true,
name+"-DISP_MAP_TRUSTED-D"+clt_parameters.disparity,
ImageDtt.BIDISPARITY_TITLES);
}
}
if (!batch_mode && !infinity_corr && clt_parameters.corr_show && (debugLevel > -2)){
double [][] corr_rslt = new double [clt_corr_combo.length][];
String [] titles = new String[clt_corr_combo.length]; // {"combo","sum"};
for (int i = 0; i< titles.length; i++) titles[i] = ImageDtt.TCORR_TITLES[i];
for (int i = 0; i<corr_rslt.length; i++) {
corr_rslt[i] = image_dtt.corr_dbg(
clt_corr_combo[i],
2*clt_parameters.transform_size - 1,
clt_parameters.corr_border_contrast,
threadsMax,
debugLevel);
}
sdfa_instance.showArrays(
corr_rslt,
tilesX*(2*clt_parameters.transform_size),
tilesY*(2*clt_parameters.transform_size),
true,
name + "-CORR-D"+clt_parameters.disparity,
titles );
}
}
// final double [][][][][][][] clt_bidata = // new double[2][quad][nChn][tilesY][tilesX][][]; // first index - main/aux
int quad_main = clt_bidata[0].length;
int quad_aux = clt_bidata[1].length;
if (!infinity_corr && (clt_parameters.gen_chn_img || clt_parameters.gen_4_img || clt_parameters.gen_chn_stacks)) {
ImagePlus [] imps_RGB = new ImagePlus[quad_main+quad_aux];
for (int iQuadComb = 0; iQuadComb < imps_RGB.length; iQuadComb++){
int iAux = (iQuadComb >= quad_main) ? 1 : 0;
int iSubCam= iQuadComb - iAux * quad_main;
// Uncomment to have master/aux names
// String title=name+"-"+String.format("%s%02d", ((iAux>0)?"A":"M"),iSubCam);
String title=name+"-"+String.format("%02d", iQuadComb);
if (clt_parameters.corr_sigma > 0){ // no filter at all
for (int chn = 0; chn < clt_bidata[iAux][iSubCam].length; chn++) {
image_dtt.clt_lpf(
clt_parameters.corr_sigma,
clt_bidata[iAux][iSubCam][chn],
clt_parameters.transform_size,
threadsMax,
debugLevel);
}
}
if (debugLevel > 0){
System.out.println("--tilesX="+tilesX);
System.out.println("--tilesY="+tilesY);
}
if (!batch_mode && (debugLevel > 0)){
double [][] clt = new double [clt_bidata[iAux][iSubCam].length*4][];
for (int chn = 0; chn < clt_bidata[iAux][iSubCam].length; chn++) {
double [][] clt_set = image_dtt.clt_dbg(
clt_bidata[iAux][iSubCam][chn],
threadsMax,
debugLevel);
for (int ii = 0; ii < clt_set.length; ii++) clt[chn*4+ii] = clt_set[ii];
}
if (debugLevel > 0){
sdfa_instance.showArrays(clt,
tilesX*clt_parameters.transform_size,
tilesY*clt_parameters.transform_size,
true,
results[iQuadComb].getTitle()+"-CLT-D"+clt_parameters.disparity);
}
}
double [][] iclt_data = new double [clt_bidata[iAux][iSubCam].length][];
for (int chn=0; chn<iclt_data.length;chn++){
iclt_data[chn] = image_dtt.iclt_2d(
clt_bidata[iAux][iSubCam][chn], // scanline representation of dcd data, organized as dct_size x dct_size tiles
clt_parameters.transform_size, // final int
clt_parameters.clt_window, // window_type
15, // clt_parameters.iclt_mask, //which of 4 to transform back
0, // clt_parameters.dbg_mode, //which of 4 to transform back
threadsMax,
debugLevel);
}
if (clt_parameters.gen_chn_stacks) sdfa_instance.showArrays(iclt_data,
(tilesX + 0) * clt_parameters.transform_size,
(tilesY + 0) * clt_parameters.transform_size,
true,
results[iQuadComb].getTitle()+"-ICLT-RGB-D"+clt_parameters.disparity);
if (!clt_parameters.gen_chn_img) continue;
imps_RGB[iQuadComb] = quadCLT_main.linearStackToColor( // probably no need to separate and process the second half with quadCLT_aux
clt_parameters,
colorProcParameters,
rgbParameters,
title, // String name,
"-D"+clt_parameters.disparity, //String suffix, // such as disparity=...
toRGB,
!quadCLT_main.correctionsParameters.jpeg, // boolean bpp16, // 16-bit per channel color mode for result
!batch_mode, // true, // boolean saveShowIntermediate, // save/show if set globally
false, // boolean saveShowFinal, // save/show result (color image?)
iclt_data,
tilesX * clt_parameters.transform_size,
tilesY * clt_parameters.transform_size,
scaleExposures[iAux][iSubCam], // double scaleExposure, // is it needed?
debugLevel );
} // end of generating shifted channel images
if (clt_parameters.gen_chn_img) {
// combine to a sliced color image
// assuming total number of images to be multiple of 4
......