Commit 81a46af5 authored by Andrey Filippov

Implemented enlarged reference window for rendering

parent 33977018
......@@ -102,6 +102,7 @@ public class GPUTileProcessor {
static String GPU_CLEAR_TEXTURE_RBGA_NAME = "clear_texture_rbga";
static String GPU_TEXTURES_ACCUMULATE_NAME = "textures_accumulate";
static String GPU_CREATE_NONOVERLAP_LIST_NAME ="create_nonoverlap_list";
static String GPU_ERASE_CLT_TILES_NAME = "erase_clt_tiles";
......@@ -180,6 +181,7 @@ public class GPUTileProcessor {
CUfunction GPU_CLEAR_TEXTURE_RBGA_kernel = null; // "clear_texture_rbga"
CUfunction GPU_TEXTURES_ACCUMULATE_kernel = null; // "textures_accumulate"
CUfunction GPU_CREATE_NONOVERLAP_LIST_kernel = null; // "create_nonoverlap_list"
CUfunction GPU_ERASE_CLT_TILES_kernel = null; // "erase_clt_tiles"
......@@ -331,7 +333,8 @@ public class GPUTileProcessor {
GPU_GEN_TEXTURE_LIST_NAME,
GPU_CLEAR_TEXTURE_RBGA_NAME,
GPU_TEXTURES_ACCUMULATE_NAME,
GPU_CREATE_NONOVERLAP_LIST_NAME
GPU_CREATE_NONOVERLAP_LIST_NAME,
GPU_ERASE_CLT_TILES_NAME
};
CUfunction[] functions = createFunctions(kernelSources,
func_names,
......@@ -356,6 +359,7 @@ public class GPUTileProcessor {
GPU_CLEAR_TEXTURE_RBGA_kernel = functions[15];
GPU_TEXTURES_ACCUMULATE_kernel = functions[16];
GPU_CREATE_NONOVERLAP_LIST_kernel = functions[17];
GPU_ERASE_CLT_TILES_kernel = functions[18];
System.out.println("GPU kernel functions initialized");
......@@ -378,6 +382,7 @@ public class GPUTileProcessor {
System.out.println(GPU_CLEAR_TEXTURE_RBGA_kernel.toString());
System.out.println(GPU_TEXTURES_ACCUMULATE_kernel.toString());
System.out.println(GPU_CREATE_NONOVERLAP_LIST_kernel.toString());
System.out.println(GPU_ERASE_CLT_TILES_kernel.toString());
// GPU data structures are now initialized through GpuQuad instances
}
......
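For context, adding a kernel such as erase_clt_tiles requires three coordinated pieces in the hunks above: the name string (GPU_ERASE_CLT_TILES_NAME), the CUfunction field, and its index into the functions[] array returned by createFunctions(). Below is a minimal sketch of how name-to-function resolution is typically done with JCuda's driver API; the helper is hypothetical, and createFunctions() in this project may first compile the kernel sources (e.g. via NVRTC), so the module handling here is an assumption.

import jcuda.driver.CUfunction;
import jcuda.driver.CUmodule;
import static jcuda.driver.JCudaDriver.cuModuleGetFunction;

public class KernelResolutionSketch {
    // Resolve each kernel name in an already loaded/compiled CUmodule into a CUfunction handle.
    static CUfunction[] resolveKernels(CUmodule module, String[] names) {
        CUfunction[] functions = new CUfunction[names.length];
        for (int i = 0; i < names.length; i++) {
            functions[i] = new CUfunction();
            cuModuleGetFunction(functions[i], module, names[i]); // look up by exported kernel name
        }
        return functions;
    }
}

With this pattern the new index (functions[18]) only has to match the position of GPU_ERASE_CLT_TILES_NAME in func_names, which is exactly what the hunks above keep in sync.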
......@@ -1464,33 +1464,44 @@ public class GpuQuad{ // quad camera description
/**
* Direct CLT conversion and aberration correction
*/
public void execConvertDirect() {
public void execConvertDirect(int erase_clt) {
execConvertDirect(
false,
null);
null,
erase_clt);
}
/**
* Convert and save the TD representation in either the normal or the reference scene buffer. The reference scene
* TD representation is used for interscene correlation (for "IMU").
* @param ref_scene save result into a separate buffer for interscene correlation when true.
* @param wh window {width, height} in pixels (or null to use sensor dimensions)
* @param erase_clt erase CLT data. Only needed before execImcltRbgAll() if not all the
* tiles are converted: <0 - do not erase, 0 - erase to 0.0, >0 - erase to NaN
*/
public void execConvertDirect(
boolean ref_scene,
int [] wh) {
boolean ref_scene,
int [] wh,
int erase_clt) {
if (this.gpuTileProcessor.GPU_CONVERT_DIRECT_kernel == null)
{
IJ.showMessage("Error", "No GPU kernel: GPU_CONVERT_DIRECT_kernel");
return;
}
if (this.gpuTileProcessor.GPU_ERASE_CLT_TILES_kernel == null)
{
IJ.showMessage("Error", "No GPU kernel: GPU_ERASE_CLT_TILES_kernel");
return;
}
if (wh == null) {
wh = new int[] {img_width, img_height};
}
setConvolutionKernels(false); // set kernels if they are not set already
setBayerImages(false); // set Bayer images if this.quadCLT instance has new ones
// kernel parameters: pointer to pointers
// int tilesX = img_width / GPUTileProcessor.DTT_SIZE;
int tilesX = wh[0] / GPUTileProcessor.DTT_SIZE;
int tilesY = wh[1] / GPUTileProcessor.DTT_SIZE;
// De-allocate if size mismatch, allocate if needed. Now it is the only place where clt is allocated
if (ref_scene) {
if ((gpu_clt_ref_wh != null) && ((gpu_clt_ref_wh[0] != wh[0]) || (gpu_clt_ref_wh[1] != wh[1]))) {
......@@ -1503,8 +1514,6 @@ public class GpuQuad{ // quad camera description
}
if (gpu_clt_ref == null) { // Allocate memory, create pointers for reference scene TD representation
long [] gpu_clt_ref_l = new long [num_cams];
// int tilesY = img_height / GPUTileProcessor.DTT_SIZE;
int tilesY = wh[1] / GPUTileProcessor.DTT_SIZE;
gpu_clt_ref_h = new CUdeviceptr[num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_ref_h[ncam] = new CUdeviceptr();
......@@ -1530,8 +1539,6 @@ public class GpuQuad{ // quad camera description
}
if (gpu_clt == null) { // Allocate memory, create pointers for the main (non-reference) scene TD representation
long [] gpu_clt_l = new long [num_cams];
// int tilesY = img_height / GPUTileProcessor.DTT_SIZE;
int tilesY = wh[1] / GPUTileProcessor.DTT_SIZE;
gpu_clt_h = new CUdeviceptr[num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_h[ncam] = new CUdeviceptr();
......@@ -1547,28 +1554,32 @@ public class GpuQuad{ // quad camera description
gpu_clt_wh = wh.clone();
}
}
/*
if (ref_scene && (gpu_clt_ref == null)) { // Allocate memory, create pointers for reference scene TD representation
long [] gpu_clt_ref_l = new long [num_cams];
int tilesY = img_height / GPUTileProcessor.DTT_SIZE;
gpu_clt_ref_h = new CUdeviceptr[num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_ref_h[ncam] = new CUdeviceptr();
cuMemAlloc(gpu_clt_ref_h[ncam],
tilesY * tilesX * num_colors * 4 * GPUTileProcessor.DTT_SIZE * GPUTileProcessor.DTT_SIZE * Sizeof.FLOAT );
}
gpu_clt_ref = new CUdeviceptr();
cuMemAlloc(gpu_clt_ref, num_cams * Sizeof.POINTER);
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_ref_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_clt_ref_h[ncam]);
}
cuMemcpyHtoD(gpu_clt_ref, Pointer.to(gpu_clt_ref_l), num_cams * Sizeof.POINTER);
gpu_clt_ref_wh = wh.clone();
}
*/
CUdeviceptr gpu_clt_selected = ref_scene ? gpu_clt_ref : gpu_clt;
int [] GridFullWarps = {1, 1, 1};
int [] ThreadsFullWarps = {1, 1, 1};
if (erase_clt >= 0) {
float fill_data = (erase_clt > 0) ? Float.NaN : 0.0f;
Pointer kernelParametersEraseClt = Pointer.to(
Pointer.to(new int[] { num_cams}), // int num_cams,
Pointer.to(new int[] { num_colors}), // int num_colors,
Pointer.to(new int[] { tilesX}), // int tiles_x,
Pointer.to(new int[] { tilesY}), // int tiles_y,
Pointer.to(gpu_clt_selected), // float ** gpu_clt,// [num_cams][tiles_y][tiles_x][num_colors][4*DTT_SIZE*DTT_SIZE]
Pointer.to(new float[] { fill_data})); // float fill_data
cuCtxSynchronize();
// Call the kernel function
cuLaunchKernel(this.gpuTileProcessor.GPU_ERASE_CLT_TILES_kernel,
GridFullWarps[0], GridFullWarps[1], GridFullWarps[2], // Grid dimension
ThreadsFullWarps[0], ThreadsFullWarps[1],ThreadsFullWarps[2],// Block dimension
0, null, // Shared memory size and stream (shared - only dynamic, static is in code)
kernelParametersEraseClt, null); // Kernel- and extra parameters
cuCtxSynchronize(); // remove later CUDA_ERROR_ILLEGAL_ADDRESS
if (getGpu_debug_level() > -1) {
System.out.println("======execConvertDirect(): erased CLT");
}
}
Pointer kernelParameters = Pointer.to(
Pointer.to(new int[] { num_cams}), // int num_cams,
Pointer.to(new int[] { num_colors}), // int num_colors,
......@@ -1598,7 +1609,7 @@ public class GpuQuad{ // quad camera description
kernelParameters, null); // Kernel- and extra parameters
cuCtxSynchronize(); // remove later CUDA_ERROR_ILLEGAL_ADDRESS
if (getGpu_debug_level() > -1) {
System.out.println("======execConvertDirect("+ref_scene+")");
System.out.println("======execConvertDirect("+ref_scene+", "+erase_clt+")");
}
}
......
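A minimal, self-contained host-side mirror of the erase_clt convention used in execConvertDirect() above. The real erase is performed on the GPU by the erase_clt_tiles kernel; this sketch only illustrates the <0 / 0 / >0 mapping to fill values.

import java.util.Arrays;

public class EraseCltSketch {
    // Mirror of the kernel-side choice: erase_clt < 0 - leave data as is,
    // erase_clt == 0 - fill with 0.0f, erase_clt > 0 - fill with NaN
    // (a NaN background later renders sharp, distinct borders; 0.0f blends).
    static void eraseClt(float[] cltBuffer, int erase_clt) {
        if (erase_clt < 0) {
            return; // do not erase
        }
        float fill_data = (erase_clt > 0) ? Float.NaN : 0.0f;
        Arrays.fill(cltBuffer, fill_data);
    }

    public static void main(String[] args) {
        float[] clt = new float[] {1f, 2f, 3f};
        eraseClt(clt, -1); // unchanged
        eraseClt(clt, 1);  // all NaN
        System.out.println(Arrays.toString(clt));
    }
}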
......@@ -339,7 +339,7 @@ public class ImageDtt extends ImageDttCPU {
}
gpuQuad.execConvertDirect();
gpuQuad.execConvertDirect(-1); // int erase_clt: <0 - do not erase
if (iclt_fimg != null) {
gpuQuad.execImcltRbgAll(isMonochrome()); // execute GPU kernel
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) {
......@@ -958,7 +958,7 @@ public class ImageDtt extends ImageDttCPU {
// Skipping if ((fdisp_dist != null) || (fpxpy != null)) {...
gpuQuad.execConvertDirect();
gpuQuad.execConvertDirect(-1); // int erase_clt: <0 - do not erase
if (mcorr_sel == 0) { // no correlation at all
return;
}
......@@ -1124,7 +1124,7 @@ public class ImageDtt extends ImageDttCPU {
// Skipping if ((fdisp_dist != null) || (fpxpy != null)) {...
gpuQuad.execConvertDirect();
gpuQuad.execConvertDirect(-1); // int erase_clt: <0 - do not erase
if (sensor_mask_inter == 0) { // no correlation at all
return;
}
......@@ -1143,6 +1143,9 @@ public class ImageDtt extends ImageDttCPU {
/**
* Convert reference scene to FD and save the result in an extra GPU array for future interscene correlation.
* Geometry correction and images will come from the gpuQuad instance -
* @param erase_clt erase CLT (<0 - do not erase, 0 - erase to 0.0, >0 - erase to NaN). Needed only for later IMCLT
* and rendering of images. NaN produces sharp, distinct borders; 0.0f - blended
* @param wh if null, use sensor dimensions; otherwise {width, height} in pixels
* @param imgdtt_params
* @param use_reference_buffer true - use extra GPU array, false - use main one
* @param tp_tasks
......@@ -1154,6 +1157,7 @@ public class ImageDtt extends ImageDttCPU {
* @param globalDebugLevel
*/
public void setReferenceTD(
final int erase_clt,
final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
final boolean use_reference_buffer,
......@@ -1186,7 +1190,8 @@ public class ImageDtt extends ImageDttCPU {
false); // boolean use_aux // while is it in class member? - just to be able to free
// Skipping if ((fdisp_dist != null) || (fpxpy != null)) {...
// int [] wh = null;
gpuQuad.execConvertDirect(use_reference_buffer, wh); // put results into a "reference" buffer
// int erase_clt = 1; // NaN;
gpuQuad.execConvertDirect(use_reference_buffer, wh, erase_clt); // put results into a "reference" buffer
}
......@@ -1297,7 +1302,7 @@ public class ImageDtt extends ImageDttCPU {
/// gpuQuad.execSetTilesOffsets(); // prepare tiles offsets in GPU memory
// Skipping if ((fdisp_dist != null) || (fpxpy != null)) {...
gpuQuad.execConvertDirect();
gpuQuad.execConvertDirect(-1); // int erase_clt: <0 - do not erase
//Generate 2D phase correlations from the CLT representation
int mcorr_sel = Correlation2d.corrSelEncode(imgdtt_params,numSensors);
......@@ -1688,7 +1693,7 @@ public class ImageDtt extends ImageDttCPU {
// GPUTileProcessor.TpTask[] tp_tasks_full2)
gpuQuad.execConvertDirect();
gpuQuad.execConvertDirect(-1); // int erase_clt: <0 - do not erase
if (iclt_fimg != null) {
gpuQuad.execImcltRbgAll(isMonochrome()); // execute GPU kernel
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) {
......
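Tying the ImageDtt call sites together: erasing the CLT buffers is only needed when the converted tile set does not cover the whole (possibly enlarged) window that execImcltRbgAll() will later render; otherwise -1 is passed and nothing is touched. A small sketch of that decision, using the same flags as the hunks below (show_color_nan / show_mono_nan); the helper name is hypothetical.

public class EraseCltChoiceSketch {
    // Decide what to pass as erase_clt to execConvertDirect()/setReferenceTD():
    // -1 when every tile of the output window is converted anyway (nothing to erase),
    //  1 to pre-fill unconverted tiles with NaN (sharp, distinct borders),
    //  0 to pre-fill with 0.0f (blended background).
    static int chooseEraseClt(boolean allTilesConverted, boolean toRGB,
                              boolean show_color_nan, boolean show_mono_nan) {
        if (allTilesConverted) {
            return -1;
        }
        return (toRGB ? show_color_nan : show_mono_nan) ? 1 : 0;
    }

    public static void main(String[] args) {
        System.out.println(chooseEraseClt(false, true, true, false)); // 1 - NaN background for color
    }
}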
......@@ -37,6 +37,8 @@ public class IntersceneMatchParameters {
public boolean show_images = false; // color, infinity
public boolean show_images_bgfg = false; // bg and fg
public boolean show_images_mono = false; // float, monochrome 16-slice images (same disparity, COMBO_DSN_INDX_DISP_FG and COMBO_DSN_INDX_DISP_BG_ALL,
public boolean show_color_nan = true; // use NaN background for color images (sharp, but distinct black background)
public boolean show_mono_nan = false; // use NaN background for monochrome images (sharp, but distinct black background)
public boolean show_ranges = true;
public double range_disparity_offset = -0.08;
......@@ -120,9 +122,13 @@ public class IntersceneMatchParameters {
"Show foreground and background exported images");
gd.addCheckbox ("Show floating-point monochrome images", this.show_images_mono,
"Display generated/saved monochrome images");
gd.addCheckbox ("Color NaN background", this.show_color_nan,
"Use NaN for undefined tiles (false - 0.0f). NaN produces sharp distinct result, 0.0f - blended");
gd.addCheckbox ("Mono NaN background", this.show_mono_nan,
"Use NaN for undefined tiles (false - 0.0f). NaN produces sharp distinct result, 0.0f - blended");
gd.addCheckbox ("Show distances in meters", this.show_ranges,
"Calculate strength, distance, X, and Y in meters");
gd.addNumericField("Disparity at infinity", this.range_disparity_offset, 5,7,"pix",
"Disparity at infinity - subtract from measured disparity when converting to ranges.");
gd.addNumericField("Minimal strength for range calculation", this.range_min_strength, 5,7,"",
......@@ -234,6 +240,8 @@ public class IntersceneMatchParameters {
this.show_images = gd.getNextBoolean();
this.show_images_bgfg = gd.getNextBoolean();
this.show_images_mono = gd.getNextBoolean();
this.show_color_nan = gd.getNextBoolean();
this.show_mono_nan = gd.getNextBoolean();
this.show_ranges = gd.getNextBoolean();
this.range_disparity_offset = gd.getNextNumber();
this.range_min_strength = gd.getNextNumber();
......@@ -292,12 +300,12 @@ public class IntersceneMatchParameters {
properties.setProperty(prefix+"show_images", this.show_images + ""); // boolean
properties.setProperty(prefix+"show_images_bgfg", this.show_images_bgfg + ""); // boolean
properties.setProperty(prefix+"show_images_mono", this.show_images_mono + ""); // boolean
properties.setProperty(prefix+"show_color_nan", this.show_color_nan + ""); // boolean
properties.setProperty(prefix+"show_mono_nan", this.show_mono_nan + ""); // boolean
properties.setProperty(prefix+"show_ranges", this.show_ranges + ""); // boolean
properties.setProperty(prefix+"range_disparity_offset",this.range_disparity_offset+""); // double
properties.setProperty(prefix+"range_min_strength", this.range_min_strength+""); // double
properties.setProperty(prefix+"range_max", this.range_max+""); // double
properties.setProperty(prefix+"margin", this.margin+""); // int
properties.setProperty(prefix+"sensor_mask_inter", this.sensor_mask_inter+""); // int
properties.setProperty(prefix+"use_partial", this.use_partial+""); // boolean
......@@ -349,6 +357,8 @@ public class IntersceneMatchParameters {
if (properties.getProperty(prefix+"show_images")!=null) this.show_images=Boolean.parseBoolean(properties.getProperty(prefix+"show_images"));
if (properties.getProperty(prefix+"show_images_bgfg")!=null) this.show_images_bgfg=Boolean.parseBoolean(properties.getProperty(prefix+"show_images_bgfg"));
if (properties.getProperty(prefix+"show_images_mono")!=null) this.show_images_mono=Boolean.parseBoolean(properties.getProperty(prefix+"show_images_mono"));
if (properties.getProperty(prefix+"show_color_nan")!=null) this.show_color_nan=Boolean.parseBoolean(properties.getProperty(prefix+"show_color_nan"));
if (properties.getProperty(prefix+"show_mono_nan")!=null) this.show_mono_nan=Boolean.parseBoolean(properties.getProperty(prefix+"show_mono_nan"));
if (properties.getProperty(prefix+"show_ranges")!=null) this.show_images=Boolean.parseBoolean(properties.getProperty(prefix+"show_ranges"));
if (properties.getProperty(prefix+"range_disparity_offset")!=null) this.range_disparity_offset=Double.parseDouble(properties.getProperty(prefix+"range_disparity_offset"));
if (properties.getProperty(prefix+"range_min_strength")!=null) this.range_min_strength=Double.parseDouble(properties.getProperty(prefix+"range_min_strength"));
......@@ -406,6 +416,8 @@ public class IntersceneMatchParameters {
imp.show_images = this.show_images;
imp.show_images_bgfg = this.show_images_bgfg;
imp.show_images_mono = this.show_images_mono;
imp.show_color_nan = this.show_color_nan;
imp.show_mono_nan = this.show_mono_nan;
imp.show_ranges = this.show_ranges;
imp.range_disparity_offset = this.range_disparity_offset;
imp.range_min_strength = this.range_min_strength;
......
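The two new flags follow the class's usual persistence pattern: each field is written as prefix+name in setProperties() and parsed back with Boolean.parseBoolean() in getProperties(), keeping the current value when the key is absent. A minimal standalone illustration of that round trip; the prefix string in main() is made up for the example.

import java.util.Properties;

public class NanFlagsPropertiesSketch {
    boolean show_color_nan = true;   // NaN background for color images
    boolean show_mono_nan  = false;  // NaN background for monochrome images

    void setProperties(String prefix, Properties properties) {
        properties.setProperty(prefix + "show_color_nan", this.show_color_nan + "");
        properties.setProperty(prefix + "show_mono_nan",  this.show_mono_nan  + "");
    }

    void getProperties(String prefix, Properties properties) {
        if (properties.getProperty(prefix + "show_color_nan") != null)
            this.show_color_nan = Boolean.parseBoolean(properties.getProperty(prefix + "show_color_nan"));
        if (properties.getProperty(prefix + "show_mono_nan") != null)
            this.show_mono_nan = Boolean.parseBoolean(properties.getProperty(prefix + "show_mono_nan"));
    }

    public static void main(String[] args) {
        Properties p = new Properties();
        NanFlagsPropertiesSketch a = new NanFlagsPropertiesSketch();
        a.setProperties("IMP.", p); // hypothetical prefix
        NanFlagsPropertiesSketch b = new NanFlagsPropertiesSketch();
        b.getProperties("IMP.", p);
        System.out.println(b.show_color_nan + " " + b.show_mono_nan);
    }
}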
......@@ -3963,6 +3963,9 @@ public class OpticalFlow {
boolean export_images = clt_parameters.imp.export_images;
boolean export_dsi_image = clt_parameters.imp.show_ranges;
boolean show_images = clt_parameters.imp.show_images;
boolean show_color_nan = clt_parameters.imp.show_color_nan;
boolean show_mono_nan = clt_parameters.imp.show_mono_nan;
boolean show_images_bgfg = clt_parameters.imp.show_images_bgfg;
boolean show_images_mono = clt_parameters.imp.show_images_mono;
......@@ -4279,11 +4282,12 @@ public class OpticalFlow {
Arrays.fill(constant_disparity,clt_parameters.disparity);
Rectangle testr = new Rectangle(10, 8, 100,80);
ImagePlus imp_constant = QuadCLT.renderGPUFromDSI(
testr, // null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
-1, // final int sensor_mask,
null, // testr, // null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
constant_disparity, // double [] disparity_ref,
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
new double[] {.1,0.1,.1}, // ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
ZERO3, // new double[] {.1,0.1,.1}, // ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
quadCLTs[ref_index], // final QuadCLT scene,
true, // toRGB, // final boolean toRGB,
"GPU-SHIFTED-D"+clt_parameters.disparity, // String suffix,
......@@ -4293,7 +4297,8 @@ public class OpticalFlow {
null, // "GPU-SHIFTED-D"+clt_parameters.disparity, // String suffix,
imp_constant); // ImagePlus imp)
ImagePlus imp_constant_mono = QuadCLT.renderGPUFromDSI(
testr, // null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
-1, // final int sensor_mask,
null, // testr, // null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
constant_disparity, // double [] disparity_ref,
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
......@@ -4313,6 +4318,7 @@ public class OpticalFlow {
}
}
ImagePlus imp_fg = QuadCLT.renderGPUFromDSI(
-1, // final int sensor_mask,
null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
fg_disparity, // double [] disparity_ref,
......@@ -4327,6 +4333,7 @@ public class OpticalFlow {
null, // "GPU-SHIFTED-FOREGROUND", // String suffix,
imp_fg); // ImagePlus imp)
ImagePlus imp_fg_mono = QuadCLT.renderGPUFromDSI(
-1, // final int sensor_mask,
null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
fg_disparity, // double [] disparity_ref,
......@@ -4347,6 +4354,7 @@ public class OpticalFlow {
}
}
ImagePlus imp_bg = QuadCLT.renderGPUFromDSI(
-1, // final int sensor_mask,
null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
bg_disparity, // double [] disparity_ref,
......@@ -4361,6 +4369,7 @@ public class OpticalFlow {
null, // "GPU-SHIFTED-BACKGROUND", // String suffix,
imp_bg); // ImagePlus imp)
ImagePlus imp_bg_mono = QuadCLT.renderGPUFromDSI(
-1, // final int sensor_mask,
null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
bg_disparity, // double [] disparity_ref,
......@@ -9812,6 +9821,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
final float [][][] accum_2d_corr, // if [1][][] - return accumulated 2d correlations (all pairs)
int debug_level)
{
TileProcessor tp = ref_scene.getTileProcessor();
// Temporary reusing same ref scene ******
boolean scene_is_ref_test = clt_parameters.imp.scene_is_ref_test; // false; // true;
......@@ -9821,6 +9831,8 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
boolean show_render_scene = clt_parameters.imp.renderScene(); // false; // true;
boolean toRGB = clt_parameters.imp.toRGB ; // true;
boolean show_coord_motion = clt_parameters.imp.showCorrMotion(); // make its own
int erase_clt = (toRGB? clt_parameters.imp.show_color_nan : clt_parameters.imp.show_mono_nan) ? 1:0;
if (scene_is_ref_test) {
scene_xyz = ZERO3.clone();
scene_atr = ZERO3.clone();
......@@ -9907,6 +9919,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
float [][][][] fcorr_td = null; // no accumulation, use data in GPU
ref_scene.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
image_dtt.setReferenceTD(
erase_clt,
null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
true, // final boolean use_reference_buffer,
......@@ -9919,6 +9932,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
debug_level); // final int globalDebugLevel);
if (show_render_ref) {
ImagePlus imp_render_ref = ref_scene.renderFromTD (
-1, // final int sensor_mask,
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(ref_scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
......@@ -9948,6 +9962,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
debug_level); // final int globalDebugLevel);
if (show_render_scene) {
ImagePlus imp_render_scene = scene.renderFromTD (
-1, // final int sensor_mask,
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(ref_scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
......
......@@ -2031,6 +2031,7 @@ public class QuadCLT extends QuadCLTCPU {
}
public static ImagePlus renderGPUFromDSI(
final int sensor_mask,
final Rectangle full_woi_in, // show larger than sensor WOI in tiles (or null)
CLTParameters clt_parameters,
double [] disparity_ref,
......@@ -2041,6 +2042,7 @@ public class QuadCLT extends QuadCLTCPU {
String suffix,
int threadsMax,
final int debugLevel){
boolean show_nan = toRGB? clt_parameters.imp.show_color_nan : clt_parameters.imp.show_mono_nan;
double [][] pXpYD =OpticalFlow.transformToScenePxPyD(
full_woi_in, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity
......@@ -2080,7 +2082,9 @@ public class QuadCLT extends QuadCLTCPU {
full_woi_in.width * GPUTileProcessor.DTT_SIZE,
full_woi_in.height * GPUTileProcessor.DTT_SIZE};
// boolean toRGB = true; // does not work here, define in ColorProcParameters
int erase_clt = show_nan ? 1:0;
image_dtt.setReferenceTD( // change to main?
erase_clt, //final int erase_clt,
wh, // null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
use_reference, // true, // final boolean use_reference_buffer,
......@@ -2092,6 +2096,7 @@ public class QuadCLT extends QuadCLTCPU {
threadsMax, // final int threadsMax, // maximal number of threads to launch
debugLevel); // final int globalDebugLevel);
ImagePlus imp_render = scene.renderFromTD (
sensor_mask, // final int sensor_mask,
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
......@@ -2102,72 +2107,9 @@ public class QuadCLT extends QuadCLTCPU {
return imp_render;
}
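The enlarged window is specified in tiles (the Rectangle testr defined earlier is 100 x 80 tiles) and converted to pixel dimensions by multiplying by the transform size before being passed to setReferenceTD(). A tiny sketch of that conversion; DTT_SIZE = 8 is assumed here for the arithmetic (the real code takes it from GPUTileProcessor).

import java.awt.Rectangle;

public class WoiToPixelsSketch {
    static final int DTT_SIZE = 8; // assumed transform size, as used by GPUTileProcessor

    // Convert an enlarged window-of-interest given in tiles into {width, height} in pixels,
    // as passed to setReferenceTD(); null means "use sensor dimensions".
    static int[] whFromWoi(Rectangle full_woi_in) {
        if (full_woi_in == null) {
            return null;
        }
        return new int[] {
                full_woi_in.width  * DTT_SIZE,
                full_woi_in.height * DTT_SIZE};
    }

    public static void main(String[] args) {
        int[] wh = whFromWoi(new Rectangle(10, 8, 100, 80)); // same shape as testr above
        System.out.println(wh[0] + " x " + wh[1]);           // 800 x 640 pixels
    }
}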
@Deprecated
public static ImagePlus renderGPUFromDSI( // being replaced by the above
CLTParameters clt_parameters,
double [] disparity_ref,
final double [] scene_xyz, // camera center in world coordinates
final double [] scene_atr, // camera orientation relative to world frame
final QuadCLT scene,
final boolean toRGB,
String suffix,
int threadsMax,
final int debugLevel){
double [][] pXpYD =OpticalFlow.transformToScenePxPyD(
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scene, // final QuadCLT scene_QuadClt,
scene, // final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
threadsMax); // int threadsMax)
TpTask[] tp_tasks_ref = GpuQuad.setInterTasks(
scene.getNumSensors(),
scene.getErsCorrection().getSensorWH()[0],
!scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
0.0, // final double disparity_corr,
0, // margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
threadsMax); // final int threadsMax) // maximal number of threads to launch
scene.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
ImageDtt image_dtt = new ImageDtt(
scene.getNumSensors(),
clt_parameters.transform_size,
clt_parameters.img_dtt,
scene.isAux(),
scene.isMonochrome(),
scene.isLwir(),
clt_parameters.getScaleStrength(scene.isAux()),
scene.getGPU());
boolean use_reference = false;
// boolean toRGB = true; // does not work here, define in ColorProcParameters
image_dtt.setReferenceTD( // change to main?
null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
use_reference, // true, // final boolean use_reference_buffer,
tp_tasks_ref, // final TpTask[] tp_tasks,
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
threadsMax, // final int threadsMax, // maximal number of threads to launch
debugLevel); // final int globalDebugLevel);
ImagePlus imp_render = scene.renderFromTD (
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
null, // int [] wh,
toRGB, // boolean toRGB,
use_reference, //boolean use_reference
suffix); // String suffix)
return imp_render;
}
public ImagePlus renderFromTD (
int sensor_mask,
CLTParameters clt_parameters,
ColorProcParameters colorProcParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
......@@ -2183,7 +2125,7 @@ public class QuadCLT extends QuadCLTCPU {
wh); //int [] wh
// get data back from GPU
float [][][] iclt_fimg = new float [getNumSensors()][][];
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) {
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) if (((1 << ncam) & sensor_mask) != 0){
iclt_fimg[ncam] = gpuQuad.getRBG(ncam); // updated window
}
// 2022/06/15 - handles variable window size
......@@ -2213,7 +2155,7 @@ public class QuadCLT extends QuadCLTCPU {
}
/* Prepare 4-channel images*/
ImagePlus [] imps_RGB = new ImagePlus[iclt_fimg.length];
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) {
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) if (iclt_fimg[ncam] != null){
String title=String.format("%s%s-%02d",image_name, sAux(), ncam);
imps_RGB[ncam] = linearStackToColor( // probably no need to separate and process the second half with quadCLT_aux (!)
clt_parameters,
......@@ -2243,7 +2185,7 @@ public class QuadCLT extends QuadCLTCPU {
int width = imps_RGB[0].getWidth();
int height = imps_RGB[0].getHeight();
ImageStack array_stack=new ImageStack(width,height);
for (int i = 0; i<slice_seq.length; i++){
for (int i = 0; i<slice_seq.length; i++) if (imps_RGB[slice_seq[i]] != null){
/// if (imps_RGB[slice_seq[i]] != null) {
array_stack.addSlice("port_"+slice_seq[i], imps_RGB[slice_seq[i]].getProcessor().getPixels());
/// } else {
......@@ -2393,7 +2335,7 @@ public class QuadCLT extends QuadCLTCPU {
}
gpuQuad.execConvertDirect();
gpuQuad.execConvertDirect(-1); // int erase_clt: <0 - do not erase
int mcorr_sel = Correlation2d.corrSelEncode(clt_parameters.img_dtt, getNumSensors());
if (test_execCorr2D) {
......@@ -2961,7 +2903,7 @@ public class QuadCLT extends QuadCLTCPU {
for (int i = 0; i < NREPEAT; i++ ) {
// Direct CLT conversion and aberration correction
quadCLT_main.getGPU().execConvertDirect();
quadCLT_main.getGPU().execConvertDirect(-1); // int erase_clt: <0 - do not erase
}
long startIMCLT=System.nanoTime();
......
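renderFromTD() now also takes a sensor_mask: bit ncam of the mask selects camera ncam, and -1 (all bits set) keeps the previous behavior of rendering every sensor, as in the loops above that skip cameras whose bit is clear and then skip the corresponding null slices. A self-contained sketch of the same test:

public class SensorMaskSketch {
    // True if camera ncam is selected by the bit mask; sensor_mask = -1 selects all cameras.
    static boolean isSelected(int sensor_mask, int ncam) {
        return ((1 << ncam) & sensor_mask) != 0;
    }

    public static void main(String[] args) {
        int mask = -1; // all sensors, as passed by the callers above
        for (int ncam = 0; ncam < 4; ncam++) {
            System.out.println("camera " + ncam + ": " + isSelected(mask, ncam));
        }
    }
}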
......@@ -7062,7 +7062,7 @@ public class QuadCLTCPU {
too_cold *= iclt_data.length;
too_hot *= iclt_data.length;
int [] hist = null;
for (int iQuad = 0; iQuad < iclt_data.length; iQuad++) {
for (int iQuad = 0; iQuad < iclt_data.length; iQuad++) if (iclt_data[iQuad] != null){
int [] this_hist = getLwirHistogram(
iclt_data[iQuad][ncol], // double [] data,
hard_cold,
......@@ -7079,11 +7079,11 @@ public class QuadCLTCPU {
double [] rel_lim = {
getMarginFromHist(
hist, // histogram
too_cold, // double cumul_val, // cummulative number of items to be ignored
too_cold, // double cumul_val, // cumulative number of items to be ignored
false), // boolean high_marg)
getMarginFromHist(
hist, // histogram
too_hot, // double cumul_val, // cummulative number of items to be ignored
too_hot, // double cumul_val, // cumulative number of items to be ignored
true)}; // boolean high_marg)
double [] abs_lim = {
rel_lim[0] * (hard_hot - hard_cold) + hard_cold,
......