Commit 33bb38e2 authored by Andrey Filippov

Back propagation

parent 96a9bb0d
......@@ -55,7 +55,7 @@ public class ExportForGPUDevelopment {
saveFloatKernels(
kernel_dir + (quadCLT.isAux()?"aux":"main"), // String file_prefix,
(what_to_save[0][0]?quadCLT.getCLTKernels(): null), // double [][][][][][] clt_kernels, // null
(what_to_save[0][1]?quadCLT.image_data: null),
(what_to_save[0][1]?quadCLT.getImageData(): null),
(what_to_save[0][2]?port_xy: null), // double [][][] port_xy,
true);
} catch (IOException e) {
......
......@@ -1279,10 +1279,19 @@ public class GpuQuad{ // quad camera description
if (!force && this.gpuTileProcessor.bayer_set && !quadCLT.hasNewImageData()) {
return;
}
double [][] bayer_center = quadCLT.getImageCenter();
if (bayer_center != null) {
quadCLT.getResetImageCenter();
setBayerImage(
bayer_center,
true);
return;
}
double [][][] bayer_data = quadCLT.getResetImageData(); // resets hasNewImageData()
setBayerImages(
bayer_data,
true);
return;
}
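The dispatch above assumes a get/reset accessor contract on QuadCLT: a plain getter peeks at pending data, while the getReset...() variant also clears the "new data" flag so the upload happens only once. A minimal sketch of that assumed contract, with hypothetical field names (the actual QuadCLT implementation may differ):
// Sketch of the assumed get/reset accessor contract (hypothetical fields, illustrative only).
private double [][]   image_center;   // single composite Bayer image, or null
private double [][][] image_data;     // per-sensor Bayer data
private boolean       new_image_data; // set when fresh image data arrives
public boolean hasNewImageData()         { return new_image_data; }
public double [][] getImageCenter()      { return image_center; } // peek, does not clear the flag
public double [][] getResetImageCenter() { new_image_data = false; return image_center; }
public double [][][] getResetImageData() { new_image_data = false; return image_data; } // resets hasNewImageData()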
/**
......@@ -1319,6 +1328,37 @@ public class GpuQuad{ // quad camera description
}
}
// Set same image to all sensors (for reverse conversion)
public void setBayerImage(
double [][] bayer_data,
boolean force) {
if (this.gpuTileProcessor.bayer_set && !force) {
return;
}
if (bayer_data[0] == null) {
System.out.println("BUG!! bayer_data[0] == null");
return;
}
float [] fbayer = new float [bayer_data[0].length]; // allocate only after the null check
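// Collapse all color planes of the single input image into one composite plane;
// the same composite is then uploaded to every sensor below.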
for (int i = 0; i < bayer_data[0].length; i++) {
fbayer[i] = (float) (bayer_data[0][i]);
for (int j = 1; j < bayer_data.length; j++) {
fbayer[i] += (float) (bayer_data[j][i]);
}
}
for (int ncam = 0; ncam < getNumSensors(); ncam++) {
setBayerImage(
fbayer, // float [] bayer_image,
ncam); // int ncam)
}
this.gpuTileProcessor.bayer_set = true;
if (getGpu_debug_level() > -1) {
System.out.println("======setBayerImage()");
}
return;
}
// prepare tasks for full frame, same disparity.
// need to run setTasks(TpTask [] tile_tasks, boolean use_aux) to format/transfer to GPU memory
/**
......@@ -1871,14 +1911,6 @@ public class GpuQuad{ // quad camera description
/**
* Direct CLT conversion and aberration correction
*/
public void execConvertDirect(int erase_clt) {
execConvertDirect(
false,
null,
erase_clt);
}
/**
* Convert and save TD representation in either normal or reference scene. Reference scene TD representation
* is used for interscene correlation (for "IMU")
* @param ref_scene save result into a separate buffer for interscene correlation when true.
......@@ -1900,11 +1932,14 @@ public class GpuQuad{ // quad camera description
IJ.showMessage("Error", "No GPU kernel: GPU_ERASE_CLT_TILES_kernel");
return;
}
if (!rectilinear) {
// if ((quadCLT == null) || (quadCLT.getImageCenter() != null)) { // ????????????????
if ((quadCLT != null) && (quadCLT.getImageCenter() == null)) {
setConvolutionKernels(false); // set kernels if they are not set already
}
setBayerImages(false); // set Bayer images if this.quadCLT instance has new ones
}
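// skip_kernels: per-tile aberration-correction kernels are bypassed for rectilinear conversion,
// for a center (back-propagation) image, or when no kernels are set/available; kernels_hor and
// kernels_vert are then passed to the GPU kernel as 0 (presumably its "no kernels" convention).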
boolean skip_kernels = rectilinear || (quadCLT == null) || (quadCLT.getImageCenter() != null) || !this.gpuTileProcessor.kernels_set || (quadCLT.no_kernels);
int [] wh1 = handleWH(
wh, // int [] wh_in,
......@@ -1915,67 +1950,6 @@ public class GpuQuad{ // quad camera description
int tilesX = wh[0] / GPUTileProcessor.DTT_SIZE;
int tilesY = wh[1] / GPUTileProcessor.DTT_SIZE;
/*
if (wh == null) {
wh = new int[] {img_width, img_height};
}
// kernel parameters: pointer to pointers
int tilesX = wh[0] / GPUTileProcessor.DTT_SIZE;
int tilesY = wh[1] / GPUTileProcessor.DTT_SIZE;
// De-allocate if size mismatch, allocate if needed. Now it is the only place where clt is allocated
if (ref_scene) {
if ((gpu_clt_ref_wh != null) && ((gpu_clt_ref_wh[0] != wh[0]) || (gpu_clt_ref_wh[1] != wh[1]))) {
for (int ncam = 0; ncam < num_cams; ncam++) {
cuMemFree (gpu_clt_ref_h[ncam]);
}
cuMemFree (gpu_clt_ref);
gpu_clt_ref = null;
gpu_clt_ref_wh = null;
}
if (gpu_clt_ref == null) { // Allocate memory, create pointers for reference scene TD representation
long [] gpu_clt_ref_l = new long [num_cams];
gpu_clt_ref_h = new CUdeviceptr[num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_ref_h[ncam] = new CUdeviceptr();
cuMemAlloc(gpu_clt_ref_h[ncam],
tilesY * tilesX * num_colors * 4 * GPUTileProcessor.DTT_SIZE * GPUTileProcessor.DTT_SIZE * Sizeof.FLOAT );
}
gpu_clt_ref = new CUdeviceptr();
cuMemAlloc(gpu_clt_ref, num_cams * Sizeof.POINTER);
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_ref_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_clt_ref_h[ncam]);
}
cuMemcpyHtoD(gpu_clt_ref, Pointer.to(gpu_clt_ref_l), num_cams * Sizeof.POINTER);
gpu_clt_ref_wh = wh.clone();
}
} else { // same for main (not ref) memory
if ((gpu_clt_wh != null) && ((gpu_clt_wh[0] != wh[0]) || (gpu_clt_wh[1] != wh[1]))) {
for (int ncam = 0; ncam < num_cams; ncam++) {
cuMemFree (gpu_clt_h[ncam]);
}
cuMemFree (gpu_clt);
gpu_clt = null;
gpu_clt_wh = null;
}
if (gpu_clt == null) { // Allocate memory, create pointers for reference scene TD representation
long [] gpu_clt_l = new long [num_cams];
gpu_clt_h = new CUdeviceptr[num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_h[ncam] = new CUdeviceptr();
cuMemAlloc(gpu_clt_h[ncam],
tilesY * tilesX * num_colors * 4 * GPUTileProcessor.DTT_SIZE * GPUTileProcessor.DTT_SIZE * Sizeof.FLOAT );
}
gpu_clt = new CUdeviceptr();
cuMemAlloc(gpu_clt, num_cams * Sizeof.POINTER);
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_clt_h[ncam]);
}
cuMemcpyHtoD(gpu_clt, Pointer.to(gpu_clt_l), num_cams * Sizeof.POINTER);
gpu_clt_wh = wh.clone();
}
}
*/
CUdeviceptr gpu_clt_selected = ref_scene ? gpu_clt_ref : gpu_clt;
int [] GridFullWarps = {1, 1, 1};
int [] ThreadsFullWarps = {1, 1, 1};
......@@ -2002,7 +1976,8 @@ public class GpuQuad{ // quad camera description
}
}
Pointer kernelParameters;
if (rectilinear) {
// boolean rectilinear_or_back = rectilinear || (quadCLT == null) || (quadCLT.getImageCenter() != null);
/* if (rectilinear) {
kernelParameters = Pointer.to(
Pointer.to(new int[] { num_cams}), // int num_cams,
Pointer.to(new int[] { num_colors}), // int num_colors,
......@@ -2025,6 +2000,7 @@ public class GpuQuad{ // quad camera description
);
} else {// !rectilinear (normal way)
*/
kernelParameters = Pointer.to(
Pointer.to(new int[] { num_cams}), // int num_cams,
Pointer.to(new int[] { num_colors}), // int num_colors,
......@@ -2039,13 +2015,15 @@ public class GpuQuad{ // quad camera description
Pointer.to(new int[] { 0 }), // lpf_mask
Pointer.to(new int[] { wh[0]}), // img_width}), // int woi_width,
Pointer.to(new int[] { wh[1]}), // img_height}), // int woi_height,
Pointer.to(new int[] { kernels_hor}), // int kernels_hor,
Pointer.to(new int[] { kernels_vert}), // int kernels_vert);
Pointer.to(new int[] { skip_kernels? 0 : kernels_hor}), // int kernels_hor,
Pointer.to(new int[] { skip_kernels? 0 : kernels_vert}), // int kernels_vert);
Pointer.to(gpu_active_tiles),
Pointer.to(gpu_num_active_tiles),
Pointer.to(new int[] { tilesX })
);
/*
}
*/
cuCtxSynchronize();
// Call the kernel function
cuLaunchKernel(this.gpuTileProcessor.GPU_CONVERT_DIRECT_kernel,
......@@ -4140,12 +4118,9 @@ public class GpuQuad{ // quad camera description
int out_width = getImageWidth();// + gpuQuad.getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
int out_height = getImageHeight(); // + gpuQuad.getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
/// int gpu_height = (img_height + GPUTileProcessor.DTT_SIZE);
/// int gpu_width = (img_width + GPUTileProcessor.DTT_SIZE);
int gpu_height = (out_height + GPUTileProcessor.DTT_SIZE);
int gpu_width = (out_width + GPUTileProcessor.DTT_SIZE);
int gpu_img_size = gpu_width * gpu_height;
/// int rslt_img_size = img_height * img_width; // width * height;
int rslt_img_size = out_height * out_width; // width * height;
float [] cpu_corr_image = new float [ num_colors * gpu_img_size];
int gpu_width_in_bytes = gpu_width *Sizeof.FLOAT;
......@@ -4168,9 +4143,7 @@ public class GpuQuad{ // quad camera description
float [][] fimg = new float [num_colors][ rslt_img_size];
for (int ncol = 0; ncol < num_colors; ncol++) {
int tl_offset = (GPUTileProcessor.DTT_SIZE/2) * (gpu_width + 1) + ncol*gpu_img_size;
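// tl_offset skips the DTT_SIZE/2-pixel top and left margins of the GPU-padded buffer
// ((gpu_width + 1) advances one padded row plus one column) and selects the color plane.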
// for (int nrow=0; nrow < img_height; nrow++) {
for (int nrow=0; nrow < out_height; nrow++) {
// System.arraycopy(cpu_corr_image, tl_offset + (gpu_width * nrow), fimg[ncol], img_width * nrow, img_width);
System.arraycopy(cpu_corr_image, tl_offset + (gpu_width * nrow), fimg[ncol], out_width * nrow, out_width);
}
}
......@@ -4326,12 +4299,7 @@ public class GpuQuad{ // quad camera description
final boolean [] valid_tiles,
final int threadsMax) // maximal number of threads to launch
{
int num_pairs = Correlation2d.getNumPairs(num_cams);
//change to fixed 511?
// final int task_code = ((1 << num_pairs)-1) << GPUTileProcessor.TASK_CORR_BITS; // correlation only
final int task_code = (1 << GPUTileProcessor.TASK_CORR_EN) | (1 << GPUTileProcessor.TASK_INTER_EN);
final double min_px = margin;
final double max_px = geometryCorrection.getSensorWH()[0] - 1 - margin; // sensor width here, not window width
final double [] min_py = new double[num_cams] ;
......@@ -4419,6 +4387,7 @@ public class GpuQuad{ // quad camera description
public static TpTask[][] setInterTasksMotionBlur(
final int num_cams,
final int img_width, // should match pXpYD
......
......@@ -343,7 +343,7 @@ public class ImageDtt extends ImageDttCPU {
}
gpuQuad.execConvertDirect(-1); // boolean erase_clt
gpuQuad.execConvertDirect(false, null, -1); // boolean erase_clt
if (iclt_fimg != null) {
gpuQuad.execImcltRbgAll(isMonochrome()); // execute GPU kernel
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) {
......@@ -939,7 +939,7 @@ public class ImageDtt extends ImageDttCPU {
// Skipping if ((fdisp_dist != null) || (fpxpy != null)) {...
gpuQuad.execConvertDirect(-1); // boolean erase_clt
gpuQuad.execConvertDirect(false, null, -1); // boolean erase_clt
if (mcorr_sel == 0) { // no correlation at all
return;
}
......@@ -1106,7 +1106,7 @@ public class ImageDtt extends ImageDttCPU {
// Skipping if ((fdisp_dist != null) || (fpxpy != null)) {...
gpuQuad.execConvertDirect(-1); // boolean erase_clt
gpuQuad.execConvertDirect(false, null, -1); // boolean erase_clt
if (sensor_mask_inter == 0) { // no correlation at all
return;
}
......@@ -1322,7 +1322,7 @@ public class ImageDtt extends ImageDttCPU {
false); // boolean use_aux // while is it in class member? - just to be able to free
// Skipping if ((fdisp_dist != null) || (fpxpy != null)) {...
gpuQuad.execConvertDirect(-1); // Convert primary image, no erase (each tile will be SET as scales > 0
gpuQuad.execConvertDirect(false, null, -1); // Convert primary image, no erase (each tile will be SET as scales > 0)
// set secondary tasks and perform direct conversion to TD, subtracting from the converted primary
gpuQuad.setTasks( // copy tp_tasks to the GPU memory
......@@ -1335,7 +1335,7 @@ public class ImageDtt extends ImageDttCPU {
gpuQuad.updateTasks(
tp_tasks[1],
false); // boolean use_aux // while is it in class member? - just to be able to free
gpuQuad.execConvertDirect(-1); // Convert secondary image, no erase (each tile will be SUBTRACTED as scales < 0)
gpuQuad.execConvertDirect(false, null, -1); // Convert secondary image, no erase (each tile will be SUBTRACTED as scales < 0)
// continue as w/o Motion Blur in ( interCorrTD() )
if (sensor_mask_inter == 0) { // no correlation at all
......@@ -1388,6 +1388,18 @@ public class ImageDtt extends ImageDttCPU {
final int threadsMax, // maximal number of threads to launch
final int globalDebugLevel)
{
preSetReferenceTD( // do not run execConvertDirect, exit after updating tasks
imgdtt_params, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tp_tasks, // final TpTask[] tp_tasks,
false, // final boolean keep_tiles_offsets, // keep per-sensors offsets in tp_tasks
gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
globalDebugLevel); // final int globalDebugLevel)
/*
final float [][] lpf_rgb = new float[][] {
floatGetCltLpfFd(gpu_sigma_r),
floatGetCltLpfFd(gpu_sigma_b),
......@@ -1407,6 +1419,7 @@ public class ImageDtt extends ImageDttCPU {
gpuQuad.updateTasks(
tp_tasks,
false); // boolean use_aux // while is it in class member? - just to be able to free
*/
if (fclt != null) {
gpuQuad.handleWH( // allocate/reallocate GPU memory, that was normally done by gpuQuad.execConvertDirect()
wh, // int [] wh,
......@@ -1426,9 +1439,52 @@ public class ImageDtt extends ImageDttCPU {
} else {
gpuQuad.execConvertDirect(use_reference_buffer, wh, erase_clt); // put results into a "reference" buffer
}
return;
}
public void execConvertDirect(
boolean use_reference_buffer,
int [] wh,
int erase_clt) {
gpuQuad.execConvertDirect(use_reference_buffer, wh, erase_clt); // put results into a "reference" buffer
}
public void preSetReferenceTD( // do not run execConvertDirect, exit after updating tasks
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
final TpTask[] tp_tasks,
final boolean keep_tiles_offsets, // keep per-sensors offsets in tp_tasks
final double gpu_sigma_r, // 0.9, 1.1
final double gpu_sigma_b, // 0.9, 1.1
final double gpu_sigma_g, // 0.6, 0.7
final double gpu_sigma_m, // = 0.4; // 0.7;
final int globalDebugLevel)
{
final float [][] lpf_rgb = new float[][] {
floatGetCltLpfFd(gpu_sigma_r),
floatGetCltLpfFd(gpu_sigma_b),
floatGetCltLpfFd(gpu_sigma_g),
floatGetCltLpfFd(gpu_sigma_m)
};
gpuQuad.setLpfRbg( // constants memory - same for all cameras
lpf_rgb,
globalDebugLevel > 2);
gpuQuad.setTasks( // copy tp_tasks to the GPU memory
tp_tasks, // TpTask [] tile_tasks,
false, // use_aux); // boolean use_aux)
imgdtt_params.gpu_verify); // boolean verify
if (!keep_tiles_offsets) {
// Why always NON-UNIFORM grid? Already set in tp_tasks
gpuQuad.execSetTilesOffsets(false); // false); // prepare tiles offsets in GPU memory, using NON-UNIFORM grid (pre-calculated)
}
// update tp_tasks - execute even w/o execSetTilesOffsets
gpuQuad.updateTasks(
tp_tasks,
false); // boolean use_aux // while is it in class member? - just to be able to free
return;
}
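Splitting setReferenceTD() this way lets a caller set up the LPF and tasks first and run the conversion later. A minimal sketch of the two-step call (illustrative; image_dtt, clt_parameters, tp_tasks, the sigma values and debugLevel stand for the caller's existing objects):
image_dtt.preSetReferenceTD(    // set LPF, copy tasks to the GPU, update tp_tasks - no conversion yet
		clt_parameters.img_dtt, // ImageDttParameters imgdtt_params
		tp_tasks,               // TpTask [] tp_tasks
		false,                  // keep_tiles_offsets - recalculate per-sensor offsets
		gpu_sigma_r,            // double gpu_sigma_r
		gpu_sigma_b,            // double gpu_sigma_b
		gpu_sigma_g,            // double gpu_sigma_g
		gpu_sigma_m,            // double gpu_sigma_m
		debugLevel);            // int globalDebugLevel
// ... optionally change GPU state here (e.g. upload a center image) ...
image_dtt.execConvertDirect(    // run the direct CLT conversion when ready
		false,                  // use_reference_buffer
		null,                   // wh - null: use sensor dimensions
		-1);                    // erase_clt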
public void setRectilinearReferenceTD(
final int erase_clt,
final float [] fpixels_ref,
......@@ -1467,7 +1523,7 @@ public class ImageDtt extends ImageDttCPU {
gpuQuad.execConvertDirect(use_reference_buffer, wh, erase_clt); // put results into a "reference" buffer
}
public void setRectilinearReferenceTD_debug(
public void setRectilinearReferenceTD_debug( // not used
final int erase_clt,
final float [] fpixels_ref,
final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
......
......@@ -674,6 +674,11 @@ min_str_neib_fpn 0.35
public double cuas_max_abs_rowcol = 100.0; // consider pixels whose abs(UM difference) does not exceed this value
public double cuas_outliers_rowcol = 0.001; // scale weight of the outliers with high difference (to prevent undefined values)
public boolean cuas_reset_first = false; // Reset average in first scene (for large difference in unfinished row/col)
public int cuas_invert_margins = 1; // Expand image on each side when inverting tasks
public int cuas_invert_iters = 4; // Number of iterations to enhance inversion
public double cuas_invert_tolerance = 0.001; // Finish enhancing when the last change is lower than this value
public int cuas_invert_gap2 = 10; // Maximal dual gap size for inversion (depends on scanning radius in tiles); <0 = use maximal possible
public boolean cuas_debug = false; // save debug images (and show them if not in batch mode)
public boolean cuas_step_debug = false; // save debug images during per-step cuas recalculation (and show them if not in batch mode)
......@@ -2025,6 +2030,15 @@ min_str_neib_fpn 0.35
"Scale weight of the outliers with high difference than above (to prevent undefined values).");
gd.addCheckbox ("Reset average on first sequence", this.cuas_reset_first,
"Reset average in first scene (for large diffirence in unfinished row/col.");
gd.addMessage("=== Tasks inversion ===");
gd.addNumericField("Inversion: add margins", this.cuas_invert_margins, 0,3,"tiles",
"Increase processing area each side when calculating tasks inversion.");
gd.addNumericField("Number of inverion iterations", this.cuas_invert_iters, 0,3,"",
"Number of iterations when enhancing tasks inversion.");
gd.addNumericField("Inversion tolerance", this.cuas_invert_tolerance, 5,7,"pix",
"Finish enhancing tasks inversion when last change was lower than this value.");
gd.addNumericField("Inversion gap (dual size)", this.cuas_invert_gap2, 0,3,"tiles",
"Maximal dual gap size for inversion (depends on scanning radius in tiles) .");
gd.addMessage("=== Debug ===");
gd.addCheckbox ("Save/show debug images", this.cuas_debug,
......@@ -2939,6 +2953,11 @@ min_str_neib_fpn 0.35
this.cuas_outliers_rowcol = gd.getNextNumber();
this.cuas_reset_first = gd.getNextBoolean();
this.cuas_invert_margins =(int) gd.getNextNumber();
this.cuas_invert_iters = (int) gd.getNextNumber();
this.cuas_invert_tolerance = gd.getNextNumber();
this.cuas_invert_gap2 = (int) gd.getNextNumber();
this.cuas_debug = gd.getNextBoolean();
this.cuas_step_debug = gd.getNextBoolean();
......@@ -3778,6 +3797,13 @@ min_str_neib_fpn 0.35
properties.setProperty(prefix+"cuas_max_abs_rowcol", this.cuas_max_abs_rowcol+""); // double
properties.setProperty(prefix+"cuas_outliers_rowcol", this.cuas_outliers_rowcol+"");// double
properties.setProperty(prefix+"cuas_reset_first", this.cuas_reset_first+""); // boolean
properties.setProperty(prefix+"cuas_invert_margins", this.cuas_invert_margins+""); // int
properties.setProperty(prefix+"cuas_invert_iters", this.cuas_invert_iters+""); // int
properties.setProperty(prefix+"cuas_invert_tolerance",this.cuas_invert_tolerance+"");// double
properties.setProperty(prefix+"cuas_invert_gap2", this.cuas_invert_gap2+""); // int
properties.setProperty(prefix+"cuas_debug", this.cuas_debug+""); // boolean
properties.setProperty(prefix+"cuas_step_debug", this.cuas_step_debug+""); // boolean
......@@ -4592,6 +4618,11 @@ min_str_neib_fpn 0.35
if (properties.getProperty(prefix+"cuas_outliers_rowcol")!=null) this.cuas_outliers_rowcol=Double.parseDouble(properties.getProperty(prefix+"cuas_outliers_rowcol"));
if (properties.getProperty(prefix+"cuas_reset_first")!=null) this.cuas_reset_first=Boolean.parseBoolean(properties.getProperty(prefix+"cuas_reset_first"));
if (properties.getProperty(prefix+"cuas_invert_margins")!=null) this.cuas_invert_margins=Integer.parseInt(properties.getProperty(prefix+"cuas_invert_margins"));
if (properties.getProperty(prefix+"cuas_invert_iters")!=null) this.cuas_invert_iters=Integer.parseInt(properties.getProperty(prefix+"cuas_invert_iters"));
if (properties.getProperty(prefix+"cuas_invert_tolerance")!=null)this.cuas_invert_tolerance=Double.parseDouble(properties.getProperty(prefix+"cuas_invert_tolerance"));
if (properties.getProperty(prefix+"cuas_invert_gap2")!=null) this.cuas_invert_gap2=Integer.parseInt(properties.getProperty(prefix+"cuas_invert_gap2"));
if (properties.getProperty(prefix+"cuas_debug")!=null) this.cuas_debug=Boolean.parseBoolean(properties.getProperty(prefix+"cuas_debug"));
if (properties.getProperty(prefix+"cuas_step_debug")!=null) this.cuas_step_debug=Boolean.parseBoolean(properties.getProperty(prefix+"cuas_step_debug"));
......@@ -5407,6 +5438,11 @@ min_str_neib_fpn 0.35
imp.cuas_outliers_rowcol = this.cuas_outliers_rowcol;
imp.cuas_reset_first = this.cuas_reset_first;
imp.cuas_invert_margins = this.cuas_invert_margins;
imp.cuas_invert_iters = this.cuas_invert_iters;
imp.cuas_invert_tolerance = this.cuas_invert_tolerance;
imp.cuas_invert_gap2 = this.cuas_invert_gap2;
imp.cuas_debug = this.cuas_debug;
imp.cuas_step_debug = this.cuas_step_debug;
......
......@@ -123,6 +123,7 @@ public class QuadCLT extends QuadCLTCPU {
}
}
public ImagePlus showCenterClt(
float [][] fclt, // may be null
CLTParameters clt_parameters,
......@@ -166,24 +167,42 @@ public class QuadCLT extends QuadCLTCPU {
}
return imp_virtual;
}
@Deprecated
public ImagePlus showCenterCltWeights(
CLTParameters clt_parameters) {
if (getCenterCltWeights() == null) {
System.out.println("showCenterCltWeights(): not a center CLT");
public double [][] convertCenterClt(
float [][] fclt){ // may be null
if (getCenterClt() == null) {
System.out.println("convertCenterClt(): not a center CLT");
return null;
}
String title = getImageName()+"-center_clt_weights";
ImagePlus imp = ShowDoubleFloatArrays.makeArrays(
getCenterCltWeights(), // float[] pixels,
getTilesX(), // int width,
getTilesY(), // int height,
title); // String title)
imp.show();
return imp;
int sensor_mask_clt = 1; // just one
setQuadClt();
int tilesX = getTilesX();
int tilesY = getTilesY();
int tile_size = getTileSize();
int [] wh = {tilesX*tile_size,tilesY*tile_size};
getGPUQuad().handleWH(
wh, // int [] wh,
false); // boolean ref_scene)
/// getGPUQuad().handleWH(
/// wh, // int [] wh,
/// true); // boolean ref_scene)
/// int [] whc = new int[3];
setComboToTD(
fclt, // new float [][] {center_CLT.getCenterClt()}, // ,combo_seq_clt, // final float [][] fclt,
true, // merge_clt, // final boolean merge_channels, // duplicate same data to all selected channels
sensor_mask_clt, // final int sensor_mask, // only if merge_channels
null, // whc, // final int [] whc, // if int[2], will return width, height
false); // final boolean use_reference);
double [][] result = renderDoubleFromTD (
sensor_mask_clt, // final int sensor_mask,
wh, // null, // int [] wh,
false); // boolean use_reference
return result;
}
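A short usage sketch (illustrative; assumes a ShowDoubleFloatArrays.showArrays() overload for double [][], matching the float [][] variant used elsewhere in this file):
double [][] center_rendered = quadCLT.convertCenterClt(fclt); // null if this is not a center CLT
if (center_rendered != null) {
	int width  = quadCLT.getTilesX() * quadCLT.getTileSize();
	int height = quadCLT.getTilesY() * quadCLT.getTileSize();
	ShowDoubleFloatArrays.showArrays(
			center_rendered,                              // double [][] pixels
			width,                                        // int width
			height,                                       // int height
			true,                                         // boolean as_stack
			quadCLT.getImageName() + "-center_rendered"); // String title
}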
/**
* Remove weak non-LMA tiles if they do not have any LMA or strong neighbors and
* too few weak neighbors. Single strong neighbor within range is enough, strong/LMA
......@@ -3104,27 +3123,6 @@ public class QuadCLT extends QuadCLTCPU {
}
/*
float [][] fclt = gpuQuad.getCltData(
false);// boolean use_ref);
System.out.println("Got CLT ["+fclt.length+"]["+fclt[0].length+"]");
ShowDoubleFloatArrays.showArrays(
fclt,
gpuQuad.img_width*2,
gpuQuad.img_height*2,
true,
"fclt_raw"); // , dbg_titles);
float [][] pfclt = gpuQuad.presentCltData(false);
ShowDoubleFloatArrays.showArrays(
pfclt,
gpuQuad.img_width*2,
gpuQuad.img_height*2,
true,
"fclt"); // , dbg_titles);
*/
// add similar for textures
public ImagePlus renderFromTD (
int sensor_mask,
......@@ -3255,7 +3253,6 @@ public class QuadCLT extends QuadCLTCPU {
*/
public double [][] renderDoubleFromTD ( // [scene][color][pixel]
int sensor_mask,
// CLTParameters clt_parameters,
int [] wh, // may be null, or {width, height}
boolean use_reference
) {
......@@ -3297,6 +3294,64 @@ public class QuadCLT extends QuadCLTCPU {
return iclt_fimg_combo;
}
public double [][][] renderDoubleFromTD ( // [scene][color][pixel]
int [] wh, // may be null, or {width, height}
boolean use_reference
) {
gpuQuad.execImcltRbgAll(
isMonochrome(),
use_reference,
wh); //int [] wh
// get data back from GPU
final int fnum_sens = getNumSensors();
final float [][][] iclt_fimg = new float [fnum_sens][][];
int num_col = 0;
int num_pix = 0;
for (int nsens = 0; nsens < iclt_fimg.length; nsens++) {
iclt_fimg[nsens] = gpuQuad.getRBG(nsens); // updated window
num_col = iclt_fimg[nsens].length;
num_pix = iclt_fimg[nsens][0].length;
}
final int fnum_pix = num_pix;
final int fnum_col = num_col;
final double [][][] dclt_fimg_combo = new double [fnum_sens][fnum_col][fnum_pix];
final Thread[] threads = ImageDtt.newThreadArray(THREADS_MAX);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nSens = ai.getAndIncrement(); nSens < fnum_sens; nSens = ai.getAndIncrement()) {
for (int ncol = 0; ncol < fnum_col; ncol++) if (iclt_fimg[nSens][ncol] != null) {
for (int npix = 0; npix < fnum_pix; npix++) { // pixel
dclt_fimg_combo[nSens][ncol][npix] = iclt_fimg[nSens][ncol][npix];
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
return dclt_fimg_combo;
}
/*
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < iclt_fimg_combo[0].length; nTile = ai.getAndIncrement()) {
for (int ncol = 0; ncol < iclt_fimg_combo.length; ncol++) {
double d = 0;
for (int i = 0; i < iclt_fimg.length; i++) if (iclt_fimg[i] != null) {
d+=iclt_fimg[i][ncol][nTile];
}
iclt_fimg_combo[ncol][nTile] = d * scale; // (float) (d * scale);
}
}
}
};
}
*/
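A usage sketch of the per-sensor variant, combining sensors in the spirit of the commented-out loop above (illustrative; null wh uses the sensor dimensions, and TD data is assumed to be already set):
double [][][] rendered = quadCLT.renderDoubleFromTD( // [sensor][color][pixel]
		null,   // int [] wh - null: use sensor dimensions
		false); // boolean use_reference
int num_col = rendered[0].length;
int num_pix = rendered[0][0].length;
double [][] average = new double [num_col][num_pix]; // per-color average over all sensors
for (int ncol = 0; ncol < num_col; ncol++) {
	for (int npix = 0; npix < num_pix; npix++) {
		double d = 0;
		for (int nsens = 0; nsens < rendered.length; nsens++) {
			d += rendered[nsens][ncol][npix];
		}
		average[ncol][npix] = d / rendered.length;
	}
}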
......@@ -3441,7 +3496,7 @@ public class QuadCLT extends QuadCLTCPU {
}
gpuQuad.execConvertDirect(-1); // boolean erase_clt
gpuQuad.execConvertDirect(false, null, -1); // boolean erase_clt
float [][] fclt = gpuQuad.getCltData(
false);// boolean use_ref);
......@@ -4063,7 +4118,7 @@ public class QuadCLT extends QuadCLTCPU {
for (int i = 0; i < NREPEAT; i++ ) {
// Direct CLT conversion and aberration correction
quadCLT_main.getGPU().execConvertDirect(-1); // boolean erase_clt
quadCLT_main.getGPU().execConvertDirect(false, null, -1); // boolean erase_clt
}
long startIMCLT=System.nanoTime();
......@@ -5589,7 +5644,7 @@ public class QuadCLT extends QuadCLTCPU {
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
tile_op, // final int [][] tile_op, // [tilesY][tilesX] - what to do - 0 - nothing for this tile
disparity_array, // final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
image_data, // final double [][][] imade_data, // first index - number of image in a quad
getImageData(), // final double [][][] imade_data, // first index - number of image in a quad
saturation_imp, // final boolean [][] saturation_imp, // (near) saturated pixels or null
tilesX * image_dtt.transform_size, // final int width,
clt_parameters.getFatZero(isMonochrome()), // final double corr_fat_zero, // add to denominator to modify phase correlation (same units as data1, data2). <0 - pure sum
......
......@@ -2703,7 +2703,7 @@ public class TexturedModel {
final String dbg_prefix = batch_mode ? null : ref_scene.getImageName()+"-";
if (ref_scene.image_data == null){
if (ref_scene.getImageData() == null){
return false; // not used in lwir
}
double infinity_disparity = ref_scene.getGeometryCorrection().getDisparityFromZ(clt_parameters.infinityDistance);
......
......@@ -742,8 +742,8 @@ public class TwoQuadCLT {
clt_parameters.rig.no_int_x0, // boolean no_int_x0, // do not offset window to integer maximum - used when averaging low textures to avoid "jumps" for very wide
tile_op_main, // final int [][] tile_op_main, // [tilesY][tilesX] - what to do - 0 - nothing for this tile
disparity_array_main, // final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
quadCLT_main.image_data, // final double [][][] image_data_main, // first index - number of image in a quad
quadCLT_aux.image_data, // final double [][][] image_data_aux, // first index - number of image in a quad
quadCLT_main.getImageData(), // final double [][][] image_data_main, // first index - number of image in a quad
quadCLT_aux.getImageData(), // final double [][][] image_data_aux, // first index - number of image in a quad
quadCLT_main.saturation_imp, // final boolean [][] saturation_main, // (near) saturated pixels or null
quadCLT_aux.saturation_imp, // final boolean [][] saturation_aux, // (near) saturated pixels or null
// correlation results - combo will be for the correlation between two quad cameras
......@@ -1259,13 +1259,15 @@ public class TwoQuadCLT {
clt_parameters.getScaleStrength(false));
double [][] ml_data = null;
double [][][] ers_delay = get_ers?(new double [2][][]):null;
final double[][][] main_image_data = quadCLT_main.getImageData();
final double[][][] aux_image_data = quadCLT_aux.getImageData();
// here all data is ready (images, kernels) to try GPU code
float [][] main_bayer = new float [quadCLT_main.image_data.length][quadCLT_main.image_data[0][0].length];
float [][] dst_bayer = new float [quadCLT_main.image_data.length][quadCLT_main.image_data[0][0].length];
float [][] main_bayer = new float [main_image_data.length][main_image_data[0][0].length];
float [][] dst_bayer = new float [main_image_data.length][main_image_data[0][0].length];
for (int nc = 0; nc < main_bayer.length; nc++) {
int nc1 = (nc +1) % 4;
for (int i = 0; i < main_bayer[nc].length; i++) {
main_bayer[nc1][i] = (float) (quadCLT_main.image_data[nc][0][i] + quadCLT_main.image_data[nc][1][i] + quadCLT_main.image_data[nc][2][i]);
main_bayer[nc1][i] = (float) (main_image_data[nc][0][i] + main_image_data[nc][1][i] + main_image_data[nc][2][i]);
dst_bayer[nc][i]= nc*main_bayer[nc].length + i;
}
}
......@@ -1284,8 +1286,8 @@ public class TwoQuadCLT {
clt_parameters.rig.no_int_x0, // boolean no_int_x0, // do not offset window to integer maximum - used when averaging low textures to avoid "jumps" for very wide
tile_op_main, // final int [][] tile_op_main, // [tilesY][tilesX] - what to do - 0 - nothing for this tile
disparity_array_main, // final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
quadCLT_main.image_data, // final double [][][] image_data_main, // first index - number of image in a quad
quadCLT_aux.image_data, // final double [][][] image_data_aux, // first index - number of image in a quad
main_image_data, // final double [][][] image_data_main, // first index - number of image in a quad
aux_image_data, // final double [][][] image_data_aux, // first index - number of image in a quad
quadCLT_main.saturation_imp, // final boolean [][] saturation_main, // (near) saturated pixels or null
quadCLT_aux.saturation_imp, // final boolean [][] saturation_aux, // (near) saturated pixels or null
// correlation results - combo will be for the correlation between two quad cameras
......@@ -1385,7 +1387,7 @@ public class TwoQuadCLT {
ExportForGPUDevelopment.saveFloatKernels(
kernel_dir +"main", // String file_prefix,
(what_to_save[0][0]?quadCLT_main.getCLTKernels():null), // double [][][][][][] clt_kernels, // null
(what_to_save[0][1]?quadCLT_main.image_data:null),
(what_to_save[0][1]?main_image_data:null),
(what_to_save[0][2]?port_xy_main_dbg:null), // double [][][] port_xy,
true);
} catch (IOException e) {
......@@ -1398,7 +1400,7 @@ public class TwoQuadCLT {
ExportForGPUDevelopment.saveFloatKernels(
kernel_dir +"aux", // String file_prefix,
(what_to_save[1][0]?quadCLT_aux.getCLTKernels():null), // double [][][][][][] clt_kernels, // null
(what_to_save[1][1]?quadCLT_aux.image_data:null),
(what_to_save[1][1]?aux_image_data:null),
(what_to_save[1][2]?port_xy_aux_dbg:null), // double [][][] port_xy,
true);
} catch (IOException e) {
......@@ -1878,8 +1880,8 @@ public class TwoQuadCLT {
String name_aux = (String) imp_quad_main[0].getProperty("name");
quadCLT_main.image_name = name_main;
quadCLT_aux.image_name = name_aux;
quadCLT_main.image_data = double_stacks_main;
quadCLT_aux.image_data = double_stacks_aux;
main_image_data = double_stacks_main;
aux_image_data = double_stacks_aux;
quadCLT_main.saturation_imp = saturation_main;
quadCLT_aux.saturation_imp = saturation_aux;
// quadCLT_main.tp.resetCLTPasses();
......@@ -2962,8 +2964,8 @@ if (debugLevel > -100) return true; // temporarily !
true, // boolean no_int_x0, // do not offset window to integer maximum - used when averaging low textures to avoid "jumps" for very wide
tile_op, // final int [][] tile_op_main, // [tilesY][tilesX] - what to do - 0 - nothing for this tile
disparity_array, // final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
quadCLT_main.image_data, // final double [][][] image_data_main, // first index - number of image in a quad
quadCLT_aux.image_data, // final double [][][] image_data_aux, // first index - number of image in a quad
quadCLT_main.getImageData(), // final double [][][] image_data_main, // first index - number of image in a quad
quadCLT_aux.getImageData(), // final double [][][] image_data_aux, // first index - number of image in a quad
quadCLT_main.saturation_imp, // final boolean [][] saturation_main, // (near) saturated pixels or null
quadCLT_aux.saturation_imp, // final boolean [][] saturation_aux, // (near) saturated pixels or null
// correlation results - combo will be for the correlation between two quad cameras
......@@ -7081,8 +7083,8 @@ if (debugLevel > -100) return true; // temporarily !
no_int_x0, // final boolean no_int_x0, // do not offset window to integer maximum - used when averaging low textures to avoid "jumps" for very wide
tile_op, // final int [][] tile_op_main, // [tilesY][tilesX] - what to do - 0 - nothing for this tile
disparity_array, // final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
quadCLT_main.image_data, // final double [][][] image_data_main, // first index - number of image in a quad
quadCLT_aux.image_data, // final double [][][] image_data_aux, // first index - number of image in a quad
quadCLT_main.getImageData(), // final double [][][] image_data_main, // first index - number of image in a quad
quadCLT_aux.getImageData(), // final double [][][] image_data_aux, // first index - number of image in a quad
quadCLT_main.saturation_imp, // final boolean [][] saturation_main, // (near) saturated pixels or null
quadCLT_aux.saturation_imp, // final boolean [][] saturation_aux, // (near) saturated pixels or null
// correlation results - combo will be for the correlation between two quad cameras
......@@ -7251,8 +7253,8 @@ if (debugLevel > -100) return true; // temporarily !
no_int_x0, // final boolean no_int_x0, // do not offset window to integer maximum - used when averaging low textures to avoid "jumps" for very wide
tile_op, // final int [][] tile_op_main, // [tilesY][tilesX] - what to do - 0 - nothing for this tile
disparity_array, // final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
null, // quadCLT_main.image_data, // final double [][][] image_data_main, // first index - number of image in a quad
quadCLT_aux.image_data, // final double [][][] image_data_aux, // first index - number of image in a quad
null, // main_image_data, // final double [][][] image_data_main, // first index - number of image in a quad
quadCLT_aux.getImageData(), // final double [][][] image_data_aux, // first index - number of image in a quad
null, // quadCLT_main.saturation_imp, // final boolean [][] saturation_main, // (near) saturated pixels or null
quadCLT_aux.saturation_imp, // final boolean [][] saturation_aux, // (near) saturated pixels or null
// correlation results - combo will be for the correlation between two quad cameras
......