Commit 33977018 authored by Andrey Filippov's avatar Andrey Filippov

Working on extended window (more than reference scene sensor view)

parent b4d88911
......@@ -5,6 +5,7 @@ import static jcuda.driver.JCudaDriver.cuCtxSynchronize;
import static jcuda.driver.JCudaDriver.cuLaunchKernel;
import static jcuda.driver.JCudaDriver.cuMemAlloc;
import static jcuda.driver.JCudaDriver.cuMemAllocPitch;
import static jcuda.driver.JCudaDriver.cuMemFree;
import static jcuda.driver.JCudaDriver.cuMemcpy2D;
import static jcuda.driver.JCudaDriver.cuMemcpyDtoH;
import static jcuda.driver.JCudaDriver.cuMemcpyHtoD;
......@@ -57,9 +58,9 @@ public class GpuQuad{ // quad camera description
private CUdeviceptr [] gpu_kernels_h;
private CUdeviceptr [] gpu_kernel_offsets_h;
private CUdeviceptr [] gpu_bayer_h;
private CUdeviceptr [] gpu_clt_h;
private CUdeviceptr [] gpu_clt_h = null;
private CUdeviceptr [] gpu_clt_ref_h = null;
private CUdeviceptr [] gpu_corr_images_h;
private CUdeviceptr [] gpu_corr_images_h = null;
// GPU pointers to array of GPU pointers
private CUdeviceptr gpu_kernels;
private CUdeviceptr gpu_kernel_offsets;
......@@ -73,9 +74,13 @@ public class GpuQuad{ // quad camera description
private CUdeviceptr gpu_corrs_combo_td;
private CUdeviceptr gpu_textures;
private CUdeviceptr gpu_clt;
private CUdeviceptr gpu_clt = null;
private CUdeviceptr gpu_clt_ref = null; // will be allocated when first needed?
private CUdeviceptr gpu_4_images; // may actually be 16
private CUdeviceptr gpu_4_images = null; // may actually be 16
/// will store w,h (in pixels) when allocating /reallocating memory
private int [] gpu_clt_wh = null;
private int [] gpu_clt_ref_wh = null;
private int [] gpu_4_images_wh = null;
private CUdeviceptr gpu_corr_indices;
private CUdeviceptr gpu_corr_combo_indices;
private CUdeviceptr gpu_num_corr_tiles;
......@@ -279,8 +284,8 @@ public class GpuQuad{ // quad camera description
gpu_corrs_combo_td = new CUdeviceptr(); // allocate tilesX * tilesY * 4 * DTT_SIZE * DTT_SIZE * Sizeof.FLOAT
gpu_textures = new CUdeviceptr(); // allocate tilesX * tilesY * ? * 256 * Sizeof.FLOAT
gpu_clt = new CUdeviceptr();
gpu_4_images = new CUdeviceptr();
/// gpu_clt = new CUdeviceptr();
/// gpu_4_images = new CUdeviceptr();
gpu_corr_indices = new CUdeviceptr(); // allocate tilesX * tilesY * 6 * Sizeof.FLOAT
// May add separate gpu_corr_indices_td here
gpu_corr_combo_indices = new CUdeviceptr(); // allocate tilesX * tilesY * 1 * Sizeof.FLOAT
......@@ -324,6 +329,9 @@ public class GpuQuad{ // quad camera description
img_height, // long Height,
Sizeof.FLOAT); // int ElementSizeBytes)
mclt_stride = (int)(device_stride[0] / Sizeof.FLOAT);
// Maybe move _bayer to use variable width/height as gpu_clt, gpu_corr_images_h
/*
gpu_corr_images_h[ncam] = new CUdeviceptr();
cuMemAllocPitch (
gpu_corr_images_h[ncam], // CUdeviceptr dptr,
......@@ -332,8 +340,9 @@ public class GpuQuad{ // quad camera description
3*(img_height + GPUTileProcessor.DTT_SIZE),// long Height,
Sizeof.FLOAT); // int ElementSizeBytes)
imclt_stride = (int)(device_stride[0] / Sizeof.FLOAT);
gpu_clt_h[ncam] = new CUdeviceptr();
cuMemAlloc(gpu_clt_h[ncam],tilesY * tilesX * num_colors * 4 * GPUTileProcessor.DTT_SIZE * GPUTileProcessor.DTT_SIZE * Sizeof.FLOAT ); // public static int cuMemAlloc(CUdeviceptr dptr, long bytesize)
*/
/// gpu_clt_h[ncam] = new CUdeviceptr();
/// cuMemAlloc(gpu_clt_h[ncam],tilesY * tilesX * num_colors * 4 * GPUTileProcessor.DTT_SIZE * GPUTileProcessor.DTT_SIZE * Sizeof.FLOAT ); // public static int cuMemAlloc(CUdeviceptr dptr, long bytesize)
}
// now create device arrays pointers
if (Sizeof.POINTER != Sizeof.LONG) {
......@@ -344,14 +353,14 @@ public class GpuQuad{ // quad camera description
cuMemAlloc(gpu_kernels, num_cams * Sizeof.POINTER);
cuMemAlloc(gpu_kernel_offsets, num_cams * Sizeof.POINTER);
cuMemAlloc(gpu_bayer, num_cams * Sizeof.POINTER);
cuMemAlloc(gpu_clt, num_cams * Sizeof.POINTER);
cuMemAlloc(gpu_4_images, num_cams * Sizeof.POINTER);
/// cuMemAlloc(gpu_clt, num_cams * Sizeof.POINTER);
/// cuMemAlloc(gpu_4_images, num_cams * Sizeof.POINTER);
long [] gpu_kernels_l = new long [num_cams];
long [] gpu_kernel_offsets_l = new long [num_cams];
long [] gpu_bayer_l = new long [num_cams];
long [] gpu_clt_l = new long [num_cams];
long [] gpu_4_images_l = new long [num_cams];
/// long [] gpu_clt_l = new long [num_cams];
/// long [] gpu_4_images_l = new long [num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) gpu_kernels_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_kernels_h[ncam]);
cuMemcpyHtoD(gpu_kernels, Pointer.to(gpu_kernels_l), num_cams * Sizeof.POINTER);
......@@ -362,11 +371,11 @@ public class GpuQuad{ // quad camera description
for (int ncam = 0; ncam < num_cams; ncam++) gpu_bayer_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_bayer_h[ncam]);
cuMemcpyHtoD(gpu_bayer, Pointer.to(gpu_bayer_l), num_cams * Sizeof.POINTER);
for (int ncam = 0; ncam < num_cams; ncam++) gpu_clt_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_clt_h[ncam]);
cuMemcpyHtoD(gpu_clt, Pointer.to(gpu_clt_l), num_cams * Sizeof.POINTER);
/// for (int ncam = 0; ncam < num_cams; ncam++) gpu_clt_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_clt_h[ncam]);
/// cuMemcpyHtoD(gpu_clt, Pointer.to(gpu_clt_l), num_cams * Sizeof.POINTER);
for (int ncam = 0; ncam < num_cams; ncam++) gpu_4_images_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_corr_images_h[ncam]);
cuMemcpyHtoD(gpu_4_images, Pointer.to(gpu_4_images_l), num_cams * Sizeof.POINTER);
/// for (int ncam = 0; ncam < num_cams; ncam++) gpu_4_images_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_corr_images_h[ncam]);
/// cuMemcpyHtoD(gpu_4_images, Pointer.to(gpu_4_images_l), num_cams * Sizeof.POINTER);
// Set GeometryCorrection data
cuMemAlloc(gpu_geometry_correction, GeometryCorrection.arrayLength(GPUTileProcessor.MAX_NUM_CAMS) * Sizeof.FLOAT); // always maximal number of cameras (sparse)
......@@ -465,11 +474,11 @@ public class GpuQuad{ // quad camera description
texture_stride_rgba = (int)(device_stride[0] / Sizeof.FLOAT);
}
public int getTilesX() {
return img_width / GPUTileProcessor.DTT_SIZE;
return getImageWidth() / GPUTileProcessor.DTT_SIZE;
}
public int getTilesY() {
return img_height / GPUTileProcessor.DTT_SIZE;
return getImageHeight() / GPUTileProcessor.DTT_SIZE;
}
public void resetGeometryCorrection() {
......@@ -486,8 +495,14 @@ public class GpuQuad{ // quad camera description
System.out.println("======resetGeometryCorrectionVector()");
}
}
public int getImageWidth() {return this.img_width;}
public int getImageHeight() {return this.img_height;}
// public int getImageWidth() {return this.img_width;}
// public int getImageHeight() {return this.img_height;}
public int getImageWidth() {return (gpu_4_images_wh== null) ? getSensorWidth(): gpu_4_images_wh[0];}
public int getImageHeight() {return (gpu_4_images_wh== null) ? getSensorHeight(): gpu_4_images_wh[1];} // valid after last IMCLT
public int getSensorWidth() {return this.img_width;}
public int getSensorHeight() {return this.img_height;}
public int getDttSize() {return GPUTileProcessor.DTT_SIZE;}
// public int getNumCams() {return GPUTileProcessor.NUM_CAMS;}
public int getSensorMaskInter() {return sensor_mask_inter;}
......@@ -1450,30 +1465,97 @@ public class GpuQuad{ // quad camera description
* Direct CLT conversion and aberration correction
*/
public void execConvertDirect() {
execConvertDirect(false);
execConvertDirect(
false,
null);
}
/**
* Convert and save TD representation in either normal or reference scene. Reference scene TD representation
* is used for interscene correlation (for "IMU")
* @param ref_scene save result into a separate buffer for interscene correlation when true.
* @param wh window width, height (or null)
*/
public void execConvertDirect(boolean ref_scene) {
public void execConvertDirect(
boolean ref_scene,
int [] wh) {
if (this.gpuTileProcessor.GPU_CONVERT_DIRECT_kernel == null)
{
IJ.showMessage("Error", "No GPU kernel: GPU_CONVERT_DIRECT_kernel");
return;
}
if (wh == null) {
wh = new int[] {img_width, img_height};
}
setConvolutionKernels(false); // set kernels if they are not set already
setBayerImages(false); // set Bayer images if this.quadCLT instance has new ones
// kernel parameters: pointer to pointers
int tilesX = img_width / GPUTileProcessor.DTT_SIZE;
// int tilesX = img_width / GPUTileProcessor.DTT_SIZE;
int tilesX = wh[0] / GPUTileProcessor.DTT_SIZE;
// De-allocate if size mismatch, allocate if needed. Now it is the only place where clt is allocated
if (ref_scene) {
if ((gpu_clt_ref_wh != null) && ((gpu_clt_ref_wh[0] != wh[0]) || (gpu_clt_ref_wh[1] != wh[1]))) {
for (int ncam = 0; ncam < num_cams; ncam++) {
cuMemFree (gpu_clt_ref_h[ncam]);
}
cuMemFree (gpu_clt_ref);
gpu_clt_ref = null;
gpu_clt_ref_wh = null;
}
if (gpu_clt_ref == null) { // Allocate memory, create pointers for reference scene TD representation
long [] gpu_clt_ref_l = new long [num_cams];
// int tilesY = img_height / GPUTileProcessor.DTT_SIZE;
int tilesY = wh[1] / GPUTileProcessor.DTT_SIZE;
gpu_clt_ref_h = new CUdeviceptr[num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_ref_h[ncam] = new CUdeviceptr();
cuMemAlloc(gpu_clt_ref_h[ncam],
tilesY * tilesX * num_colors * 4 * GPUTileProcessor.DTT_SIZE * GPUTileProcessor.DTT_SIZE * Sizeof.FLOAT );
}
gpu_clt_ref = new CUdeviceptr();
cuMemAlloc(gpu_clt_ref, num_cams * Sizeof.POINTER);
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_ref_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_clt_ref_h[ncam]);
}
cuMemcpyHtoD(gpu_clt_ref, Pointer.to(gpu_clt_ref_l), num_cams * Sizeof.POINTER);
gpu_clt_ref_wh = wh.clone();
}
} else { // same for main (not ref) memory
if ((gpu_clt_wh != null) && ((gpu_clt_wh[0] != wh[0]) || (gpu_clt_wh[1] != wh[1]))) {
for (int ncam = 0; ncam < num_cams; ncam++) {
cuMemFree (gpu_clt_h[ncam]);
}
cuMemFree (gpu_clt);
gpu_clt = null;
gpu_clt_wh = null;
}
if (gpu_clt == null) { // Allocate memory, create pointers for reference scene TD representation
long [] gpu_clt_l = new long [num_cams];
// int tilesY = img_height / GPUTileProcessor.DTT_SIZE;
int tilesY = wh[1] / GPUTileProcessor.DTT_SIZE;
gpu_clt_h = new CUdeviceptr[num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_h[ncam] = new CUdeviceptr();
cuMemAlloc(gpu_clt_h[ncam],
tilesY * tilesX * num_colors * 4 * GPUTileProcessor.DTT_SIZE * GPUTileProcessor.DTT_SIZE * Sizeof.FLOAT );
}
gpu_clt = new CUdeviceptr();
cuMemAlloc(gpu_clt, num_cams * Sizeof.POINTER);
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_clt_h[ncam]);
}
cuMemcpyHtoD(gpu_clt, Pointer.to(gpu_clt_l), num_cams * Sizeof.POINTER);
gpu_clt_wh = wh.clone();
}
}
/*
if (ref_scene && (gpu_clt_ref == null)) { // Allocate memory, create pointers for reference scene TD representation
long [] gpu_clt_ref_l = new long [num_cams];
int tilesY = img_height / GPUTileProcessor.DTT_SIZE;
gpu_clt_ref_h = new CUdeviceptr[num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_clt_ref_h[ncam] = new CUdeviceptr();
cuMemAlloc(gpu_clt_ref_h[ncam],tilesY * tilesX * num_colors * 4 * GPUTileProcessor.DTT_SIZE * GPUTileProcessor.DTT_SIZE * Sizeof.FLOAT );
cuMemAlloc(gpu_clt_ref_h[ncam],
tilesY * tilesX * num_colors * 4 * GPUTileProcessor.DTT_SIZE * GPUTileProcessor.DTT_SIZE * Sizeof.FLOAT );
}
gpu_clt_ref = new CUdeviceptr();
cuMemAlloc(gpu_clt_ref, num_cams * Sizeof.POINTER);
......@@ -1481,7 +1563,9 @@ public class GpuQuad{ // quad camera description
gpu_clt_ref_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_clt_ref_h[ncam]);
}
cuMemcpyHtoD(gpu_clt_ref, Pointer.to(gpu_clt_ref_l), num_cams * Sizeof.POINTER);
gpu_clt_ref_wh = wh.clone();
}
*/
CUdeviceptr gpu_clt_selected = ref_scene ? gpu_clt_ref : gpu_clt;
int [] GridFullWarps = {1, 1, 1};
int [] ThreadsFullWarps = {1, 1, 1};
......@@ -1497,8 +1581,8 @@ public class GpuQuad{ // quad camera description
Pointer.to(new int[] { num_task_tiles }),
// move lpf to 4-image generator kernel - DONE
Pointer.to(new int[] { 0 }), // lpf_mask
Pointer.to(new int[] { img_width}), // int woi_width,
Pointer.to(new int[] { img_height}), // int woi_height,
Pointer.to(new int[] { wh[0]}), // img_width}), // int woi_width,
Pointer.to(new int[] { wh[1]}), // img_height}), // int woi_height,
Pointer.to(new int[] { kernels_hor}), // int kernels_hor,
Pointer.to(new int[] { kernels_vert}), // int kernels_vert);
Pointer.to(gpu_active_tiles),
......@@ -1527,21 +1611,59 @@ public class GpuQuad{ // quad camera description
boolean is_mono) {
execImcltRbgAll(
is_mono,
false);
false,
null ); //int [] wh
}
public void execImcltRbgAll(
public void execImcltRbgAll( // Now allocates/re-allocates GPU memory
boolean is_mono,
boolean ref_scene
) {
boolean ref_scene,
int [] wh) {
if (this.gpuTileProcessor.GPU_IMCLT_ALL_kernel == null)
{
IJ.showMessage("Error", "No GPU kernel: GPU_IMCLT_ALL_kernel");
return;
}
if (wh == null) {
wh = new int[] {img_width, img_height};
}
int apply_lpf = 1;
int tilesX = img_width / GPUTileProcessor.DTT_SIZE;
int tilesY = img_height / GPUTileProcessor.DTT_SIZE;
/// int tilesX = img_width / GPUTileProcessor.DTT_SIZE;
/// int tilesY = img_height / GPUTileProcessor.DTT_SIZE;
int tilesX = wh[0] / GPUTileProcessor.DTT_SIZE;
int tilesY = wh[1] / GPUTileProcessor.DTT_SIZE;
// Free if allocated but size mismatch
if ((gpu_4_images_wh != null) && ((gpu_4_images_wh[0] != wh[0]) || (gpu_4_images_wh[1] != wh[1]))) {
for (int ncam = 0; ncam < num_cams; ncam++) {
cuMemFree (gpu_corr_images_h[ncam]);
}
cuMemFree (gpu_4_images);
gpu_4_images = null;
gpu_4_images_wh = null;
}
// Allocate if was not allocated or was freed
if (gpu_4_images == null) { // Allocate memory, create pointers
long [] device_stride = new long [1];
for (int ncam = 0; ncam < num_cams; ncam++) {
gpu_corr_images_h[ncam] = new CUdeviceptr();
cuMemAllocPitch (
gpu_corr_images_h[ncam], // CUdeviceptr dptr,
device_stride, // long[] pPitch,
// (img_width + GPUTileProcessor.DTT_SIZE) * Sizeof.FLOAT, // long WidthInBytes,
(wh[0] + GPUTileProcessor.DTT_SIZE) * Sizeof.FLOAT, // long WidthInBytes,
// 3*(img_height + GPUTileProcessor.DTT_SIZE),// long Height,
3*(wh[1] + GPUTileProcessor.DTT_SIZE),// long Height, //FIXME*** No need for 3x? ****
Sizeof.FLOAT); // int ElementSizeBytes)
imclt_stride = (int)(device_stride[0] / Sizeof.FLOAT);
}
gpu_4_images = new CUdeviceptr();
cuMemAlloc(gpu_4_images, num_cams * Sizeof.POINTER);
long [] gpu_4_images_l = new long [num_cams];
for (int ncam = 0; ncam < num_cams; ncam++) gpu_4_images_l[ncam] = GPUTileProcessor.getPointerAddress(gpu_corr_images_h[ncam]);
cuMemcpyHtoD(gpu_4_images, Pointer.to(gpu_4_images_l), num_cams * Sizeof.POINTER);
gpu_4_images_wh = wh.clone();
}
CUdeviceptr gpu_clt_selected = ref_scene ? gpu_clt_ref : gpu_clt;
int [] ThreadsFullWarps = {1, 1, 1};
int [] GridFullWarps = {1, 1, 1};
......@@ -3350,10 +3472,16 @@ public class GpuQuad{ // quad camera description
}
public float [][] getRBG (int ncam){
int gpu_height = (img_height + GPUTileProcessor.DTT_SIZE);
int gpu_width = (img_width + GPUTileProcessor.DTT_SIZE);
int out_width = getImageWidth();// + gpuQuad.getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
int out_height = getImageHeight(); // + gpuQuad.getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
/// int gpu_height = (img_height + GPUTileProcessor.DTT_SIZE);
/// int gpu_width = (img_width + GPUTileProcessor.DTT_SIZE);
int gpu_height = (out_height + GPUTileProcessor.DTT_SIZE);
int gpu_width = (out_width + GPUTileProcessor.DTT_SIZE);
int gpu_img_size = gpu_width * gpu_height;
int rslt_img_size = img_height * img_width; // width * height;
/// int rslt_img_size = img_height * img_width; // width * height;
int rslt_img_size = out_height * out_width; // width * height;
float [] cpu_corr_image = new float [ num_colors * gpu_img_size];
int gpu_width_in_bytes = gpu_width *Sizeof.FLOAT;
......@@ -3375,46 +3503,16 @@ public class GpuQuad{ // quad camera description
float [][] fimg = new float [num_colors][ rslt_img_size];
for (int ncol = 0; ncol < num_colors; ncol++) {
int tl_offset = (GPUTileProcessor.DTT_SIZE/2) * (gpu_width + 1) + ncol*gpu_img_size;
for (int nrow=0; nrow < img_height; nrow++) {
// System.arraycopy(cpu_corr_image, ncol*gpu_img_size, fimg[ncol], 0, rslt_img_size);
System.arraycopy(cpu_corr_image, tl_offset + (gpu_width * nrow), fimg[ncol], img_width * nrow, img_width);
}
}
return fimg;
// for (int nrow=0; nrow < img_height; nrow++) {
for (int nrow=0; nrow < out_height; nrow++) {
// System.arraycopy(cpu_corr_image, tl_offset + (gpu_width * nrow), fimg[ncol], img_width * nrow, img_width);
System.arraycopy(cpu_corr_image, tl_offset + (gpu_width * nrow), fimg[ncol], out_width * nrow, out_width);
}
@Deprecated
public float [][] getRBGuntrimmed (int ncam){
int height = (img_height + GPUTileProcessor.DTT_SIZE);
int width = (img_width + GPUTileProcessor.DTT_SIZE);
int rslt_img_size = width * height;
float [] cpu_corr_image = new float [ num_colors * rslt_img_size];
int width_in_bytes = width *Sizeof.FLOAT;
// for copying results to host
CUDA_MEMCPY2D copyD2H = new CUDA_MEMCPY2D();
copyD2H.srcMemoryType = CUmemorytype.CU_MEMORYTYPE_DEVICE;
copyD2H.srcDevice = gpu_corr_images_h[ncam]; // ((test & 1) ==0) ? src_dpointer : dst_dpointer; // copy same data
copyD2H.srcPitch = imclt_stride*Sizeof.FLOAT;
copyD2H.dstMemoryType = CUmemorytype.CU_MEMORYTYPE_HOST;
copyD2H.dstHost = Pointer.to(cpu_corr_image);
copyD2H.dstPitch = width_in_bytes;
copyD2H.WidthInBytes = width_in_bytes;
copyD2H.Height = num_colors * height; // /2;
cuMemcpy2D(copyD2H); // run copy
float [][] fimg = new float [num_colors][ rslt_img_size];
for (int ncol = 0; ncol < num_colors; ncol++) {
System.arraycopy(cpu_corr_image, ncol*rslt_img_size, fimg[ncol], 0, rslt_img_size);
}
return fimg;
}
@Deprecated
public void getTileSubcamOffsets(
final TpTask[] tp_tasks, // will use // modify to have offsets for 8 cameras
......@@ -3553,7 +3651,7 @@ public class GpuQuad{ // quad camera description
public static TpTask[] setInterTasks(
final int num_cams,
final int img_width,
final int img_width, // should match pXpYD
final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
final boolean [] selection, // may be null, if not null do not process unselected tiles
......@@ -3567,7 +3665,8 @@ public class GpuQuad{ // quad camera description
//change to fixed 511?
final int task_code = ((1 << num_pairs)-1) << GPUTileProcessor.TASK_CORR_BITS; // correlation only
final double min_px = margin;
final double max_px = img_width - 1 - margin;
// final double max_px = img_width - 1 - margin;
final double max_px = geometryCorrection.getSensorWH()[0] - 1 - margin; // sensor width here, not window width
final double [] min_py = new double[num_cams] ;
final double [] max_py = new double[num_cams] ;
for (int i = 0; i < num_cams; i++) {
......@@ -3598,8 +3697,6 @@ public class GpuQuad{ // quad camera description
int tileY = nTile / tilesX;
int tileX = nTile % tilesX;
TpTask tp_task = new TpTask(num_cams, tileX, tileY);
// tp_task.ty = tileY;
// tp_task.tx = tileX;
tp_task.task = task_code;
double disparity = pXpYD[nTile][2] + disparity_corr;
tp_task.target_disparity = (float) disparity; // will it be used?
......
......@@ -95,7 +95,7 @@ public class ImagejJp4Tiff {
// private static final int FIXCH6_MAXVAL = 23367; // higher - subtract 4096, <19271 -add 4096
private static final int FIXCH6_EXPECTED = 21319; // expected value
private static final String FIXCH6_EARLIEST = "2021-12-01 00:00:00.000";
private static final String FIXCH6_LATEST = "2022-04-01 00:00:00.000";
private static final String FIXCH6_LATEST = "2022-12-01 00:00:00.000";
// -- Fields --
......
......@@ -141,35 +141,16 @@ public class ImageDtt extends ImageDttCPU {
final boolean macro_mode = macro_scale != 1; // correlate tile data instead of the pixel data
// final int quad = getNumSensors(); // 4; // number of subcameras
// final int numcol = 3; // number of colors // keep the same, just do not use [0] and [1], [2] - green
final int numcol = isMonochrome()?1:3;
final int width = gpuQuad.getImageWidth();
final int height = gpuQuad.getImageHeight();
final int tilesX=gpuQuad.getTilesX(); // width/transform_size;
final int tilesY=gpuQuad.getTilesY(); // final int tilesY=height/transform_size;
//// final int nTilesInChn=tilesX*tilesY;
//// final double [][][][][][] clt_data = new double[quad][numcol][tilesY][tilesX][][];
final Thread[] threads = newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final double [] col_weights= new double [numcol]; // colors are RBG
final double [][] dbg_distort = debug_distort? (new double [4*getNumSensors()][tilesX*tilesY]) : null;
// not yet used with GPU
/**
final double [][] corr_wnd = Corr2dLMA.getCorrWnd(
transform_size,
imgdtt_params.lma_wnd);
final double [] corr_wnd_inv_limited = (imgdtt_params.lma_min_wnd <= 1.0)? new double [corr_wnd.length * corr_wnd[0].length]: null;
if (corr_wnd_inv_limited != null) {
double inv_pwr = imgdtt_params.lma_wnd_pwr - (imgdtt_params.lma_wnd - 1.0); // compensate for lma_wnd
for (int i = imgdtt_params.lma_hard_marg; i < (corr_wnd.length - imgdtt_params.lma_hard_marg); i++) {
for (int j = imgdtt_params.lma_hard_marg; j < (corr_wnd.length - imgdtt_params.lma_hard_marg); j++) {
corr_wnd_inv_limited[i * (corr_wnd.length) + j] = 1.0/Math.max(Math.pow(corr_wnd[i][j],inv_pwr), imgdtt_params.lma_min_wnd);
}
}
}
*/
// keep for now for mono, find out what do they mean for macro mode
if (isMonochrome()) {
......@@ -1173,6 +1154,7 @@ public class ImageDtt extends ImageDttCPU {
* @param globalDebugLevel
*/
public void setReferenceTD(
final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
final boolean use_reference_buffer,
final TpTask[] tp_tasks,
......@@ -1203,7 +1185,8 @@ public class ImageDtt extends ImageDttCPU {
tp_tasks,
false); // boolean use_aux // while is it in class member? - just to be able to free
// Skipping if ((fdisp_dist != null) || (fpxpy != null)) {...
gpuQuad.execConvertDirect(use_reference_buffer); // put results into a "reference" buffer
// int [] wh = null;
gpuQuad.execConvertDirect(use_reference_buffer, wh); // put results into a "reference" buffer
}
......
......@@ -33,8 +33,10 @@ public class IntersceneMatchParameters {
public boolean force_ref_dsi = false; // true;
public boolean force_orientations = false;
public boolean force_interscene = false; // true;
public boolean export_images = true; // 16-slice images (same disparity, COMBO_DSN_INDX_DISP_FG and COMBO_DSN_INDX_DISP_BG_ALL,
public boolean show_images = false;
public boolean export_images = true; // pseudo-color 16-slice images (same disparity, COMBO_DSN_INDX_DISP_FG and COMBO_DSN_INDX_DISP_BG_ALL,
public boolean show_images = false; // color, infinity
public boolean show_images_bgfg = false; // bg and fg
public boolean show_images_mono = false; // float, monochrome 16-slice images (same disparity, COMBO_DSN_INDX_DISP_FG and COMBO_DSN_INDX_DISP_BG_ALL,
public boolean show_ranges = true;
public double range_disparity_offset = -0.08;
......@@ -112,8 +114,12 @@ public class IntersceneMatchParameters {
"Force interscene calculation (+ML export) even if it was performed before.");
gd.addCheckbox ("Export all-sensor images", this.export_images,
"Export multi-slice images: with constant disparity, with foreground disparity, and with background disparity");
gd.addCheckbox ("Show exported images", this.show_images,
"Display generated/saved images");
gd.addCheckbox ("Show exported images (same disparity)", this.show_images,
"Display generated/saved image set, pseudocolors");
gd.addCheckbox ("Show exported FG/BG", this.show_images_bgfg,
"Show foreground and background exported images");
gd.addCheckbox ("Show floating-point monochrome images", this.show_images_mono,
"Display generated/saved monochrome images");
gd.addCheckbox ("Show distances in meters", this.show_ranges,
"Calculate strength, distance, X, and Y in meters");
......@@ -226,6 +232,8 @@ public class IntersceneMatchParameters {
this.force_interscene = gd.getNextBoolean();
this.export_images = gd.getNextBoolean();
this.show_images = gd.getNextBoolean();
this.show_images_bgfg = gd.getNextBoolean();
this.show_images_mono = gd.getNextBoolean();
this.show_ranges = gd.getNextBoolean();
this.range_disparity_offset = gd.getNextNumber();
this.range_min_strength = gd.getNextNumber();
......@@ -282,6 +290,8 @@ public class IntersceneMatchParameters {
properties.setProperty(prefix+"force_interscene", this.force_interscene + ""); // boolean
properties.setProperty(prefix+"export_images", this.export_images + ""); // boolean
properties.setProperty(prefix+"show_images", this.show_images + ""); // boolean
properties.setProperty(prefix+"show_images_bgfg", this.show_images_bgfg + ""); // boolean
properties.setProperty(prefix+"show_images_mono", this.show_images_mono + ""); // boolean
properties.setProperty(prefix+"show_ranges", this.show_ranges + ""); // boolean
properties.setProperty(prefix+"range_disparity_offset",this.range_disparity_offset+""); // double
......@@ -337,6 +347,8 @@ public class IntersceneMatchParameters {
if (properties.getProperty(prefix+"force_interscene")!=null) this.force_interscene=Boolean.parseBoolean(properties.getProperty(prefix+"force_interscene"));
if (properties.getProperty(prefix+"export_images")!=null) this.export_images=Boolean.parseBoolean(properties.getProperty(prefix+"export_images"));
if (properties.getProperty(prefix+"show_images")!=null) this.show_images=Boolean.parseBoolean(properties.getProperty(prefix+"show_images"));
if (properties.getProperty(prefix+"show_images_bgfg")!=null) this.show_images_bgfg=Boolean.parseBoolean(properties.getProperty(prefix+"show_images_bgfg"));
if (properties.getProperty(prefix+"show_images_mono")!=null) this.show_images_mono=Boolean.parseBoolean(properties.getProperty(prefix+"show_images_mono"));
if (properties.getProperty(prefix+"show_ranges")!=null) this.show_images=Boolean.parseBoolean(properties.getProperty(prefix+"show_ranges"));
if (properties.getProperty(prefix+"range_disparity_offset")!=null) this.range_disparity_offset=Double.parseDouble(properties.getProperty(prefix+"range_disparity_offset"));
if (properties.getProperty(prefix+"range_min_strength")!=null) this.range_min_strength=Double.parseDouble(properties.getProperty(prefix+"range_min_strength"));
......@@ -392,6 +404,8 @@ public class IntersceneMatchParameters {
imp.force_interscene = this.force_interscene;
imp.export_images = this.export_images;
imp.show_images = this.show_images;
imp.show_images_bgfg = this.show_images_bgfg;
imp.show_images_mono = this.show_images_mono;
imp.show_ranges = this.show_ranges;
imp.range_disparity_offset = this.range_disparity_offset;
imp.range_min_strength = this.range_min_strength;
......
package com.elphel.imagej.tileprocessor;
import java.awt.Rectangle;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
......@@ -237,6 +238,7 @@ public class MultisceneLY {
String ts = scenes[nscene].getImageName();
if (nscene == last_scene_index) {
scenes_pXpYD[nscene] = OpticalFlow.transformToScenePxPyD(
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
OpticalFlow.ZERO3, // final double [] scene_xyz, // camera center in world coordinates
OpticalFlow.ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
......@@ -253,6 +255,7 @@ public class MultisceneLY {
scene_ers_atr_dt); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
//setupERS() will be inside transformToScenePxPyD()
scenes_pXpYD[nscene] = OpticalFlow.transformToScenePxPyD( // will be null for disparity == NaN, total size - tilesX*tilesY
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
......
......@@ -2720,6 +2720,7 @@ public class OpticalFlow {
final QuadCLT scene_QuadClt,
final QuadCLT reference_QuadClt) {
return transformToScenePxPyD(
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // invalid tiles - NaN in disparity
scene_xyz, // camera center in world coordinates
scene_atr, // camera orientation relative to world frame
......@@ -2728,6 +2729,111 @@ public class OpticalFlow {
this.threadsMax);
}
/**
* Calculate pX, pY, Disparity triplets for the rotated scene to match uniform grid of a virtual camera
* Supports reference window larger that the physical sensor to show more of the other frames with partial
* overlap.
* @param full_woi_in null or a larger reference window {width, height, left, top}
* @param disparity_ref_in disparity value - either full_woi size or a reference frame only (rest will be 0)
* @param scene_xyz scene linear offset (in meters)
* @param scene_atr scene azimuth, tilt, roll offset
* @param scene_QuadClt
* @param reference_QuadClt
* @param threadsMax
* @return pX, pY, Disparity of the other scene. pX, pY are measured from the sensor top left corner
*/
public static double [][] transformToScenePxPyD(
final Rectangle full_woi_in, // show larger than sensor WOI (or null) IN TILES
final double [] disparity_ref_in, // invalid tiles - NaN in disparity
final double [] scene_xyz, // camera center in world coordinates
final double [] scene_atr, // camera orientation relative to world frame
final QuadCLT scene_QuadClt,
final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
int threadsMax)
{
TileProcessor tp = scene_QuadClt.getTileProcessor();
final int tilesX = (full_woi_in==null) ? tp.getTilesX() : full_woi_in.width; // full width,includeing extra
final int tilesY = (full_woi_in==null) ? tp.getTilesY() : full_woi_in.height;
final int offsetX_ref = (full_woi_in==null) ? 0 : full_woi_in.x;
final int offsetY_ref = (full_woi_in==null) ? 0 : full_woi_in.y;
int ref_w = tp.getTilesX();
int ref_h = tp.getTilesY();
double [] dref = disparity_ref_in;
if (full_woi_in!=null) {
if ((ref_w + offsetX_ref) > tilesX) ref_w = tilesX - offsetX_ref;
if ((ref_h + offsetY_ref) > tilesY) ref_h = tilesY - offsetY_ref;
if (disparity_ref_in.length < (full_woi_in.width * full_woi_in.height)) {
dref= new double[full_woi_in.width * full_woi_in.height];
for (int i = 0; i < ref_h; i++) {
System.arraycopy(
disparity_ref_in,
i * tp.getTilesX(), // not truncated
dref,
(i + offsetY_ref) * full_woi_in.width + offsetX_ref,
ref_w); // may be truncated
}
}
}
final double [] disparity_ref = dref;
final int tilesX_ref = ref_w;
final int tilesY_ref = ref_h;
final int tiles = tilesX*tilesY;
final int transform_size = tp.getTileSize();
final double [][] pXpYD= new double [tiles][];
final ErsCorrection ersSceneCorrection = scene_QuadClt.getErsCorrection();
final ErsCorrection ersReferenceCorrection = (reference_QuadClt!=null)? reference_QuadClt.getErsCorrection(): ersSceneCorrection;
if (reference_QuadClt!=null) {
ersReferenceCorrection.setupERS(); // just in case - setUP using instance paRAMETERS
}
ersSceneCorrection.setupERS();
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) if (!Double.isNaN(disparity_ref[nTile])) {
double disparity = disparity_ref[nTile];
int tileY = nTile / tilesX;
int tileX = nTile % tilesX;
double centerX = (tileX + 0.5 - offsetX_ref) * transform_size; // - shiftX;
double centerY = (tileY + 0.5 - offsetY_ref) * transform_size; // - shiftY;
if (disparity < 0) {
disparity = 1.0* disparity; // 0.0;
}
if (scene_QuadClt == reference_QuadClt) {
pXpYD[nTile] = new double [] {centerX, centerY, disparity};
} else {
pXpYD[nTile] = ersReferenceCorrection.getImageCoordinatesERS( // ersCorrection - reference
scene_QuadClt, // QuadCLT cameraQuadCLT, // camera station that got image to be to be matched
centerX, // double px, // pixel coordinate X in the reference view
centerY, // double py, // pixel coordinate Y in the reference view
disparity, // double disparity, // reference disparity
true, // boolean distortedView, // This camera view is distorted (diff.rect), false - rectilinear
ZERO3, // double [] reference_xyz, // this view position in world coordinates (typically ZERO3)
ZERO3, // double [] reference_atr, // this view orientation relative to world frame (typically ZERO3)
true, // boolean distortedCamera, // camera view is distorted (false - rectilinear)
scene_xyz, // double [] camera_xyz, // camera center in world coordinates
scene_atr, // double [] camera_atr, // camera orientation relative to world frame
LINE_ERR); // double line_err) // threshold error in scan lines (1.0)
if (pXpYD[nTile] != null) {
if ( (pXpYD[nTile][0] < 0.0) ||
(pXpYD[nTile][1] < 0.0) ||
(pXpYD[nTile][0] > ersSceneCorrection.getSensorWH()[0]) ||
(pXpYD[nTile][1] > ersSceneCorrection.getSensorWH()[1])) {
pXpYD[nTile] = null;
}
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
return pXpYD;
}
@Deprecated
public static double [][] transformToScenePxPyD(
final double [] disparity_ref, // invalid tiles - NaN in disparity
final double [] scene_xyz, // camera center in world coordinates
......@@ -2794,6 +2900,8 @@ public class OpticalFlow {
return pXpYD;
}
//TODO: refine inter-scene pose to accommodate refined disparity map
/**
* Removing BG tiles that are not visible because of the FG ones
......@@ -3855,6 +3963,8 @@ public class OpticalFlow {
boolean export_images = clt_parameters.imp.export_images;
boolean export_dsi_image = clt_parameters.imp.show_ranges;
boolean show_images = clt_parameters.imp.show_images;
boolean show_images_bgfg = clt_parameters.imp.show_images_bgfg;
boolean show_images_mono = clt_parameters.imp.show_images_mono;
double range_disparity_offset = clt_parameters.imp.range_disparity_offset ; // -0.08;
double range_min_strength = clt_parameters.imp.range_min_strength ; // 0.5;
......@@ -4109,6 +4219,7 @@ public class OpticalFlow {
}
if (export_images) {
final boolean toRGB = true;
if (combo_dsn_final == null) {
combo_dsn_final = quadCLTs[ref_index].readDoubleArrayFromModelDirectory(
"-INTER-INTRA-LMA", // String suffix,
......@@ -4166,51 +4277,108 @@ public class OpticalFlow {
double [] constant_disparity = new double [fg_disparity.length];
Arrays.fill(constant_disparity,clt_parameters.disparity);
Rectangle testr = new Rectangle(10, 8, 100,80);
ImagePlus imp_constant = QuadCLT.renderGPUFromDSI(
testr, // null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
constant_disparity, // double [] disparity_ref,
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
new double[] {.1,0.1,.1}, // ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
quadCLTs[ref_index], // final QuadCLT scene,
true, // toRGB, // final boolean toRGB,
"GPU-SHIFTED-D"+clt_parameters.disparity, // String suffix,
threadsMax, // int threadsMax,
debugLevel); // int debugLevel)
quadCLTs[ref_index].saveImagePlusInModelDirectory(
"GPU-SHIFTED-D"+clt_parameters.disparity, // String suffix,
null, // "GPU-SHIFTED-D"+clt_parameters.disparity, // String suffix,
imp_constant); // ImagePlus imp)
ImagePlus imp_constant_mono = QuadCLT.renderGPUFromDSI(
testr, // null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
constant_disparity, // double [] disparity_ref,
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
quadCLTs[ref_index], // final QuadCLT scene,
false, // toRGB, // final boolean toRGB,
"GPU-SHIFTED-D"+clt_parameters.disparity, // String suffix,
threadsMax, // int threadsMax,
debugLevel); // int debugLevel)
quadCLTs[ref_index].saveImagePlusInModelDirectory(
null, // "GPU-SHIFTED-D"+clt_parameters.disparity, // String suffix,
imp_constant_mono); // ImagePlus imp)
if (show_images) {
imp_constant.show();
if (show_images_mono) {
imp_constant_mono.show();
}
}
Arrays.fill(constant_disparity,clt_parameters.disparity);
ImagePlus imp_fg = QuadCLT.renderGPUFromDSI(
null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
fg_disparity, // double [] disparity_ref,
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
quadCLTs[ref_index], // final QuadCLT scene,
true, // toRGB, // final boolean toRGB,
"GPU-SHIFTED-FOREGROUND", // String suffix,
threadsMax, // int threadsMax,
debugLevel); // int debugLevel)
quadCLTs[ref_index].saveImagePlusInModelDirectory(
"GPU-SHIFTED-FOREGROUND", // String suffix,
null, // "GPU-SHIFTED-FOREGROUND", // String suffix,
imp_fg); // ImagePlus imp)
if (show_images) {
ImagePlus imp_fg_mono = QuadCLT.renderGPUFromDSI(
null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
fg_disparity, // double [] disparity_ref,
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
quadCLTs[ref_index], // final QuadCLT scene,
false, // toRGB, // final boolean toRGB,
"GPU-SHIFTED-FOREGROUND", // String suffix,
threadsMax, // int threadsMax,
debugLevel); // int debugLevel)
quadCLTs[ref_index].saveImagePlusInModelDirectory(
null, // "GPU-SHIFTED-FOREGROUND", // String suffix,
imp_fg_mono); // ImagePlus imp)
if (show_images && show_images_bgfg) {
imp_fg.show();
if (show_images_mono) {
imp_fg_mono.show();
}
}
ImagePlus imp_bg = QuadCLT.renderGPUFromDSI(
null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
bg_disparity, // double [] disparity_ref,
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
quadCLTs[ref_index], // final QuadCLT scene,
true, // final boolean toRGB,
"GPU-SHIFTED-BACKGROUND", // String suffix,
threadsMax, // int threadsMax,
debugLevel); // int debugLevel)
quadCLTs[ref_index].saveImagePlusInModelDirectory(
"GPU-SHIFTED-BACKGROUND", // String suffix,
null, // "GPU-SHIFTED-BACKGROUND", // String suffix,
imp_bg); // ImagePlus imp)
if (show_images) {
ImagePlus imp_bg_mono = QuadCLT.renderGPUFromDSI(
null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
clt_parameters, // CLTParameters clt_parameters,
bg_disparity, // double [] disparity_ref,
ZERO3, // final double [] scene_xyz, // camera center in world coordinates
ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
quadCLTs[ref_index], // final QuadCLT scene,
false, // final boolean toRGB,
"GPU-SHIFTED-BACKGROUND", // String suffix,
threadsMax, // int threadsMax,
debugLevel); // int debugLevel)
quadCLTs[ref_index].saveImagePlusInModelDirectory(
null, // "GPU-SHIFTED-BACKGROUND", // String suffix,
imp_bg_mono); // ImagePlus imp)
if (show_images && show_images_bgfg) {
imp_bg.show();
if (show_images_mono) {
imp_bg_mono.show();
}
}
}
......@@ -9739,6 +9907,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
float [][][][] fcorr_td = null; // no accumulation, use data in GPU
ref_scene.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
image_dtt.setReferenceTD(
null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
true, // final boolean use_reference_buffer,
tp_tasks_ref, // final TpTask[] tp_tasks,
......@@ -9753,6 +9922,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(ref_scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
null, // int [] wh,
toRGB, // boolean toRGB,
true, //boolean use_reference
"GPU-SHIFTED-REFERENCE"); // String suffix)
......@@ -9781,6 +9951,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(ref_scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
null, // int [] wh,
toRGB, // boolean toRGB,
false, //boolean use_reference
"GPU-SHIFTED-SCENE"); // String suffix)
......
......@@ -2031,15 +2031,90 @@ public class QuadCLT extends QuadCLTCPU {
}
/**
 * Render an image of the scene from its transform-domain representation, driven by
 * a per-tile disparity map defined in the reference view. Optionally renders into a
 * window larger than the sensor WOI (full_woi_in, measured in tiles).
 * NOTE(review): depends on project classes (OpticalFlow, GpuQuad, ImageDtt, QuadCLT);
 * the semantics of those calls are taken from their in-line argument comments.
 *
 * @param full_woi_in    rendering window in tiles, or null to use sensor dimensions
 * @param clt_parameters processing parameters (transform size, CLT sigmas, color/RGB)
 * @param disparity_ref  per-tile disparity in the reference view (NaN - invalid tile)
 * @param scene_xyz      camera center in world coordinates
 * @param scene_atr      camera orientation relative to the world frame
 * @param toRGB          true - color rendering, false - monochrome
 * @param suffix         suffix used for the rendered image title
 * @param threadsMax     maximal number of threads to launch
 * @param debugLevel     debug verbosity level
 * @return rendered ImagePlus
 */
public static ImagePlus renderGPUFromDSI(
		final Rectangle  full_woi_in, // show larger than sensor WOI in tiles (or null)
		CLTParameters    clt_parameters,
		double []        disparity_ref,
		final double []  scene_xyz, // camera center in world coordinates
		final double []  scene_atr, // camera orientation relative to world frame
		final QuadCLT    scene,
		final boolean    toRGB,
		String           suffix,
		int              threadsMax,
		final int        debugLevel){
	// Output window in pixels; null means "use sensor dimensions"
	final int [] wh = (full_woi_in == null) ? null : new int [] {
			full_woi_in.width  * GPUTileProcessor.DTT_SIZE,
			full_woi_in.height * GPUTileProcessor.DTT_SIZE};
	final int rendered_width = (wh != null) ? wh[0] : scene.getErsCorrection().getSensorWH()[0];
	// Map each reference-view tile to {pX, pY, disparity} in the scene view (null - invalid tile)
	final double [][] px_py_disp = OpticalFlow.transformToScenePxPyD(
			full_woi_in,    // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
			disparity_ref,  // final double [] disparity_ref, // invalid tiles - NaN in disparity
			scene_xyz,      // final double [] scene_xyz,
			scene_atr,      // final double [] scene_atr,
			scene,          // final QuadCLT scene_QuadClt,
			null,           // final QuadCLT reference_QuadClt - may be null, for testing if scene is rotated ref
			threadsMax);    // int threadsMax
	// Build per-tile processing tasks; the GPU calculates port coordinates itself when available
	final TpTask[] render_tasks = GpuQuad.setInterTasks( // inter?
			scene.getNumSensors(),
			rendered_width,   // should match output size, px_py_disp.length
			!scene.hasGPU(),  // calcPortsCoordinatesAndDerivatives - GPU can calculate them itself
			px_py_disp,       // per-tile array of pX,pY,disparity triplets (or nulls)
			null,             // selection - null: process all tiles
			scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
			0.0,              // final double disparity_corr,
			0,                // margin - do not use tiles if their centers are closer to the edges
			null,             // final boolean [] valid_tiles,
			threadsMax);      // maximal number of threads to launch
	scene.saveQuadClt(); // re-load new set of Bayer images to the GPU (do nothing for CPU) and geometry
	final ImageDtt dtt = new ImageDtt(
			scene.getNumSensors(),
			clt_parameters.transform_size,
			clt_parameters.img_dtt,
			scene.isAux(),
			scene.isMonochrome(),
			scene.isLwir(),
			clt_parameters.getScaleStrength(scene.isAux()),
			scene.getGPU());
	final boolean use_reference = false; // render from the scene buffer, not the reference one
	// toRGB can not be hard-coded here - it is defined in ColorProcParameters
	dtt.setReferenceTD( // change to main?
			wh,               // final int [] wh - null (use sensor dimensions) or {width, height} in pixels
			clt_parameters.img_dtt,     // extra correlation parameters
			use_reference,              // final boolean use_reference_buffer,
			render_tasks,               // final TpTask[] tp_tasks,
			clt_parameters.gpu_sigma_r, // 0.9, 1.1
			clt_parameters.gpu_sigma_b, // 0.9, 1.1
			clt_parameters.gpu_sigma_g, // 0.6, 0.7
			clt_parameters.gpu_sigma_m, // 0.4, 0.7
			threadsMax,                 // maximal number of threads to launch
			debugLevel);                // final int globalDebugLevel
	return scene.renderFromTD (
			clt_parameters,             // CLTParameters clt_parameters,
			clt_parameters.getColorProcParameters(scene.isAux()), // ColorProcParameters
			clt_parameters.getRGBParameters(), // EyesisCorrectionParameters.RGBParameters
			wh,                         // int [] wh,
			toRGB,                      // boolean toRGB,
			use_reference,              // boolean use_reference
			suffix);                    // String suffix
}
@Deprecated
public static ImagePlus renderGPUFromDSI( // being replaced by the above
CLTParameters clt_parameters,
double [] disparity_ref,
final double [] scene_xyz, // camera center in world coordinates
final double [] scene_atr, // camera orientation relative to world frame
final QuadCLT scene,
final boolean toRGB,
String suffix,
int threadsMax,
final int debugLevel){
double [][] pXpYD =OpticalFlow.transformToScenePxPyD(
null, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
......@@ -2068,8 +2143,9 @@ public class QuadCLT extends QuadCLTCPU {
clt_parameters.getScaleStrength(scene.isAux()),
scene.getGPU());
boolean use_reference = false;
boolean toRGB = true; // does not work here, define in ColorProcParameters
// boolean toRGB = true; // does not work here, define in ColorProcParameters
image_dtt.setReferenceTD( // change to main?
null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
use_reference, // true, // final boolean use_reference_buffer,
tp_tasks_ref, // final TpTask[] tp_tasks,
......@@ -2083,6 +2159,7 @@ public class QuadCLT extends QuadCLTCPU {
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,
null, // int [] wh,
toRGB, // boolean toRGB,
use_reference, //boolean use_reference
suffix); // String suffix)
......@@ -2090,22 +2167,26 @@ public class QuadCLT extends QuadCLTCPU {
}
public ImagePlus renderFromTD (
CLTParameters clt_parameters,
ColorProcParameters colorProcParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
int [] wh,
boolean toRGB,
boolean use_reference,
String suffix
) {
gpuQuad.execImcltRbgAll(isMonochrome(), use_reference); // add ref
suffix+=(toRGB?"-COLOR":"-MONO");
gpuQuad.execImcltRbgAll(
isMonochrome(),
use_reference,
wh); //int [] wh
// get data back from GPU
float [][][] iclt_fimg = new float [getNumSensors()][][];
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) {
iclt_fimg[ncam] = gpuQuad.getRBG(ncam);
iclt_fimg[ncam] = gpuQuad.getRBG(ncam); // updated window
}
// 2022/06/15 - handles variable window size
int out_width = gpuQuad.getImageWidth();// + gpuQuad.getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
int out_height = gpuQuad.getImageHeight(); // + gpuQuad.getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
if (isLwir() && colorProcParameters.lwir_autorange) {
......@@ -2140,7 +2221,7 @@ public class QuadCLT extends QuadCLTCPU {
rgbParameters,
title, // String name,
"-D"+clt_parameters.disparity, //String suffix, // such as disparity=...
toRGB,
toRGB, // does not work here?
!correctionsParameters.jpeg, // boolean bpp16, // 16-bit per channel color mode for result
false, // true, // boolean saveShowIntermediate, // save/show if set globally
false, // boolean saveShowFinal, // save/show result (color image?)
......@@ -2263,6 +2344,9 @@ public class QuadCLT extends QuadCLTCPU {
boolean advanced= correctionsParameters.zcorrect || correctionsParameters.equirectangular;
boolean toRGB= advanced? true: correctionsParameters.toRGB;
if (isLwir()) {
toRGB = colorProcParameters.lwir_pseudocolor;
}
ImagePlus [] results = null;
if (imp_quad != null) {
......@@ -3655,14 +3739,14 @@ public class QuadCLT extends QuadCLTCPU {
float [][] texture_rgb = isMonochrome() ? (new float [][] {texture_img[0]}): (new float [][] {texture_img[0],texture_img[1],texture_img[2]});
float [][] texture_rgba = isMonochrome() ? (new float [][] {texture_img[0],texture_img[1]}) : (new float [][] {texture_img[0],texture_img[1],texture_img[2],texture_img[3]});
boolean toRGB = !isLwir() || colorProcParameters.lwir_pseudocolor;
ImagePlus imp_texture_bgnd = linearStackToColor(
clt_parameters,
colorProcParameters,
rgbParameters,
name+"-texture-bgnd", // String name,
"", //String suffix, // such as disparity=...
true, // toRGB,
toRGB, // toRGB,
!this.correctionsParameters.jpeg, // boolean bpp16, // 16-bit per channel color mode for result
true, // boolean saveShowIntermediate, // save/show if set globally
false, //true, // boolean saveShowFinal, // save/show result (color image?)
......@@ -5058,13 +5142,14 @@ public class QuadCLT extends QuadCLTCPU {
texture_woi_pix.width/transform_size,
texture_woi_pix.height/transform_size);
scan.setSelected(scan.getTextureSelection().clone()); // null
boolean toRGB = !isLwir() || colorProcParameters.lwir_pseudocolor;
ImagePlus imp_texture_cluster = linearStackToColor(
clt_parameters,
colorProcParameters,
rgbParameters,
name+"-texture", // String name,
"", //String suffix, // such as disparity=...
true, // toRGB,
toRGB, // true, // toRGB,
!this.correctionsParameters.jpeg, // boolean bpp16, // 16-bit per channel color mode for result
true, // boolean saveShowIntermediate, // save/show if set globally
false, //true, // boolean saveShowFinal, // save/show result (color image?)
......
......@@ -1175,12 +1175,15 @@ public class QuadCLTCPU {
}
public void saveImagePlusInModelDirectory(
String suffix,
String suffix, // null - use title from the imp
ImagePlus imp)
{
String x3d_path = getX3dDirectory();
String file_name = image_name + suffix;
String file_path = x3d_path + Prefs.getFileSeparator() + file_name + ".tiff";
String file_name = (suffix==null) ? imp.getTitle():(image_name + suffix);
String file_path = x3d_path + Prefs.getFileSeparator() + file_name; // + ".tiff";
if (!file_path.endsWith(".tiff")) {
file_path +=".tiff";
}
FileSaver fs=new FileSaver(imp);
fs.saveAsTiff(file_path);
System.out.println("saveDoubleArrayInModelDirectory(): saved "+file_path);
......@@ -7122,7 +7125,8 @@ public class QuadCLTCPU {
float [] alpha = null; // (0..1.0)
if (iclt_data.length > 3) alpha = iclt_data[3];
if (isLwir()) {
if (!colorProcParameters.lwir_pseudocolor) {
//// if (!colorProcParameters.lwir_pseudocolor) {
if (!toRGB) {
ImageProcessor ip= new FloatProcessor(width,height);
ip.setPixels(iclt_data[0]);
ip.resetMinAndMax();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment