Commit 605aa42c authored by Andrey Filippov's avatar Andrey Filippov

Refactored 2 GPU-related classes, adding CPU-only (GPU code requires

deeper updates for 16 sensors) methods for interscene accumulation.
parent 00ba5f14
......@@ -7161,6 +7161,7 @@ List calibration
imp.setProperty("focalLength", ""+subCam.focalLength);
imp.setProperty("focalLength_units", "mm");
imp.setProperty("pixelSize", ""+subCam.pixelSize);
imp.setProperty("lineTime", ""+subCam.lineTime);
imp.setProperty("pixelSize_units", "um");
imp.setProperty("distortionA8", ""+subCam.distortionA8);
imp.setProperty("distortionA7", ""+subCam.distortionA7);
......@@ -7380,6 +7381,17 @@ List calibration
subCam.distortionRadius= Double.parseDouble((String) imp.getProperty("distortionRadius"));
subCam.focalLength= Double.parseDouble((String) imp.getProperty("focalLength"));
subCam.pixelSize= Double.parseDouble((String) imp.getProperty("pixelSize"));
			if (imp.getProperty("lineTime") != null) {
				subCam.lineTime= Double.parseDouble((String) imp.getProperty("lineTime")); // BUG FIX: was wrongly assigned to subCam.pixelSize
			} else { // fix older saved files
				if (subCam.pixelSize < 5.0) {
					subCam.lineTime=3.638E-5; // BUG FIX: was wrongly assigned to subCam.pixelSize (RGB sensor default, see parallel code in setSensorDataFromImageStack)
				} else if (distortionCalibrationData.eyesisCameraParameters.getSensorWidth(numSensor) == 640){ // Boson
					subCam.lineTime = 2.7778e-05; // 12um pixel, Boson
				} else {
					subCam.lineTime = 7.8e-05; // 12um pixel, Lepton (may be wrong)
				}
			}
if (imp.getProperty("distortionA8")!=null) {
subCam.distortionA8= Double.parseDouble((String) imp.getProperty("distortionA8"));
} else subCam.distortionA8=0.0;
......
......@@ -15706,6 +15706,11 @@ public class PixelMapping {
public double psi; // degrees, rotation (of the sensor) around the optical axis. Positive if camera is rotated clockwise looking to the target roll
public double focalLength=4.5;
public double pixelSize= 2.2; //um
public double lineTime = 3.638E-5; // 2.7778e-05 for Boson
public boolean monochrome = false;
public boolean lwir = false;
public double distortionRadius= 2.8512; // mm - half width of the sensor
public double distortionA8=0.0; //r^8 (normalized to focal length or to sensor half width?)
public double distortionA7=0.0; //r^7 (normalized to focal length or to sensor half width?)
......@@ -16301,6 +16306,9 @@ public class PixelMapping {
public void setSensorDataFromImageStack(ImagePlus imp){
// int corrX=0,corrY=1,corrMask=2;
		boolean need_lineTime_fix = false;   // will use sensor default if not provided: 3.638E-5 for RGB, 2.7778e-05 for Boson
		boolean need_monochrome_fix = false; // will use sensor default if not provided: false for RGB, true for LWIR (Boson/Lepton) sensors
		boolean need_lwir_fix = false;       // will use sensor default if not provided: false for RGB, true for LWIR (Boson/Lepton) sensors
if (imp == null){
String msg="Sensor Calibration image is null";
IJ.showMessage("Error",msg);
......@@ -16325,10 +16333,29 @@ public class PixelMapping {
"heading",
"elevation",
"roll",
"channel"
"channel",
"lineTime", // optional
"monochrome", // optional
"lwir" // optional
};
for (int i=0; i<requiredProperties.length;i++) if (imp.getProperty(requiredProperties[i])==null){
String msg="Required property "+requiredProperties[i]+" is not defined in "+imp.getTitle();
// is it
if (requiredProperties[i].equals("lineTime")) {
System.out.println(requiredProperties[i]+" is not provided, will use default");
need_lineTime_fix = true;
continue;
}
if (requiredProperties[i].equals("monochrome")) {
System.out.println(requiredProperties[i]+" is not provided, will use default");
need_monochrome_fix = true;
continue;
}
if (requiredProperties[i].equals("lwir")) {
System.out.println(requiredProperties[i]+" is not provided, will use default");
need_lwir_fix = true;
continue;
}
IJ.showMessage("Error",msg);
throw new IllegalArgumentException (msg);
}
......@@ -16352,6 +16379,43 @@ public class PixelMapping {
this.distortionRadius= Double.parseDouble((String) imp.getProperty("distortionRadius"));
this.focalLength= Double.parseDouble((String) imp.getProperty("focalLength"));
this.pixelSize= Double.parseDouble((String) imp.getProperty("pixelSize"));
if (need_lineTime_fix) {
if (this.pixelSize < 5.0) {
this.lineTime = 3.638E-5;
} else if (this.pixelCorrectionWidth == 640){ // Boson
this.lineTime = 2.7778e-05; // 12um pixel, Boson
} else {
this.lineTime = 7.8e-05; // 12um pixel, Lepton (may be wrong)
}
} else {
this.lineTime= Double.parseDouble((String) imp.getProperty("lineTime"));
}
if (need_monochrome_fix) {
if (this.pixelSize < 5.0) {
this.monochrome = false;
} else if (this.pixelCorrectionWidth == 640){ // Boson
this.monochrome = true; // 12um pixel, Boson
} else {
this.monochrome = true; // 12um pixel, Lepton (may be wrong)
}
} else {
this.monochrome= Boolean.parseBoolean((String) imp.getProperty("monochrome"));
}
if (need_lwir_fix) {
if (this.pixelSize < 5.0) {
this.lwir = false;
} else if (this.pixelCorrectionWidth == 640){ // Boson
this.lwir = true; // 12um pixel, Boson
} else {
this.lwir = true; // 12um pixel, Lepton (may be wrong)
}
} else {
this.lwir= Boolean.parseBoolean((String) imp.getProperty("lwir"));
}
if (imp.getProperty("distortionA8")!=null) this.distortionA8= Double.parseDouble((String) imp.getProperty("distortionA8"));
else this.distortionA8= 0.0;
if (imp.getProperty("distortionA7")!=null) this.distortionA7= Double.parseDouble((String) imp.getProperty("distortionA7"));
......
......@@ -44,6 +44,7 @@ import java.util.Properties;
public double psi; // degrees, rotation (of the sensor) around the optical axis. Positive if camera is rotated clockwise looking to the target
public double focalLength=4.5;
public double pixelSize= 2.2; //um
public double lineTime = 3.638E-5; // 2.7778e-05 for Boson
public double distortionRadius= 2.8512; // mm - half width of the sensor
public double distortionA8=0.0; //r^8 (normalized to focal length or to sensor half width?)
public double distortionA7=0.0; //r^7 (normalized to focal length or to sensor half width?)
......@@ -100,6 +101,7 @@ import java.util.Properties;
double psi, // degrees, rotation (of the sensor) around the optical axis. Positive if camera is rotated clockwise looking to the target
double focalLength,
double pixelSize,//um
double lineTime, // = 3.638E-5; // 2.7778e-05 for Boson
double distortionRadius, //mm - half width of the sensor
double distortionA8, // r^8
double distortionA7, // r^7
......@@ -183,6 +185,7 @@ import java.util.Properties;
this.psi,
this.focalLength,
this.pixelSize,
this.lineTime,
this.distortionRadius,
this.distortionA8,
this.distortionA7,
......@@ -227,6 +230,7 @@ import java.util.Properties;
properties.setProperty(prefix+"psi", this.psi+"");
properties.setProperty(prefix+"focalLength", this.focalLength+"");
properties.setProperty(prefix+"pixelSize", this.pixelSize+"");
properties.setProperty(prefix+"lineTime", this.lineTime+"");
properties.setProperty(prefix+"distortionRadius", this.distortionRadius+"");
properties.setProperty(prefix+"distortionA8", this.distortionA8+"");
properties.setProperty(prefix+"distortionA7", this.distortionA7+"");
......@@ -288,6 +292,8 @@ import java.util.Properties;
this.focalLength=Double.parseDouble(properties.getProperty(prefix+"focalLength"));
if (properties.getProperty(prefix+"pixelSize")!=null)
this.pixelSize=Double.parseDouble(properties.getProperty(prefix+"pixelSize"));
if (properties.getProperty(prefix+"lineTime")!=null)
this.lineTime=Double.parseDouble(properties.getProperty(prefix+"lineTime"));
if (properties.getProperty(prefix+"distortionRadius")!=null)
this.distortionRadius=Double.parseDouble(properties.getProperty(prefix+"distortionRadius"));
if (properties.getProperty(prefix+"distortionA8")!=null)
......
......@@ -104,7 +104,11 @@ import ij.process.ImageProcessor;
not_empty = true;
fpixels=new float[pixels[i].length];
for (j=0;j<fpixels.length;j++) fpixels[j]=(float)pixels[i][j];
if (i < titles.length) {
array_stack.addSlice(titles[i], fpixels);
} else {
array_stack.addSlice("slice-"+i, fpixels);
}
}
if (not_empty) {
ImagePlus imp_stack = new ImagePlus(title, array_stack);
......
......@@ -1224,6 +1224,7 @@ public class EyesisDCT {
4, // 4 sensors, will not be used here
dctParameters.dct_size,
null, // FIXME: needs ImageDttParameters (clt_parameters.img_dtt),
false, // aux
false, // mono
false, // lwir
1.0); // Bayer( not monochrome), scale correlation strengths
......
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
package com.elphel.imagej.gpu;
public class TpTask {
public int task; // [0](+1) - generate 4 images, [4..9]+16..+512 - correlation pairs, 2 - generate texture tiles
public float target_disparity;
public int num_sensors = 4;
public int ty;
public int tx;
public float[][] xy = null;
public float[][] xy_aux = null;
public float [][] disp_dist = null;
public TpTask() {}
public TpTask(int num_sensors, int tx, int ty, float target_disparity, int task ) {
this.tx = tx;
this.ty = ty;
this.target_disparity = target_disparity;
this.task = task;
this.num_sensors = num_sensors; // will not be encoded
this.disp_dist = new float [num_sensors][4];
}
/**
* Initialize from the float array (read from the GPU)
* @param flt float array containing tasks data
* @param indx task number to use
*/
public TpTask(float [] flt, int indx, boolean use_aux)
{
task = Float.floatToIntBits(flt[indx++]);
int txy = Float.floatToIntBits(flt[indx++]);
ty = txy >> 16;
tx = txy & 0xffff;
if (use_aux) {
xy_aux = new float[num_sensors][2];
for (int i = 0; i < num_sensors; i++) {
xy_aux[i][0] = flt[indx++];
xy_aux[i][1] = flt[indx++];
}
} else {
xy = new float[num_sensors][2];
for (int i = 0; i < num_sensors; i++) {
xy[i][0] = flt[indx++];
xy[i][1] = flt[indx++];
}
}
target_disparity = flt[indx++];
disp_dist = new float [num_sensors][4];
for (int i = 0; i < num_sensors; i++) {
for (int j = 0; j < 4; j++) {
disp_dist[i][j] = flt[indx++];
}
}
}
public float [][] getDispDist(){
return disp_dist;
}
public double [][] getDoubleDispDist(){
if (disp_dist == null) { // can it happen?
return null;
}
double [][] ddisp_dist = new double [disp_dist.length][disp_dist[0].length];
for (int nsens = 0; nsens < disp_dist.length; nsens++) {
for (int i = 0; i < disp_dist[nsens].length; i++) {
ddisp_dist[nsens][i] = disp_dist[nsens][i];
}
}
return ddisp_dist;
}
public float [][] getXY(boolean use_aux){
return use_aux? xy_aux : xy;
}
public double [][] getDoubleXY(boolean use_aux){
float [][] fXY = getXY(use_aux);
if (fXY == null) {
return null;
}
double [][] dXY = new double [fXY.length][fXY[0].length];
for (int nsens = 0; nsens < fXY.length; nsens++) {
for (int i = 0; i < fXY[nsens].length; i++) {
dXY[nsens][i] = fXY[nsens][i];
}
}
return dXY;
}
public int getTileY(){
return ty;
}
public int getTileX(){
return tx;
}
public int getTask() {
return task;
}
public double getTargetDisparity() {
return target_disparity;
}
// convert this class instance to float array to match layout of the C struct
public float [] asFloatArray(boolean use_aux) {
float [] flt = new float [GPUTileProcessor.TPTASK_SIZE];
return asFloatArray(flt, 0, use_aux);
}
// convert this class instance to float array to match layout of the C struct,
// fill existing float array from the specified index
public float [] asFloatArray(float [] flt, int indx, boolean use_aux) {
flt[indx++] = Float.intBitsToFloat(task);
flt[indx++] = Float.intBitsToFloat(tx + (ty << 16));
float [][] offsets = use_aux? this.xy_aux: this.xy;
for (int i = 0; i < num_sensors; i++) {
if (offsets != null) {
flt[indx++] = offsets[i][0];
flt[indx++] = offsets[i][1];
} else {
indx+= 2;
}
}
flt[indx++] = this.target_disparity;
/*
for (int i = 0; i < NUM_CAMS; i++) { // actually disp_dist will be initialized by the GPU
indx+= 4;
flt[indx++] = disp_dist[i][0];
flt[indx++] = disp_dist[i][1];
flt[indx++] = disp_dist[i][2];
flt[indx++] = disp_dist[i][3];
}
*/
return flt;
}
}
\ No newline at end of file
......@@ -378,10 +378,22 @@ public class CLTPass3d{
}
return has_lma;
}
public void setLMA(boolean [] has_lma) {// use for combo tiles
this.has_lma = has_lma;
}
public void resetLMA() {
this.has_lma = null;
}
public void setLMA(double [] disparity_lma) {// use for combo tiles
this.has_lma = new boolean [disparity_lma.length];
for (int i = 0; i < disparity_lma.length; i++) {
this.has_lma[i] = !Double.isNaN(disparity_lma[i]);
}
}
public void fixNaNDisparity()
{
fixNaNDisparity(
......
......@@ -50,6 +50,7 @@ public class Clt1d {
nSens,
transform_size,
null, // FIXME: needs ImageDttParameters (clt_parameters.img_dtt),
false, // aux
false,
false,
1.0);
......
......@@ -1285,6 +1285,7 @@ public class Correlation2d {
* @param col_weights RBG color weights
* @return [pair][quadrant][index]
*/
@Deprecated
public double [][][] correlateCompositeTD(
double [][][][][][] clt_data,
int tileX,
......@@ -1310,14 +1311,50 @@ public class Correlation2d {
/**
* Calculate all required image pairs phase correlation, stay in Transform Domain
* @param clt_data_tile aberration-corrected FD CLT data for one tile [camera][color][quadrant][index]
* @param clt_data aberration-corrected FD CLT data [camera][color][tileY][tileX][quadrant][index]
* @param tileX tile to extract X index
* @param tileY tile to extract Y index
* @param pairs_mask bimask of required pairs
* @param lpf_rb optional low-pass filter - extra LPF for red and blue
* @param scale_value scale correlation results to compensate for lpf changes and other factors
* @param col_weights RBG color weights
* @return [pair][quadrant][index]
*/
public double [][][] correlateCompositeTD(
double [][][][][][] clt_data,
int tileX,
int tileY,
boolean [] pairs_mask,
double [] lpf_rb, // extra lpf for red and blue (unused for mono) or null
double scale_value, // scale correlation value
double [] col_weights) {
double [][][][] clt_data_tile = new double[clt_data.length][][][]; // [camera][color][quadrant][index]
for (int ncam = 0; ncam < clt_data.length; ncam++) if (clt_data[ncam] != null){
clt_data_tile[ncam] = new double[clt_data[ncam].length][][];
for (int ncol = 0; ncol < clt_data[ncam].length; ncol++) if ((clt_data[ncam][ncol] != null) && (clt_data[ncam][ncol][tileY] != null)){
clt_data_tile[ncam][ncol] = clt_data[ncam][ncol][tileY][tileX];
}
}
return correlateCompositeTD(
clt_data_tile,
pairs_mask, // already decoded so bit 0 - pair 0
lpf_rb,
scale_value,
col_weights);
}
/**
* Calculate all required image pairs phase correlation, stay in Transform Domain
* @param clt_data_tile aberration-corrected FD CLT data for one tile [camera][color][quadrant][index]
* @param pairs_mask bimask of required pairs NOW USE boolean array and new pairs
* @param lpf optional final low-pass filter
* @param lpf_rb optional low-pass filter (extra) for R,B components
* @param scale_value scale correlation results to compensate for lpf changes and other factors
* @param col_weights RBG color weights
* @return [pair][quadrant][index]
*/
@Deprecated
public double [][][] correlateCompositeTD(
double [][][][] clt_data_tile,
int pairs_mask, // already decoded so bit 0 - pair 0
......@@ -1341,6 +1378,42 @@ public class Correlation2d {
return pairs_corr;
}
/**
* Calculate all required image pairs phase correlation, stay in Transform Domain
* @param clt_data_tile aberration-corrected FD CLT data for one tile [camera][color][quadrant][index]
* @param pairs_mask bimask of required pairs
* @param lpf optional final low-pass filter
* @param lpf_rb optional low-pass filter (extra) for R,B components
* @param scale_value scale correlation results to compensate for lpf changes and other factors
* @param col_weights RBG color weights
* @return [pair][quadrant][index]
*/
public double [][][] correlateCompositeTD(
double [][][][] clt_data_tile,
boolean[] pairs_mask, // already decoded so bit 0 - pair 0
double [] lpf_rb, // extra lpf for red and blue (unused for mono) or null
double scale_value, // scale correlation value
double [] col_weights) {
if (clt_data_tile == null) return null;
double [][][] pairs_corr = new double [getNumPairs()][][];
for (int npair = 0; npair < pairs_corr.length; npair++) if (pairs_mask[npair]) {
int [] pair = getPair(npair);
int ncam1 = pair[0]; // start
int ncam2 = pair[1]; // end
if ((ncam1 < clt_data_tile.length) && (clt_data_tile[ncam1] != null) && (ncam2 < clt_data_tile.length) && (clt_data_tile[ncam2] != null)) {
pairs_corr[npair] = correlateCompositeTD(
clt_data_tile[ncam1], // double [][][] clt_data1,
clt_data_tile[ncam2], // double [][][] clt_data2,
lpf_rb, // double [] lpf_rb,
scale_value,
col_weights); // double [] col_weights,
}
}
return pairs_corr;
}
/**
* Calculate color channels FD phase correlations, mix results with weights, apply optional low-pass filter
* No transposing or rotation
......
......@@ -639,6 +639,8 @@ public class ErsCorrection extends GeometryCorrection {
public ErsCorrection(GeometryCorrection gc, boolean deep) {
debugLevel = gc.debugLevel;
line_time = gc.line_time; // 36.38! //26.5E-6; // duration of sensor scan line (for ERS)
monochrome = gc.monochrome;
lwir = gc.lwir;
pixelCorrectionWidth= gc.pixelCorrectionWidth; // 2592; // virtual camera center is at (pixelCorrectionWidth/2, pixelCorrectionHeight/2)
pixelCorrectionHeight= gc.pixelCorrectionHeight; // 1936;
focalLength = gc.focalLength; // =FOCAL_LENGTH;
......
......@@ -64,6 +64,9 @@ public class GeometryCorrection {
*/
public int debugLevel = 0;
public double line_time = 36.38E-6; // 26.5E-6; // duration of sensor scan line (for ERS) Wrong, 36.38us (change and re-run ERS
// Boson - 27.7778E-6 (750/27E6)
public boolean monochrome = false;
public boolean lwir = false;
public int pixelCorrectionWidth=2592; // virtual camera center is at (pixelCorrectionWidth/2, pixelCorrectionHeight/2)
public int pixelCorrectionHeight=1936;
......@@ -117,6 +120,15 @@ public class GeometryCorrection {
public int getNumSensors() {
return numSensors;
}
public boolean isMonochrome() {
return monochrome;
}
public boolean isLwir() {
return lwir;
}
protected double [][] get_rXY_ideal(){
if (rXY_ideal == null) {
if (numSensors == 4) {
......@@ -798,7 +810,7 @@ public class GeometryCorrection {
ro.aux_tilt = this.aux_tilt;
ro.aux_roll = this.aux_roll;
ro.aux_zoom = this.aux_zoom;
ro.full_par_index = this.full_par_index.clone();
ro.full_par_index = (this.full_par_index == null)?null:this.full_par_index.clone();
ro.par_scales = this.par_scales.clone();
return ro;
}
......@@ -1477,8 +1489,10 @@ public class GeometryCorrection {
double distortionRadius,
int pixelCorrectionWidth, // virtual camera center is at (pixelCorrectionWidth/2, pixelCorrectionHeight/2)
int pixelCorrectionHeight,
double pixelSize
double pixelSize,
double line_time,
boolean monochrome,
boolean lwir
) {
if (!Double.isNaN(focalLength)) this.focalLength = focalLength;
if (!Double.isNaN(distortionC)) this.distortionC = distortionC;
......@@ -1492,6 +1506,9 @@ public class GeometryCorrection {
if (pixelCorrectionWidth >= 0) this.pixelCorrectionWidth = pixelCorrectionWidth;
if (pixelCorrectionHeight >= 0) this.pixelCorrectionHeight = pixelCorrectionHeight;
if (!Double.isNaN(pixelSize)) this.pixelSize = pixelSize;
if (!Double.isNaN(line_time)) this.line_time = line_time;
this.monochrome = monochrome;
this.lwir = lwir;
// imp.setProperty("distortion_formula", "(normalized by distortionRadius in mm) Rdist/R=A8*R^7+A7*R^6+A6*R^5+A5*R^4+A*R^3+B*R^2+C*R+(1-A6-A7-A6-A5-A-B-C)");
// imp.setProperty("distortionRadius", ""+subCam.distortionRadius);
}
......@@ -1671,6 +1688,9 @@ public class GeometryCorrection {
System.out.println("pixelCorrectionWidth =\t"+ pixelCorrectionWidth+"\tpix");
System.out.println("pixelCorrectionHeight =\t"+ pixelCorrectionHeight+"\tpix");
System.out.println("pixelSize =\t"+ pixelSize+"\tum");
System.out.println("lineTime =\t"+ (1E6*line_time)+"\tus");
System.out.println("monochrome =\t"+ monochrome+"\t");
System.out.println("lwir =\t"+ lwir+"\t");
System.out.println("distortionRadius =\t"+ distortionRadius+"\tmm");
System.out.println("'=== Common input parameters ===");
System.out.println("distortionA8 =\t"+ distortionA8);
......
......@@ -5,24 +5,28 @@ import java.util.concurrent.atomic.AtomicInteger;
import com.elphel.imagej.common.ShowDoubleFloatArrays;
import com.elphel.imagej.gpu.GPUTileProcessor;
import com.elphel.imagej.gpu.GpuQuad;
import com.elphel.imagej.gpu.TpTask;
//import Jama.Matrix;
public class ImageDtt extends ImageDttCPU {
public boolean debug_strengths = false; // true;
private final GPUTileProcessor.GpuQuad gpuQuad;
private final GpuQuad gpuQuad;
public ImageDtt(
int numSensors,
int transform_size,
ImageDttParameters imgdtt_params,
boolean aux,
boolean mono,
boolean lwir,
double scale_strengths,
GPUTileProcessor.GpuQuad gpuQuadIn){
GpuQuad gpuQuadIn){
super ( numSensors,
transform_size,
imgdtt_params,
aux,
mono,
lwir,
scale_strengths);
......@@ -33,19 +37,21 @@ public class ImageDtt extends ImageDttCPU {
int numSensors,
int transform_size,
ImageDttParameters imgdtt_params,
boolean aux,
boolean mono,
boolean lwir,
double scale_strengths){
super ( numSensors,
transform_size,
imgdtt_params,
aux,
mono,
lwir,
scale_strengths);
gpuQuad = null;
}
public GPUTileProcessor.GpuQuad getGPU() {
public GpuQuad getGPU() {
return this.gpuQuad;
}
......@@ -272,7 +278,7 @@ public class ImageDtt extends ImageDttCPU {
final boolean use_main = geometryCorrection_main != null;
boolean [] used_corrs = new boolean[1];
final int all_pairs = imgdtt_params.dbg_pair_mask; //TODO: use tile tasks
final GPUTileProcessor.TpTask[] tp_tasks = gpuQuad.setTpTask(
final TpTask[] tp_tasks = gpuQuad.setTpTask(
disparity_array, // final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
disparity_corr, // final double disparity_corr,
used_corrs, // final boolean [] need_corrs, // should be initialized to boolean[1] or null
......@@ -319,13 +325,13 @@ public class ImageDtt extends ImageDttCPU {
gpuQuad.execSetTilesOffsets(); // prepare tiles offsets in GPU memory
if ((fdisp_dist != null) || (fpxpy != null)) {
final GPUTileProcessor.TpTask[] tp_tasks_full = gpuQuad.getTasks(use_main);
final TpTask[] tp_tasks_full = gpuQuad.getTasks(use_main);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int indx_tile = ai.getAndIncrement(); indx_tile < tp_tasks_full.length; indx_tile = ai.getAndIncrement()) {
GPUTileProcessor.TpTask task = tp_tasks_full[indx_tile];
TpTask task = tp_tasks_full[indx_tile];
if (fdisp_dist != null) {
fdisp_dist[task.getTileY()][task.getTileX()] = task.getDispDist();
}
......@@ -688,7 +694,7 @@ public class ImageDtt extends ImageDttCPU {
final int globalDebugLevel)
{
// prepare tasks
GPUTileProcessor.TpTask[] tp_tasks = gpuQuad.setInterTasks(
TpTask[] tp_tasks = gpuQuad.setInterTasks(
pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
geometryCorrection, // final GeometryCorrection geometryCorrection,
disparity_corr, // final double disparity_corr,
......@@ -701,7 +707,6 @@ public class ImageDtt extends ImageDttCPU {
fcorr_td, // [tilesY][tilesX][pair][4*64] transform domain representation of 6 corr pairs
fcorr_combo_td, // [4][tilesY][tilesX][pair][4*64] TD of combo corrs: quad, cross, hor,vert
geometryCorrection,
// disparity_corr, // disparity offset at infinity
margin, // do not use tiles if their centers are closer to the edges
gpu_sigma_r, // 0.9, 1.1
gpu_sigma_b, // 0.9, 1.1
......@@ -718,7 +723,7 @@ public class ImageDtt extends ImageDttCPU {
public void quadCorrTD(
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
final GPUTileProcessor.TpTask[] tp_tasks,
final TpTask[] tp_tasks,
final float [][][][] fcorr_td, // [tilesY][tilesX][pair][4*64] transform domain representation of 6 corr pairs
final float [][][][] fcorr_combo_td, // [4][tilesY][tilesX][pair][4*64] TD of combo corrs: quad, cross, hor,vert
final GeometryCorrection geometryCorrection,
......@@ -865,7 +870,7 @@ public class ImageDtt extends ImageDttCPU {
public GPUTileProcessor.TpTask[][] clt_aberrations_quad_corr_GPU_test(
public TpTask[][] clt_aberrations_quad_corr_GPU_test(
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
final int macro_scale, // to correlate tile data instead of the pixel data: 1 - pixels, 8 - tiles
final int [][] tile_op, // [tilesY][tilesX] - what to do - 0 - nothing for this tile
......@@ -1105,7 +1110,7 @@ public class ImageDtt extends ImageDttCPU {
final boolean use_main = geometryCorrection_main != null;
boolean [] used_corrs = new boolean[1];
final int all_pairs = imgdtt_params.dbg_pair_mask; //TODO: use tile tasks
final GPUTileProcessor.TpTask[] tp_tasks = gpuQuad.setTpTask(
final TpTask[] tp_tasks = gpuQuad.setTpTask(
disparity_array, // final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
disparity_corr, // final double disparity_corr,
used_corrs, // final boolean [] need_corrs, // should be initialized to boolean[1] or null
......@@ -1151,17 +1156,17 @@ public class ImageDtt extends ImageDttCPU {
gpuQuad.execSetTilesOffsets(); // prepare tiles offsets in GPU memory
GPUTileProcessor.TpTask[][] test_tasks = new GPUTileProcessor.TpTask[3][];
TpTask[][] test_tasks = new TpTask[3][];
test_tasks[2] = tp_tasks;
if ((fdisp_dist != null) || (fpxpy != null)) {
final GPUTileProcessor.TpTask[] tp_tasks_full = gpuQuad.getTasks(use_main);
final TpTask[] tp_tasks_full = gpuQuad.getTasks(use_main);
test_tasks[0] = tp_tasks_full;
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int indx_tile = ai.getAndIncrement(); indx_tile < tp_tasks_full.length; indx_tile = ai.getAndIncrement()) {
GPUTileProcessor.TpTask task = tp_tasks_full[indx_tile];
TpTask task = tp_tasks_full[indx_tile];
if (fdisp_dist != null) {
fdisp_dist[task.getTileY()][task.getTileX()] = task.getDispDist();
}
......@@ -1179,14 +1184,14 @@ public class ImageDtt extends ImageDttCPU {
gpuQuad.execSetTilesOffsets(); // prepare tiles offsets in GPU memory
final GPUTileProcessor.TpTask[] tp_tasks_full = gpuQuad.getTasks(use_main); // reads the same
final TpTask[] tp_tasks_full = gpuQuad.getTasks(use_main); // reads the same
test_tasks[1] = tp_tasks_full;
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
@Override
public void run() {
for (int indx_tile = ai.getAndIncrement(); indx_tile < tp_tasks_full.length; indx_tile = ai.getAndIncrement()) {
GPUTileProcessor.TpTask task = tp_tasks_full[indx_tile];
TpTask task = tp_tasks_full[indx_tile];
if (fpxpy != null) {
fpxpy_test[task.getTileY()][task.getTileX()] = task.getXY(use_main); // boolean use_aux);
}
......@@ -1637,20 +1642,6 @@ public class ImageDtt extends ImageDttCPU {
// final double [] col_weights= new double [numcol]; // colors are RBG
final double [][] dbg_distort = debug_distort? (new double [4*quad][tilesX*tilesY]) : null;
// not yet used with GPU
/**
final double [][] corr_wnd = Corr2dLMA.getCorrWnd(
transform_size,
imgdtt_params.lma_wnd);
final double [] corr_wnd_inv_limited = (imgdtt_params.lma_min_wnd <= 1.0)? new double [corr_wnd.length * corr_wnd[0].length]: null;
if (corr_wnd_inv_limited != null) {
double inv_pwr = imgdtt_params.lma_wnd_pwr - (imgdtt_params.lma_wnd - 1.0); // compensate for lma_wnd
for (int i = imgdtt_params.lma_hard_marg; i < (corr_wnd.length - imgdtt_params.lma_hard_marg); i++) {
for (int j = imgdtt_params.lma_hard_marg; j < (corr_wnd.length - imgdtt_params.lma_hard_marg); j++) {
corr_wnd_inv_limited[i * (corr_wnd.length) + j] = 1.0/Math.max(Math.pow(corr_wnd[i][j],inv_pwr), imgdtt_params.lma_min_wnd);
}
}
}
*/
// keep for now for mono, find out what do they mean for macro mode
final int corr_size = transform_size * 2 - 1;
......
......@@ -295,6 +295,7 @@ public class MacroCorrelation {
geometryCorrection.getNumSensors(),
clt_parameters.transform_size,
clt_parameters.img_dtt,
this.mtp.isAux(),
this.mtp.isMonochrome(),
this.mtp.isLwir(),
clt_parameters.getScaleStrength(this.mtp.isAux()));
......
......@@ -6160,6 +6160,7 @@ ImageDtt.startAndJoin(threads);
geometryCorrection.getNumSensors(),
clt_parameters.transform_size,
clt_parameters.img_dtt,
isAux(),
isMonochrome(),
isLwir(),
clt_parameters.getScaleStrength(is_aux));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment