Commit 4125cbb1 authored by Andrey Filippov

Fixing conversion from calibration to correction.

parent 4bbac7dc
......@@ -84,6 +84,7 @@ public class CLTParameters {
public double scale_b = 1.0;
public double vignetting_max = 0.4; // value in vignetting data to correspond to 1x in the kernel
public double vignetting_range = 5.0; // do not try to correct vignetting less than vignetting_max/vignetting_range
// NOT used !
public int kernel_step = 16; // source kernels step in pixels (have 1 kernel margin on each side)
public double disparity = 0.0; // nominal disparity between side of square cameras (pix)
public double z_correction = 0.0; // Inverse distance to infinity (misalignment correction)
......
......@@ -14,6 +14,7 @@ public class ColorProcParameters {
public boolean lwir_autorange = true;
public double lwir_too_cold = 100.0; // discard this number of pixels too cold
public double lwir_too_hot = 3.0; // discard this number of pixels too hot
public boolean lwir_pseudocolor = true;
public int lwir_palette = 0; // 0 - white - hot, 1 - black - hot, 2+ - colored
public boolean lwir_subtract_dc = false;
public boolean lwir_eq_chn = true; // adjust average temperature between channels
......@@ -84,7 +85,7 @@ public class ColorProcParameters {
boolean lwir_autorange, // true;
double lwir_too_cold, // 100.0; // discard this number of pixels too cold
double lwir_too_hot, // 3.0; // discard this number of pixels too hot
boolean lwir_pseudocolor,
int lwir_palette, // 0 - white - hot, 1 - black - hot, 2+ - colored
boolean lwir_subtract_dc, // = false;
boolean lwir_eq_chn, // true
......@@ -144,6 +145,7 @@ public class ColorProcParameters {
this.lwir_too_cold = lwir_too_cold;
this.lwir_too_hot = lwir_too_hot;
this.lwir_pseudocolor = lwir_pseudocolor;
this.lwir_palette = lwir_palette;
this.lwir_subtract_dc = lwir_subtract_dc;
this.lwir_eq_chn =lwir_eq_chn;
......@@ -204,6 +206,7 @@ public class ColorProcParameters {
properties.setProperty(prefix+"lwir_too_cold", this.lwir_too_cold+"");
properties.setProperty(prefix+"lwir_too_hot", this.lwir_too_hot+"");
properties.setProperty(prefix+"lwir_pseudocolor", this.lwir_pseudocolor+"");
properties.setProperty(prefix+"lwir_palette", this.lwir_palette+"");
properties.setProperty(prefix+"lwir_subtract_dc", this.lwir_subtract_dc+"");
properties.setProperty(prefix+"lwir_eq_chn", this.lwir_eq_chn+"");
......@@ -273,6 +276,7 @@ public class ColorProcParameters {
if (properties.getProperty(prefix+"lwir_too_cold")!=null) this.lwir_too_cold=Double.parseDouble(properties.getProperty(prefix+"lwir_too_cold"));
if (properties.getProperty(prefix+"lwir_too_hot")!=null) this.lwir_too_hot=Double.parseDouble(properties.getProperty(prefix+"lwir_too_hot"));
if (properties.getProperty(prefix+"lwir_pseudocolor")!=null) this.lwir_pseudocolor=Boolean.parseBoolean(properties.getProperty(prefix+"lwir_pseudocolor"));
if (properties.getProperty(prefix+"lwir_palette")!=null) this.lwir_palette=Integer.parseInt(properties.getProperty(prefix+"lwir_palette"));
if (properties.getProperty(prefix+"lwir_subtract_dc")!=null) this.lwir_subtract_dc=Boolean.parseBoolean(properties.getProperty(prefix+"lwir_subtract_dc"));
if (properties.getProperty(prefix+"lwir_eq_chn")!=null) this.lwir_eq_chn=Boolean.parseBoolean(properties.getProperty(prefix+"lwir_eq_chn"));
......@@ -349,7 +353,8 @@ public class ColorProcParameters {
gd.addNumericField("Number of too cold pixels/image to ignore during autorange", this.lwir_too_cold, 4); // 100.0
gd.addNumericField("Number of too hot pixels/image to ignore during autorange", this.lwir_too_hot, 4); // 0.53
gd.addNumericField("LWIR pallet (0-white hot, 1-black hot, 2+ - pseudo colors ", this.lwir_palette, 0);
gd.addCheckbox ("Use pseudocolors for LWIR (unchecked - floating point monochrome)", this.lwir_pseudocolor);
gd.addNumericField("LWIR pallete (0-white hot, 1-black hot, 2+ - pseudo colors ", this.lwir_palette, 0);
gd.addCheckbox ("Subtract each image DC when conditioning", this.lwir_subtract_dc);
gd.addCheckbox ("Adjust average temperature between cameras", this.lwir_eq_chn);
......@@ -437,6 +442,7 @@ public class ColorProcParameters {
this.lwir_too_cold= gd.getNextNumber();
this.lwir_too_hot= gd.getNextNumber();
this.lwir_pseudocolor= gd.getNextBoolean();
this.lwir_palette= (int) gd.getNextNumber();
this.lwir_subtract_dc= gd.getNextBoolean();
this.lwir_eq_chn= gd.getNextBoolean();
......@@ -503,6 +509,7 @@ public class ColorProcParameters {
cp.lwir_too_cold = this.lwir_too_cold;
cp.lwir_too_hot = this.lwir_too_hot;
cp.lwir_pseudocolor = this.lwir_pseudocolor;
cp.lwir_palette = this.lwir_palette;
cp.lwir_subtract_dc = this.lwir_subtract_dc;
cp.lwir_eq_chn = this.lwir_eq_chn;
......@@ -567,6 +574,7 @@ public class ColorProcParameters {
this.lwir_too_cold = cp.lwir_too_cold;
this.lwir_too_hot = cp.lwir_too_hot;
this.lwir_pseudocolor = cp.lwir_pseudocolor;
this.lwir_palette = cp.lwir_palette;
this.lwir_subtract_dc = cp.lwir_subtract_dc;
this.lwir_eq_chn = cp.lwir_eq_chn;
......
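The new lwir_pseudocolor flag is threaded through the same constructor / setProperties / getProperties / dialog round-trip as the other LWIR fields above. A minimal standalone sketch of that java.util.Properties round-trip; the class name, prefix and main() harness are illustrative, not the actual ColorProcParameters:

import java.util.Properties;

// Illustrative sketch of the setProperties()/getProperties() pattern used above.
public class LwirParamsSketch {
    public boolean lwir_pseudocolor = true;
    public int     lwir_palette     = 0;

    public void setProperties(String prefix, Properties properties) {
        properties.setProperty(prefix + "lwir_pseudocolor", this.lwir_pseudocolor + "");
        properties.setProperty(prefix + "lwir_palette",     this.lwir_palette + "");
    }

    public void getProperties(String prefix, Properties properties) {
        if (properties.getProperty(prefix + "lwir_pseudocolor") != null)
            this.lwir_pseudocolor = Boolean.parseBoolean(properties.getProperty(prefix + "lwir_pseudocolor"));
        if (properties.getProperty(prefix + "lwir_palette") != null)
            this.lwir_palette = Integer.parseInt(properties.getProperty(prefix + "lwir_palette"));
    }

    public static void main(String[] args) {
        LwirParamsSketch a = new LwirParamsSketch();
        a.lwir_pseudocolor = false;
        Properties p = new Properties();
        a.setProperties("COLOR_PROC.", p);   // save
        LwirParamsSketch b = new LwirParamsSketch();
        b.getProperties("COLOR_PROC.", p);   // restore
        System.out.println("restored lwir_pseudocolor=" + b.lwir_pseudocolor);
    }
}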
......@@ -145,6 +145,9 @@ import ij.process.ImageProcessor;
public void showArrays(float[][] pixels, int width, int height, boolean asStack, String title, String [] titles) {
int i,j;
if (pixels == null) {
return;
}
if (asStack) {
float [] fpixels;
ImageStack array_stack=new ImageStack(width,height);
......
......@@ -276,6 +276,7 @@ public class Eyesis_Correction implements PlugIn, ActionListener {
true, // boolean lwir_autorange, // true;
100.0, // double lwir_too_cold, // 100.0; // discard this number of pixels too cold
3.0, // double lwir_too_hot, // 3.0; // discard this number of pixels too hot
true, // lwir_pseudocolor,
1, // int lwir_palette, // 0 - white - hot, 1 - black - hot, 2+ - pseudocolored
false, // boolean lwir_subtract_dc, // = false;
true, // boolean lwir_eq_chn = true; // adjust average temperature between channels
......@@ -7381,6 +7382,9 @@ public class Eyesis_Correction implements PlugIn, ActionListener {
*/
public boolean adjustLYSeries(boolean use_aux) {
MultisceneLY.MSLY_MODE adjust_mode = MultisceneLY.MSLY_MODE.INF_NOINF;
if (CLT_PARAMETERS.ofp.pattern_mode) {
adjust_mode = MultisceneLY.MSLY_MODE.NOINF_ONLY;
}
long startTime = System.nanoTime();
// load needed sensor and kernels files
if (!prepareRigImages())
......
......@@ -116,7 +116,7 @@ public class GPUTileProcessor {
// public static int IMG_HEIGHT = 1936;
static int KERNELS_HOR = 164;
static int KERNELS_VERT = 123;
static int KERNELS_LSTEP = 4;
static int KERNELS_LSTEP = 3; // 4; // FIXME: make it dynamic (3 for LWIR, 4 for RGB?)
static int THREADS_PER_TILE = 8;
static int TILES_PER_BLOCK = 4; // 8 - slower
static int CORR_THREADS_PER_TILE = 8;
......
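The FIXME above asks for KERNELS_LSTEP to be derived from the sensor type rather than hard-coded. A hedged sketch of what that could look like; only the 3-for-LWIR / 4-for-RGB values come from the comment, and treating KERNELS_LSTEP as the log2 of the kernel step in pixels (1 << 4 = 16 matches kernel_step in CLTParameters) is an assumption:

// Illustrative only: derive the kernel step exponent from the sensor type,
// following the FIXME "3 for LWIR, 4 for RGB".
public class KernelsLStepSketch {
    static int kernelsLStep(boolean isLwir) {
        return isLwir ? 3 : 4;
    }
    public static void main(String[] args) {
        System.out.println("LWIR kernel step = " + (1 << kernelsLStep(true)));  //  8
        System.out.println("RGB  kernel step = " + (1 << kernelsLStep(false))); // 16
    }
}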
......@@ -3190,6 +3190,41 @@ public class GpuQuad{ // quad camera description
}
public float [][] getRBG (int ncam){
int gpu_height = (img_height + GPUTileProcessor.DTT_SIZE);
int gpu_width = (img_width + GPUTileProcessor.DTT_SIZE);
int gpu_img_size = gpu_width * gpu_height;
int rslt_img_size = img_height * img_width; // width * height;
float [] cpu_corr_image = new float [ num_colors * gpu_img_size];
int gpu_width_in_bytes = gpu_width *Sizeof.FLOAT;
// for copying results to host
CUDA_MEMCPY2D copyD2H = new CUDA_MEMCPY2D();
copyD2H.srcMemoryType = CUmemorytype.CU_MEMORYTYPE_DEVICE;
copyD2H.srcDevice = gpu_corr_images_h[ncam]; // ((test & 1) ==0) ? src_dpointer : dst_dpointer; // copy same data
copyD2H.srcPitch = imclt_stride*Sizeof.FLOAT;
copyD2H.dstMemoryType = CUmemorytype.CU_MEMORYTYPE_HOST;
copyD2H.dstHost = Pointer.to(cpu_corr_image);
copyD2H.dstPitch = gpu_width_in_bytes;
copyD2H.WidthInBytes = gpu_width_in_bytes;
copyD2H.Height = num_colors * gpu_height; // /2;
cuMemcpy2D(copyD2H); // run copy
float [][] fimg = new float [num_colors][ rslt_img_size];
for (int ncol = 0; ncol < num_colors; ncol++) {
int tl_offset = (GPUTileProcessor.DTT_SIZE/2) * (gpu_width + 1) + ncol*gpu_img_size;
for (int nrow=0; nrow < img_height; nrow++) {
// System.arraycopy(cpu_corr_image, ncol*gpu_img_size, fimg[ncol], 0, rslt_img_size);
System.arraycopy(cpu_corr_image, tl_offset + (gpu_width * nrow), fimg[ncol], img_width * nrow, img_width);
}
}
return fimg;
}
@Deprecated
public float [][] getRBGuntrimmed (int ncam){
int height = (img_height + GPUTileProcessor.DTT_SIZE);
int width = (img_width + GPUTileProcessor.DTT_SIZE);
int rslt_img_size = width * height;
......@@ -3207,7 +3242,6 @@ public class GpuQuad{ // quad camera description
copyD2H.dstPitch = width_in_bytes;
copyD2H.WidthInBytes = width_in_bytes;
// copyD2H.Height = 3 * height; // /2;
copyD2H.Height = num_colors * height; // /2;
cuMemcpy2D(copyD2H); // run copy
......@@ -3219,6 +3253,8 @@ public class GpuQuad{ // quad camera description
return fimg;
}
@Deprecated
public void getTileSubcamOffsets(
final TpTask[] tp_tasks, // will use // modify to have offsets for 8 cameras
......
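getRBG() above now trims the DTT_SIZE/2-pixel margins that the GPU keeps around each color plane, so the returned planes are exactly img_width x img_height (the deprecated getRBGuntrimmed() kept the padded size); this is also why the QuadCLT code further below no longer adds getDttSize() to out_width/out_height. A worked sketch of the offset arithmetic, assuming DTT_SIZE = 8 and a 640x512 LWIR-sized frame; the concrete numbers are illustrative:

// Margin-trimming arithmetic of getRBG(), with assumed DTT_SIZE = 8 and 640x512 frame.
public class RbgTrimSketch {
    public static void main(String[] args) {
        final int DTT_SIZE = 8;
        final int img_width = 640, img_height = 512;
        final int gpu_width  = img_width  + DTT_SIZE;    // 648: padded GPU line length
        final int gpu_height = img_height + DTT_SIZE;    // 520: padded GPU column count
        final int gpu_img_size = gpu_width * gpu_height; // one color plane in the host copy
        // Skip DTT_SIZE/2 = 4 margin rows and 4 margin columns to reach the usable window:
        final int ncol = 0, nrow = 10;
        int tl_offset = (DTT_SIZE / 2) * (gpu_width + 1) + ncol * gpu_img_size; // 2596
        int src = tl_offset + gpu_width * nrow;  // start of row 10 in the padded plane
        int dst = img_width * nrow;              // start of row 10 in the trimmed output
        System.out.println("copy " + img_width + " floats from " + src + " to " + dst);
    }
}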
......@@ -326,6 +326,9 @@ public class ElphelTiffReader extends TiffReader{ // BaseTiffReader {
throw new ServiceException("Could not read EXIF data", e);
}
}
if (directory==null) { // trying to read ImageJ file 640x512
return;
}
Date date = directory.getDate(ExifSubIFDDirectory.TAG_DATETIME_ORIGINAL);
if (date != null) {
......
......@@ -31,6 +31,7 @@ package com.elphel.imagej.readers;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
......@@ -46,9 +47,17 @@ import java.util.Iterator;
import java.util.Properties;
import java.util.Set;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.apache.commons.compress.utils.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import com.elphel.imagej.common.ShowDoubleFloatArrays;
import com.elphel.imagej.tileprocessor.TileNeibs;
......@@ -213,6 +222,11 @@ public class ImagejJp4Tiff {
ImagePlus imp= null;
bytes = reader.openBytes(0);
int bpp = reader.getBitsPerPixel();
if (bpp == 32) { // already ImageJ file - just read it and decode properties
imp = new ImagePlus(content_fileName);
decodeProperiesFromInfo(imp);
return imp;
}
boolean is_le = reader.isLittleEndian();
int bytes_per_pixel = (bpp + 7) / 9;
......@@ -351,6 +365,43 @@ public class ImagejJp4Tiff {
return imp;
}
public static boolean decodeProperiesFromInfo(ImagePlus imp){
if (imp.getProperty("Info")==null) return false;
String xml= (String) imp.getProperty("Info");
DocumentBuilder db=null;
try {
db = DocumentBuilderFactory.newInstance().newDocumentBuilder();
} catch (ParserConfigurationException e) {
return false;
}
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document doc = null;
try {
doc = db.parse(is);
} catch (SAXException e) {
return false;
} catch (IOException e) {
return false;
}
NodeList allNodes=doc.getDocumentElement().getElementsByTagName("*");
for (int i=0;i<allNodes.getLength();i++) {
String name= allNodes.item(i).getNodeName();
String value="";
try {
value=allNodes.item(i).getFirstChild().getNodeValue();
} catch(Exception e) {
}
imp.setProperty(name, value);
}
return true;
}
public float [] deGammaScale(float [] pixels, int width, Hashtable<String, Object> meta_hash, boolean degamma, boolean scale) {
int height = pixels.length/width;
String prefix = ElphelTiffReader.ELPHEL_PROPERTY_PREFIX;
......
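decodeProperiesFromInfo() above copies every XML element of the ImagePlus "Info" property back into individual ImagePlus properties, which is what the new bpp == 32 branch relies on when an already-converted ImageJ file is opened. A hedged usage sketch; the <properties> root and the EXPOSURE/GAIN tags are illustrative stand-ins for whatever the camera/writer actually puts into "Info":

import ij.ImagePlus;
import ij.process.FloatProcessor;
import com.elphel.imagej.readers.ImagejJp4Tiff;

// Only the flat element -> property copying is demonstrated here.
public class InfoDecodeSketch {
    public static void main(String[] args) {
        ImagePlus imp = new ImagePlus("test", new FloatProcessor(16, 16));
        imp.setProperty("Info",
            "<?xml version=\"1.0\"?><properties><EXPOSURE>0.01</EXPOSURE><GAIN>2</GAIN></properties>");
        if (ImagejJp4Tiff.decodeProperiesFromInfo(imp)) {
            // Each XML element becomes an ImagePlus property with the same name:
            System.out.println("EXPOSURE=" + imp.getProperty("EXPOSURE")); // "0.01"
            System.out.println("GAIN="     + imp.getProperty("GAIN"));     // "2"
        }
    }
}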
......@@ -621,6 +621,10 @@ public class CorrVector{ // TODO: Update to non-quad (extract to a file first)?
return toString(false);
}
public double getTiltAzPerPixel() {
return 1.0/1000.0*geometryCorrection.focalLength/geometryCorrection.pixelSize;
}
public String toString(boolean short_out) {
String s;
double [] sym_vect = toSymArray(null);
......
......@@ -360,7 +360,7 @@ public class ImageDtt extends ImageDttCPU {
if (iclt_fimg != null) {
gpuQuad.execImcltRbgAll(isMonochrome()); // execute GPU kernel
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) {
iclt_fimg[ncam] = gpuQuad.getRBG(ncam); // retrieve data from GPU
iclt_fimg[ncam] = gpuQuad.getRBG(ncam); // retrieve data from GPU (not used !)
}
} else {gpuQuad.execImcltRbgAll(isMonochrome());} // just for testing
// does it need texture tiles to be output?
......@@ -1554,7 +1554,7 @@ public class ImageDtt extends ImageDttCPU {
if (iclt_fimg != null) {
gpuQuad.execImcltRbgAll(isMonochrome()); // execute GPU kernel
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) {
iclt_fimg[ncam] = gpuQuad.getRBG(ncam); // retrieve data from GPU
iclt_fimg[ncam] = gpuQuad.getRBG(ncam); // retrieve data from GPU (not used, but now width/height are nominal, not increased)
}
} else {gpuQuad.execImcltRbgAll(isMonochrome());} // just for testing
// does it need texture tiles to be output?
......
......@@ -93,18 +93,31 @@ public class IntersceneLma {
}
public String [] printOldNew(boolean allvectors, int w, int d) {
String fmt1 = String.format("%%%d.%df", w,d);
ArrayList<String> lines = new ArrayList<String>();
for (int n = ErsCorrection.DP_DVAZ; n < ErsCorrection.DP_NUM_PARS; n+=3) {
boolean adj = false;
for (int i = 0; i <3; i++) adj |= par_mask[n+i];
if (allvectors || adj) {
String line = printNameV3(n, false, w,d)+" (was "+printNameV3(n, true, w,d)+")";
line += ", diff="+String.format(fmt1, getV3Diff(n));
lines.add(line);
}
}
return lines.toArray(new String[lines.size()]);
}
public double getV3Diff(int indx) {
double [] v_new = new double[3], v_old = new double[3];
System.arraycopy(getFullVector(parameters_vector), indx, v_new, 0, 3);
System.arraycopy(backup_parameters_full, indx, v_old, 0, 3);
double l2 = 0;
for (int i = 0; i < 3; i++) {
l2 += (v_new[i]-v_old[i]) * (v_new[i]-v_old[i]);
}
return Math.sqrt(l2);
}
public String printNameV3(int indx, boolean initial, int w, int d) {
double [] full_vector = initial? backup_parameters_full: getFullVector(parameters_vector);
double [] vector = new double[3];
......@@ -283,6 +296,11 @@ public class IntersceneLma {
if (show_intermediate && (debug_level > 0)) {
System.out.println("LMA: full RMS="+last_rms[0]+" ("+initial_rms[0]+"), pure RMS="+last_rms[1]+" ("+initial_rms[1]+") + lambda="+lambda);
}
String [] lines1 = printOldNew(false); // boolean allvectors)
System.out.print("iteration="+iter);
for (String line : lines1) {
System.out.println(line);
}
if (debug_level > 0) {
if ((debug_level > 1) || (iter == 1) || last_run) {
if (!show_intermediate) {
......
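printOldNew() above walks the pose/ERS parameters three at a time, and getV3Diff() reports the Euclidean distance between the new and backed-up values of each 3-component group. A small standalone restatement of that distance with an illustrative example; the vectors and indices are hypothetical:

// Euclidean norm of (new - old) over one 3-component parameter group starting at indx.
public class V3DiffSketch {
    static double v3Diff(double[] v_new, double[] v_old, int indx) {
        double l2 = 0;
        for (int i = 0; i < 3; i++) {
            double d = v_new[indx + i] - v_old[indx + i];
            l2 += d * d;
        }
        return Math.sqrt(l2);
    }
    public static void main(String[] args) {
        double[] before = {0.0, 0.0, 0.0, 0.1, 0.2, 0.3};
        double[] after  = {0.0, 0.0, 0.0, 0.1, 0.2, 0.4};
        System.out.println(v3Diff(after, before, 3)); // ~0.1 for the second triplet
    }
}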
......@@ -339,7 +339,7 @@ public class MacroCorrelation {
geometryCorrection, // final GeometryCorrection geometryCorrection,
null, // final GeometryCorrection geometryCorrection_main, // if not null correct this camera (aux) to the coordinates of the main
null, // clt_kernels, // final double [][][][][][] clt_kernels, // [channel_in_quad][color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
clt_parameters.kernel_step,
// clt_parameters.kernel_step,
/// clt_parameters.transform_size,
clt_parameters.clt_window,
shiftXY, //
......
......@@ -591,12 +591,10 @@ public class MultisceneLY {
int clustersX = (int) Math.ceil(1.0 * tilesX / clt_parameters.lyms_clust_size);
int clustersY = (int) Math.ceil(1.0 * tilesY / clt_parameters.lyms_clust_size);
int clusters = clustersX * clustersY;
int numSens = last_scene.tp.getNumSensors();
final int [][] num_tiles = (in_num_tiles != null)? in_num_tiles : (new int [2][]);
for (int i = 0; i < num_tiles.length; i++) {
num_tiles[i] = new int [clusters];
}
// int [][] num_tiles = new int [2][clusters]; // may be null;; // null;
boolean [] inf_cluster = new boolean [clusters]; // null;
boolean debug = debug_level > -2;
// get for infinity only
......@@ -718,6 +716,18 @@ public class MultisceneLY {
return inf_noinf_lazy_eye_data;
}
/**
* Display measured Lazy Eye data
*
* @param clt_parameters configured parameters
* @param gc GeometryCorrection instance (e.g. from one scene), needed to create
* and initialize ExtrinsicAdjustment instance
* @param clustersX number of clusters in a row
* @param clustersY number of cluster rows
* @param ly_data Lazy Eye data - array in cluster line-scan order, each element may be null
* or an array of LY data
* @param title Image title to use
*/
public static void showLY(
CLTParameters clt_parameters,
GeometryCorrection gc,
......@@ -752,6 +762,18 @@ public class MultisceneLY {
ea.data_titles); // ExtrinsicAdjustment.DATA_TITLES);
}
/**
* Update per-tile target_disparity values for non-infinity tiles using LY data
* (LMA disparity difference), adding those values to all tiles of the corresponding
* clusters in each scene where they are defined (non-NaN).
*
* @param clt_parameters configuration parameters
* @param tp TileProcessor instance
* @param target_disparities per-scene, per-tile array of target disparities
* (NaN for undefined tiles)
* @param ly_data Lazy Eye data (only differential disparity used)
* @param threadsMax maximal number of threads
*/
private static void updateTargetDisparities(
final CLTParameters clt_parameters,
final TileProcessor tp,
......@@ -938,7 +960,7 @@ public class MultisceneLY {
dcorr_td, // final double [][][][] dcorr_td, // [tile][pair][4][64] sparse by pair transform domain representation of corr pairs
// no combo here - rotate, combine in pixel domain after interframe
scene.getCltKernels(), // clt_kernels, // final double [][][][][][] clt_kernels, // [sensor][color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
clt_parameters.kernel_step, // final int kernel_step,
// clt_parameters.kernel_step, // final int kernel_step,
clt_parameters.clt_window, // final int window_type,
clt_parameters.corr_red, // final double corr_red,
clt_parameters.corr_blue, // final double corr_blue,
......@@ -1047,7 +1069,7 @@ public class MultisceneLY {
wh, // final int [] wh,
threadsMax, // final int threadsMax, // maximal number of threads to launch
debug_level); // final int globalDebugLevel)
if (accum_2d_img != null) {
String [] titles = new String [accum_2d_img.length]; // dcorr_tiles[0].length];
int ind_length = image_dtt.getCorrelation2d().getCorrTitles().length;
......@@ -1062,6 +1084,7 @@ public class MultisceneLY {
true,
last_scene.getImageName()+"-CORR-DECIMATED"+clust_size+"-"+debug_suffix,
titles);
}
double [][] disparity_map_decimated = ImageDtt.corr2d_decimate( // not used in lwir
disparity_map, // final float [][] corr2d_img,
tilesX, // final int tilesX,
......@@ -1260,6 +1283,20 @@ public class MultisceneLY {
return combo_pXpYD;
}
/**
* Merge Lazy Eye data from two separate measurements: an infinity measurement (disparity
* offset, with the goal of making disparity exactly zero for objects at infinity, such as
* clouds) and a "lazy eye" measurement. This is needed because there are too few
* measurements for infinity objects, so lazy eye data is measured separately for the same
* tiles and combined with the disparity measurements for infinity only.
* @param adjust_mode uses enum MSLY_MODE: INF_ONLY, NOINF_ONLY, and INF_NOINF (typical)
* @param lazy_eye_data2 [2][clusters][LY parameters]: [0][][] - infinity measurements
* (disparity at infinity is used), [1][][] - non-infinity measurements (LY data
* is used).
* @param force_disparity if non null, should be initialized as boolean[clusters], will be
* updated to have true for tiles that have disparity data (offset for infinity)
* @return combined LY array, compatible with (older) single-scene LY adjustment.
*/
public static double [][] mergeLY(
MSLY_MODE adjust_mode,
double [][][] lazy_eye_data2,
......@@ -1331,8 +1368,22 @@ public class MultisceneLY {
return lazy_eye_data;
}
public boolean processLYdata(
/**
* A placeholder for functionality to be moved from TwoQuadCLT
* @param clt_parameters
* @param adjust_mode
* @param scenes
* @param lazy_eye_data2
* @param valid_tile
* @param inf_disp_ref
* @param is_scene_infinity
* @param update_disparity
* @param threadsMax
* @param updateStatus
* @param debugLevel
* @return
*/
public static boolean processLYdata(
final CLTParameters clt_parameters,
MSLY_MODE adjust_mode,
final QuadCLT [] scenes, // ordered by increasing timestamps
......@@ -1388,6 +1439,25 @@ public class MultisceneLY {
}
// apply delta to each parameter, perform LY measurement and calculate difference
/**
* Measure approximate derivatives of the LY data with respect to the subcamera poses
* @param clt_parameters configuration parameters
* @param scenes array of consecutive camera scenes, ordered by increasing timestamps
* @param lazy_eye_data2 a pair of {infinity LY, non-infinity LY}
* @param valid_tile per-scene, per-tile array of "valid" tiles - ones that have exactly
* one correlation maximum obtained with LMA
* @param inf_disp_ref average disparity of infinity tiles
* @param is_scene_infinity (may be null) - per scene, per tile - tile predicted to belong
* to an infinity object, determined by applying rotation+movement of the composite
* depth map
* @param threadsMax maximal number of threads
* @param delta delta for measuring derivatives, scaled for different parameters internally.
* Typical value 0.001 (0.01 is too high for some tiles)
* @param use_tarz if true - use sensor pose angles (tilt, azimuth, roll, zoom), false -
* use symmetrical parameters (linear combination of T,A,R,Z)
* @param debugLevel debug level (>-2 - threshold)
*/
public static void debugLYDerivatives(
final CLTParameters clt_parameters,
final QuadCLT [] scenes, // ordered by increasing timestamps
......@@ -1395,40 +1465,11 @@ public class MultisceneLY {
final boolean [][] valid_tile, // tile with lma and single correlation maximum
final double inf_disp_ref, // average disparity at infinity for ref scene // is_scene_infinity
final boolean [][] is_scene_infinity, // may be null, if not - may be infinity from the composite depth map
boolean update_disparity, // re-measure disparity before measuring LY
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
double delta,
boolean use_tarz, // derivatives by tarz, not symmetrical vectors
final int debugLevel)
{
// final String [] sinf_noinf= {"inf","noinf"};
// delta = 0.001;
/*double [] parameter_scales4 = { // multiply delay for each parameter
0.3, // 0.014793657667505566, // 00 10 tilt0
0.3, // 0.015484017460841183, // 01 10 tilt1
0.3, // 0.02546712771769517, // 02 10 tilt2
0.3, // 0.02071573747995167, // 03 10 az0
0.3, // 0.026584237444512468, // 04 10 az1
0.3, // 0.014168012698804967, // 05 10 az2
2.0, // 1.8554483718240792E-4,// 06 roll0
0.3, //2.3170738149889717E-4, // 07 roll1
0.3, //3.713239026512266E-4, // 08 roll2
0.3, //2.544834643007531E-4, // 09 roll3
0.3, // 2.5535557646736286E-4, // 10 zoom0
0.3, // 1.98531249109261E-4, // 11 zoom1
0.3, // 2.1802727086879284E-4, // 12 zoom2
150, // 8.814346720176489E-1, // 5, // 13 10000x omega-tilt
150, // 7.071297501674136E-1, // 5, // 14 10000x omega az
150, // 1.306306793587865E-0, // 4, // 15 10000x omega roll
300, // 2.8929916645453735E-0, // 4, // 16 10000x vx
300, // 2.943408022525927E-0, // 4, // 17 10000x vy
500.0}; // 390.6185365641268}; //4}; // 18 100000x vz
*/
double scale_tl = 0.3;
double scale_az = 0.3;
double scale_rl0 = 2.0;
......@@ -1644,7 +1685,7 @@ public class MultisceneLY {
width,
height,
true,
"dLY_dpar_"+delta+"DINV"+(update_disparity?"U":"")+"-"+SINF_NOINF[nly],
"dLY_dpar_"+delta+"DINV"+"-"+SINF_NOINF[nly],
titles);
}
dbg_img2 = new double [ly_diff2.length][num_pars][width*height];
......@@ -1673,7 +1714,7 @@ public class MultisceneLY {
width,
height,
true,
"dLY_dpar_"+delta+"DINV"+(update_disparity?"U":"")+"-XY"+"-"+SINF_NOINF[nly],
"dLY_dpar_"+delta+"DINV"+"-XY"+"-"+SINF_NOINF[nly],
titles);
}
return;
......
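debugLYDerivatives() above measures numerical derivatives: each pose parameter is perturbed by delta (scaled per parameter, see scale_tl / scale_az / scale_rl0 and the Javadoc note about a typical delta of 0.001), LY is re-measured, and the change is divided by the step ("apply delta to each parameter, perform LY measurement and calculate difference"). A minimal sketch of that forward-difference idea; measureLY is a hypothetical stand-in for the real, expensive per-scene LY measurement:

import java.util.function.Function;

// Hedged sketch of the finite-difference derivative measurement described above.
public class LyDerivativeSketch {
    public static double[][] dLyDpar(
            double[]                     params,      // current pose/ERS parameter vector
            double[]                     scales,      // per-parameter scale applied to delta
            double                       delta,       // e.g. 0.001, as in the Javadoc above
            Function<double[], double[]> measureLY) { // params -> flattened LY data (hypothetical)
        double[]   ly0   = measureLY.apply(params);          // unperturbed measurement
        double[][] deriv = new double[params.length][ly0.length];
        for (int np = 0; np < params.length; np++) {
            double step = delta * scales[np];
            double[] p1 = params.clone();
            p1[np] += step;                                   // perturb one parameter
            double[] ly1 = measureLY.apply(p1);               // re-measure LY
            for (int i = 0; i < ly0.length; i++) {
                deriv[np][i] = (ly1[i] - ly0[i]) / step;      // forward difference
            }
        }
        return deriv;
    }
}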
......@@ -61,9 +61,21 @@ public class OpticalFlowParameters {
public boolean ignore_ers = false; // ignore velocities from individual ERS (LWIR - ignore, RGB - do not ignore
public double lpf_pairs = 5.0; // velocities LPF during pairwise fitting
public double lpf_series = 5.0; // velocities LPF during all-to-reference fitting
public boolean pattern_mode = false; // true; // not yet used
public double max_rms_maybe = 7.0;
public double max_rms_sure = 2.1; // too high - false positives (for selected set >= 2.2), too low - longer testing
public int pix_step = 20;
public int search_rad = 2; // 0;
public int center_index = -1; // when <0 find center index after pair-wise matching and set (in pattern_mode)
// "testing and debug" section
public boolean combine_empty_only = true; // false;
public boolean late_normalize_iterate = true;
public int test_corr_rad_max = 3;
public boolean show_result_images = true;
// for recalculateFlowXY()
......@@ -155,6 +167,20 @@ public class OpticalFlowParameters {
gd.addNumericField("Velocities LPF during all-to-reference fitting", this.lpf_series, 3,6,"samples",
"Half of the full width LPF to smooth ERS velocities during all scenes to reference pose fitting");
gd.addMessage("Processing factory calibration (pattern) images, some settings may be useful for field images too");
gd.addCheckbox ("Pattern mode", this.pattern_mode,
"When processing factory calibration images. Not used yet. In pattern mode XYZ adjustemnt (Inter-LMA tab) should be disabled");
gd.addNumericField("RMS larger than any possible RMS in correct fitting", this.max_rms_maybe, 3,6,"pix",
"Now not important, can be set to sufficiently large value (now 7.0)");
gd.addNumericField("Definitely good RMS for scene pair-wise fitting", this.max_rms_sure, 3,6,"pix",
"When set too high leads to false positive fitting, too low - causes too much testing around.");
gd.addNumericField("Fitting scan search step", this.pix_step,0,4,"pix",
"Search around in a spiral with specified pixel step. With the current LWIR resolution/focal length should be 20pix(safe)-50pix(probably OK).");
gd.addNumericField("Search distance", this.search_rad,0,4,"steps",
"Search in a spiral, 0 - just single center, 1 - 3x3 pattern, 2 - 5x5 pattern.");
gd.addNumericField("Center (reference) index", this.center_index,0,4,"steps",
"Will be used as a reference frame for matching all to it. If < 0 it will be calculated and set");
gd.addMessage("Testing and Debug");
gd.addCheckbox ("Consolidate correlation macrotiles only if the current macrotile is null", this.combine_empty_only,
"When false - consolidate all macrotiles, including defined ones");
......@@ -162,6 +188,9 @@ public class OpticalFlowParameters {
"Assumed true when consolidating macrotiles");
gd.addNumericField("Maximal consolidation radius to try", this.test_corr_rad_max, 0,4,"tiles",
"Test and dispaly consolidation from from zero to this radius around each macrotile");
gd.addCheckbox ("Show ERS debug images", this.show_result_images,
"To be continued - debugging EDRS for LWIR, but ERS was too small, need to debug with faster rotations.");
gd.addNumericField("Debug level for Optical Flow testing", this.debug_level_optical, 0,4,"",
"Apply to series or Optical FLow tests");
......@@ -211,9 +240,18 @@ public class OpticalFlowParameters {
this.lpf_pairs = gd.getNextNumber();
this.lpf_series = gd.getNextNumber();
this.pattern_mode = gd.getNextBoolean();
this.max_rms_maybe = gd.getNextNumber();
this.max_rms_sure = gd.getNextNumber();
this.pix_step = (int) gd.getNextNumber();
this.search_rad = (int) gd.getNextNumber();
this.center_index = (int) gd.getNextNumber();
this.combine_empty_only = gd.getNextBoolean();
this.late_normalize_iterate = gd.getNextBoolean();
this.test_corr_rad_max = (int) gd.getNextNumber();
this.show_result_images = gd.getNextBoolean();
this.debug_level_optical = (int) gd.getNextNumber();
this.debug_level_iterate = (int) gd.getNextNumber();
this.enable_debug_images = gd.getNextBoolean();
......@@ -258,9 +296,18 @@ public class OpticalFlowParameters {
properties.setProperty(prefix+"lpf_pairs", this.lpf_pairs+"");
properties.setProperty(prefix+"lpf_series", this.lpf_series+"");
properties.setProperty(prefix+"pattern_mode", this.pattern_mode+"");
properties.setProperty(prefix+"max_rms_maybe", this.max_rms_maybe+"");
properties.setProperty(prefix+"max_rms_sure", this.max_rms_sure+"");
properties.setProperty(prefix+"pix_step", this.pix_step+"");
properties.setProperty(prefix+"search_rad", this.search_rad+"");
properties.setProperty(prefix+"center_index", this.center_index+"");
properties.setProperty(prefix+"combine_empty_only", this.combine_empty_only+"");
properties.setProperty(prefix+"late_normalize_iterate", this.late_normalize_iterate+"");
properties.setProperty(prefix+"test_corr_rad_max", this.test_corr_rad_max+"");
properties.setProperty(prefix+"show_result_images", this.show_result_images+"");
properties.setProperty(prefix+"debug_level_optical", this.debug_level_optical+"");
properties.setProperty(prefix+"debug_level_iterate", this.debug_level_iterate+"");
properties.setProperty(prefix+"enable_debug_images", this.enable_debug_images+"");
......@@ -301,14 +348,22 @@ public class OpticalFlowParameters {
if (properties.getProperty(prefix+"best_neibs_num")!=null) this.best_neibs_num=Integer.parseInt(properties.getProperty(prefix+"best_neibs_num"));
if (properties.getProperty(prefix+"ref_stdev")!=null) this.ref_stdev=Double.parseDouble(properties.getProperty(prefix+"ref_stdev"));
if (properties.getProperty(prefix+"ignore_ers")!=null) this.ignore_ers=Boolean.parseBoolean(properties.getProperty(prefix+"ignore_ers"));
if (properties.getProperty(prefix+"lpf_pairs")!=null) this.lpf_pairs=Double.parseDouble(properties.getProperty(prefix+"lpf_pairs"));
if (properties.getProperty(prefix+"lpf_series")!=null) this.lpf_series=Double.parseDouble(properties.getProperty(prefix+"lpf_series"));
if (properties.getProperty(prefix+"pattern_mode")!=null) this.pattern_mode=Boolean.parseBoolean(properties.getProperty(prefix+"pattern_mode"));
if (properties.getProperty(prefix+"max_rms_maybe")!=null) this.max_rms_maybe=Double.parseDouble(properties.getProperty(prefix+"max_rms_maybe"));
if (properties.getProperty(prefix+"max_rms_sure")!=null) this.max_rms_sure=Double.parseDouble(properties.getProperty(prefix+"max_rms_sure"));
if (properties.getProperty(prefix+"pix_step")!=null) this.pix_step=Integer.parseInt(properties.getProperty(prefix+"pix_step"));
if (properties.getProperty(prefix+"search_rad")!=null) this.search_rad=Integer.parseInt(properties.getProperty(prefix+"search_rad"));
if (properties.getProperty(prefix+"center_index")!=null) this.center_index=Integer.parseInt(properties.getProperty(prefix+"center_index"));
if (properties.getProperty(prefix+"combine_empty_only")!=null) this.combine_empty_only=Boolean.parseBoolean(properties.getProperty(prefix+"combine_empty_only"));
if (properties.getProperty(prefix+"late_normalize_iterate")!=null) this.late_normalize_iterate=Boolean.parseBoolean(properties.getProperty(prefix+"late_normalize_iterate"));
if (properties.getProperty(prefix+"test_corr_rad_max")!=null) this.test_corr_rad_max=Integer.parseInt(properties.getProperty(prefix+"test_corr_rad_max"));
if (properties.getProperty(prefix+"show_result_images")!=null) this.show_result_images=Boolean.parseBoolean(properties.getProperty(prefix+"show_result_images"));
if (properties.getProperty(prefix+"debug_level_optical")!=null) this.debug_level_optical=Integer.parseInt(properties.getProperty(prefix+"debug_level_optical"));
if (properties.getProperty(prefix+"debug_level_iterate")!=null) this.debug_level_iterate=Integer.parseInt(properties.getProperty(prefix+"debug_level_iterate"));
if (properties.getProperty(prefix+"enable_debug_images")!=null) this.enable_debug_images=Boolean.parseBoolean(properties.getProperty(prefix+"enable_debug_images"));
......@@ -352,13 +407,21 @@ public class OpticalFlowParameters {
ofp.lpf_pairs = this.lpf_pairs;
ofp.lpf_series = this.lpf_series;
ofp.pattern_mode = this.pattern_mode;
ofp.max_rms_maybe = this.max_rms_maybe;
ofp.max_rms_sure = this.max_rms_sure;
ofp.pix_step = this.pix_step;
ofp.search_rad = this.search_rad;
ofp.center_index = this.center_index;
ofp.combine_empty_only = this.combine_empty_only;
ofp.late_normalize_iterate = this.late_normalize_iterate;
ofp.test_corr_rad_max = this.test_corr_rad_max;
ofp.show_result_images = this.show_result_images;
ofp.debug_level_optical = this.debug_level_optical;
ofp.debug_level_iterate = this.debug_level_iterate;
ofp.enable_debug_images = this.enable_debug_images;
return ofp;
}
}
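The new pix_step / search_rad parameters describe a spiral scan around the initial pose guess: search_rad = 0 is just the center, 1 covers a 3x3 grid of pix_step-sized steps, 2 a 5x5 grid, and so on (per the dialog tooltips above). A hedged sketch of generating those offsets; the actual scan order used by the fitting code may differ, this only shows which offsets get covered:

import java.util.ArrayList;
import java.util.List;

// Illustrative generator of the search offsets implied by pix_step / search_rad:
// ring 0 is the center only, ring r adds the border of a (2r+1) x (2r+1) grid,
// each cell pix_step pixels apart.
public class SpiralSearchSketch {
    public static List<int[]> searchOffsets(int pix_step, int search_rad) {
        List<int[]> offsets = new ArrayList<>();
        offsets.add(new int[] {0, 0});                       // search_rad = 0: center only
        for (int r = 1; r <= search_rad; r++) {
            for (int dy = -r; dy <= r; dy++) {
                for (int dx = -r; dx <= r; dx++) {
                    if (Math.max(Math.abs(dx), Math.abs(dy)) == r) { // border of ring r
                        offsets.add(new int[] {dx * pix_step, dy * pix_step});
                    }
                }
            }
        }
        return offsets;
    }
    public static void main(String[] args) {
        // search_rad = 2, pix_step = 20 -> 25 positions of a 5x5 grid, 20 pix apart
        System.out.println(searchOffsets(20, 2).size()); // prints 25
    }
}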
......@@ -2080,8 +2080,8 @@ public class QuadCLT extends QuadCLTCPU {
iclt_fimg[ncam] = gpuQuad.getRBG(ncam);
}
int out_width = gpuQuad.getImageWidth() + gpuQuad.getDttSize();
int out_height = gpuQuad.getImageHeight() + gpuQuad.getDttSize();
int out_width = gpuQuad.getImageWidth();// + gpuQuad.getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
int out_height = gpuQuad.getImageHeight(); // + gpuQuad.getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
if (isLwir() && colorProcParameters.lwir_autorange) {
double rel_low = colorProcParameters.lwir_low;
double rel_high = colorProcParameters.lwir_high;
......@@ -2145,7 +2145,7 @@ public class QuadCLT extends QuadCLTCPU {
/// array_stack.addSlice("port_"+slice_seq[i], results[slice_seq[i]].getProcessor().getPixels());
/// }
}
ImagePlus imp_stack = new ImagePlus(image_name+sAux()+"-SHIFTED-D"+clt_parameters.disparity, array_stack);
ImagePlus imp_stack = new ImagePlus(image_name+sAux()+"GPU-SHIFTED-D"+clt_parameters.disparity, array_stack);
imp_stack.getProcessor().resetMinAndMax();
if (only4slice) {
return imp_stack;
......@@ -2691,8 +2691,8 @@ public class QuadCLT extends QuadCLTCPU {
iclt_fimg[ncam] = quadCLT_main.getGPU().getRBG(ncam);
}
int out_width = quadCLT_main.getGPU().getImageWidth() + quadCLT_main.getGPU().getDttSize();
int out_height = quadCLT_main.getGPU().getImageHeight() + quadCLT_main.getGPU().getDttSize();
int out_width = quadCLT_main.getGPU().getImageWidth(); // + quadCLT_main.getGPU().getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
int out_height = quadCLT_main.getGPU().getImageHeight(); // + quadCLT_main.getGPU().getDttSize();// 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
int tilesX = quadCLT_main.getGPU().getImageWidth() / quadCLT_main.getGPU().getDttSize();
int tilesY = quadCLT_main.getGPU().getImageHeight() / quadCLT_main.getGPU().getDttSize();
......@@ -2861,7 +2861,7 @@ public class QuadCLT extends QuadCLTCPU {
array_stack.addSlice("port_"+slice_seq[i], results[slice_seq[i]].getProcessor().getPixels());
}
}
ImagePlus imp_stack = new ImagePlus(name+"-SHIFTED-D"+clt_parameters.disparity, array_stack);
ImagePlus imp_stack = new ImagePlus(name+"GPU-SHIFTED-D"+clt_parameters.disparity, array_stack);
imp_stack.getProcessor().resetMinAndMax();
if (!batch_mode) {
imp_stack.updateAndDraw();
......@@ -5338,7 +5338,7 @@ public class QuadCLT extends QuadCLTCPU {
geometryCorrection, // final GeometryCorrection geometryCorrection,
null, // final GeometryCorrection geometryCorrection_main, // if not null correct this camera (aux) to the coordinates of the main
clt_kernels, // final double [][][][][][] clt_kernels, // [channel_in_quad][color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
clt_parameters.kernel_step, // final int kernel_step,
// clt_parameters.kernel_step, // final int kernel_step,
clt_parameters.clt_window, // final int window_type,
shiftXY, // final double [][] shiftXY, // [port]{shiftX,shiftY}
disparity_corr, // final double disparity_corr, // disparity at infinity
......