Commit 83946ca3 authored by Andrey Filippov

added preliminary inter-camera correlation, but it is not working yet

parent b6b0e4d8
......@@ -78,6 +78,7 @@ public class BiQuadParameters {
public boolean ml_keep_aux = true; // include auxiliary camera data in the ML output
public boolean ml_keep_inter = true; // include inter-camera correlation data in the ML output
public boolean ml_keep_hor_vert = true; // include combined horizontal and vertical pairs data in the ML output
public boolean ml_keep_tbrl = true; // include individual top, bottom, right, left pairs
public boolean ml_keep_debug= true; // include debug layer(s) data in the ML output
public boolean ml_8bit= true; // output in 8-bit format (default - 32-bit TIFF)
public double ml_limit_extrim = 0.00001; // ignore lowest and highest values when converting to 8 bpp
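For illustration, a minimal sketch (not part of this commit; the quantile handling is an assumption based on the comment above) of how ml_limit_extrim could be applied when writing the 8 bpp output: the lowest and highest ml_limit_extrim fractions of the values set the black and white points, and everything outside is clipped.
// Sketch only: scale doubles to 8 bpp, ignoring the extreme ml_limit_extrim tails
static byte [] to8bpp(double [] data, double ml_limit_extrim) {
    double [] sorted = data.clone();
    java.util.Arrays.sort(sorted);
    int margin = (int) (ml_limit_extrim * sorted.length);
    double lo = sorted[margin];                           // black point (lowest values ignored)
    double hi = sorted[sorted.length - 1 - margin];       // white point (highest values ignored)
    if (hi <= lo) hi = lo + 1.0;                          // guard against a flat tile
    byte [] out = new byte [data.length];
    for (int i = 0; i < data.length; i++) {
        double d = (data[i] - lo) / (hi - lo);
        if (d < 0.0) d = 0.0; else if (d > 1.0) d = 1.0;  // clip the ignored extremes
        out[i] = (byte) Math.round(255 * d);
    }
    return out;
}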
......@@ -180,8 +181,11 @@ public class BiQuadParameters {
"ML output will have the second set of the layers for the auxiliary camera. Disparity values should be scaled for the camera baseline");
gd.addCheckbox ("Keep inter-camera correlation data", this.ml_keep_inter,
"Inter-camera correlation data has only one layer (and one correlation pair). It is used to generate ground truth data. Usable disparity range (measured in the main camera pixels) is ~1/5 of teh main camera");
gd.addCheckbox ("Keep combine horizonta/vertical pairs", this.ml_keep_hor_vert,
gd.addCheckbox ("Keep individual top, bottom, right, and left pairs", this.ml_keep_tbrl,
"Each of these two layers per camera are calculated from a pair of top/bottom and left/right pairs. Can possibly be used instead of originals to reduce amount of input data");
gd.addCheckbox ("Keep combined horizonta/vertical pairs", this.ml_keep_hor_vert,
"Individual horizontal and vertical pairs (4 total). Can be replaced by two combined (horizontal+vertical) ones");
gd.addCheckbox ("Keep debug layer(s)", this.ml_keep_debug,
"Keep additional (debug) layers that may change for different file versions");
gd.addCheckbox ("Use 8 bpp TIFF (default - 32 bpp)", this.ml_8bit,
......@@ -241,6 +245,7 @@ public class BiQuadParameters {
this.ml_sweep_steps= (int) gd.getNextNumber();
this.ml_keep_aux= gd.getNextBoolean();
this.ml_keep_inter= gd.getNextBoolean();
this.ml_keep_tbrl= gd.getNextBoolean();
this.ml_keep_hor_vert= gd.getNextBoolean();
this.ml_keep_debug= gd.getNextBoolean();
this.ml_8bit= gd.getNextBoolean();
......@@ -299,6 +304,7 @@ public class BiQuadParameters {
properties.setProperty(prefix+"ml_sweep_steps", this.ml_sweep_steps+"");
properties.setProperty(prefix+"ml_keep_aux", this.ml_keep_aux+"");
properties.setProperty(prefix+"ml_keep_inter", this.ml_keep_inter+"");
properties.setProperty(prefix+"ml_keep_tbrl", this.ml_keep_tbrl+"");
properties.setProperty(prefix+"ml_keep_hor_vert", this.ml_keep_hor_vert+"");
properties.setProperty(prefix+"ml_keep_debug", this.ml_keep_debug+"");
properties.setProperty(prefix+"ml_8bit", this.ml_8bit+"");
......@@ -355,6 +361,7 @@ public class BiQuadParameters {
if (properties.getProperty(prefix+"ml_sweep_steps")!=null) this.ml_sweep_steps=Integer.parseInt(properties.getProperty(prefix+"ml_sweep_steps"));
if (properties.getProperty(prefix+"ml_keep_aux")!=null) this.ml_keep_aux=Boolean.parseBoolean(properties.getProperty(prefix+"ml_keep_aux"));
if (properties.getProperty(prefix+"ml_keep_inter")!=null) this.ml_keep_inter=Boolean.parseBoolean(properties.getProperty(prefix+"ml_keep_inter"));
if (properties.getProperty(prefix+"ml_keep_tbrl")!=null) this.ml_keep_tbrl=Boolean.parseBoolean(properties.getProperty(prefix+"ml_keep_tbrl"));
if (properties.getProperty(prefix+"ml_keep_hor_vert")!=null) this.ml_keep_hor_vert=Boolean.parseBoolean(properties.getProperty(prefix+"ml_keep_hor_vert"));
if (properties.getProperty(prefix+"ml_keep_debug")!=null) this.ml_keep_debug=Boolean.parseBoolean(properties.getProperty(prefix+"ml_keep_debug"));
if (properties.getProperty(prefix+"ml_8bit")!=null) this.ml_8bit=Boolean.parseBoolean(properties.getProperty(prefix+"ml_8bit"));
......@@ -411,6 +418,7 @@ public class BiQuadParameters {
bqp.ml_sweep_steps= this.ml_sweep_steps;
bqp.ml_keep_aux= this.ml_keep_aux;
bqp.ml_keep_inter= this.ml_keep_inter;
bqp.ml_keep_tbrl= this.ml_keep_tbrl;
bqp.ml_keep_hor_vert= this.ml_keep_hor_vert;
bqp.ml_keep_debug= this.ml_keep_debug;
bqp.ml_8bit= this.ml_8bit;
......
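As context for the pair-selection checkboxes above, a minimal sketch (not part of this commit; the per-element averaging and flat array layout are assumptions - the actual combination may also need to rotate/transpose the vertical pairs so their disparity axes align) of deriving the two combined horizontal/vertical layers from the four individual pair correlations:
// Sketch only: horizontal = average(top, bottom), vertical = average(left, right)
static double [][] combineHorVert(double [] top, double [] bottom, double [] left, double [] right) {
    double [] hor  = new double [top.length];
    double [] vert = new double [left.length];
    for (int i = 0; i < hor.length;  i++) hor[i]  = 0.5 * (top[i]  + bottom[i]);
    for (int i = 0; i < vert.length; i++) vert[i] = 0.5 * (left[i] + right[i]);
    return new double [][] {hor, vert};
}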
......@@ -476,12 +476,14 @@ public class EyesisCorrections {
}
if (correctionsParameters.isJP4()) imp=JP4_INSTANCE.demuxImage(imp_composite, subChannel);
if (imp==null) imp=imp_composite; // not a composite image
int [] widthHeight={imp.getWidth(),imp.getHeight()};
this.channelWidthHeight[srcChannel]=widthHeight;
// int [] widthHeight={imp.getWidth(),imp.getHeight()};
// this.channelWidthHeight[srcChannel]=widthHeight;
this.channelVignettingCorrection[srcChannel]=this.pixelMapping.getBayerFlatFieldFloat(
srcChannel,
this.channelWidthHeight[srcChannel][0],
this.channelWidthHeight[srcChannel][1],
// this.channelWidthHeight[srcChannel][0],
// this.channelWidthHeight[srcChannel][1],
bayer);
if (this.debugLevel>0){
System.out.println("Created vignetting info for channel "+srcChannel+
......
......@@ -85,6 +85,11 @@ public class GeometryCorrection {
public RigOffset rigOffset = null;
public int [] getSensorWH() {
int [] wh = {this.pixelCorrectionWidth, this.pixelCorrectionHeight};
return wh;
}
public GeometryCorrection(double [] extrinsic_corr)
{
this.extrinsic_corr = new CorrVector(extrinsic_corr);
......@@ -98,6 +103,11 @@ public class GeometryCorrection {
return (use_rig && (rigOffset != null)) ? rigOffset.rXY_aux: rXY ;
}
// public double [] getAuxOffset(boolean use_rig){
// double [] main_offset = {0.0,0.0};
// return (use_rig && (rigOffset != null)) ? rigOffset.getAuxOffset(): main_offset ;
// }
public Matrix getRotMatrix(boolean use_rig){
return (use_rig && (rigOffset != null)) ? rigOffset.getRotMatrix(): null ;
}
......@@ -656,6 +666,10 @@ public class GeometryCorrection {
return vector;
}
// public double [] getAuxOffset() {
// double [] aux_offset= {baseline * Math.cos(aux_angle)/getDisparityRadius(), baseline * Math.sin(aux_angle)/getDisparityRadius()};
// return aux_offset;
// }
public void recalcRXY() {
if (rXY != null) {
// rXY_aux = rXY; // FIXME: put real stuff !!!
......@@ -695,10 +709,6 @@ public class GeometryCorrection {
{ xc_pix, yc_pix},
{dxc_dangle, dyc_dangle},
{dxc_baseline, dyc_baseline}};
/* double [][] rslt = {
{ -xc_pix, -yc_pix},
{-dxc_dangle, -dyc_dangle},
{-dxc_baseline, -dyc_baseline}}; */
return rslt;
}
......@@ -2236,8 +2246,13 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
double ri_scale = 0.001 * gc_main.pixelSize / gc_main.distortionRadius;
// non-distorted XY relative to the auxiliary camera center if it was parallel to the main one
double pXci0 = pXc - disparity * aux_offset_derivs[0][0]; // in pixels
double pYci0 = pYc - disparity * aux_offset_derivs[0][1]; // in pixels
// Allow aux_offset_derivs null only if disparity == 0.0;
double pXci0 = pXc; // - disparity * aux_offset_derivs[0][0]; // in pixels
double pYci0 = pYc; // - disparity * aux_offset_derivs[0][1]; // in pixels
if (disparity != 0.0) { // aux_offset_derivs != null)
pXci0 -= disparity * aux_offset_derivs[0][0]; // in pixels
pYci0 -= disparity * aux_offset_derivs[0][1]; // in pixels
}
// rectilinear here
// Convert a 2-d non-distorted vector to 3d at fl_pix distance in z direction
......@@ -2246,7 +2261,7 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
// Apply port-individual combined rotation/zoom matrix
Matrix rvi = aux_rot.times(vi);
Matrix rvi = (aux_rot == null) ? vi: aux_rot.times(vi);
// get back to the projection plane by normalizing vector
......@@ -2480,6 +2495,51 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
return pXY;
}
/*
public double [] getAuxCoordinatesRigIdeal( // used in macro mode
GeometryCorrection gc_main,
Matrix rots,
double px,
double py,
double disparity)
{
// reverse getPortsCoordinates
double c_roll = 1.0; // Math.cos(( - this.common_roll) * Math.PI/180.0);
double s_roll = 0.0; // Math.sin(( - this.common_roll) * Math.PI/180.0);
double pXcd0 = px - 0.5 * this.pixelCorrectionWidth;
double pYcd0 = py - 0.5 * this.pixelCorrectionHeight;
double pXcd = c_roll * pXcd0 - s_roll* pYcd0;
double pYcd = s_roll * pXcd0 + c_roll* pYcd0;
double rD = Math.sqrt(pXcd*pXcd + pYcd*pYcd)*0.001*this.pixelSize; // distorted radius in a virtual center camera
double rND2R=getRByRDist(rD/this.distortionRadius, (debugLevel > -1));
double pXc = pXcd * rND2R; // non-distorted coordinates relative to the (0.5 * this.pixelCorrectionWidth, 0.5 * this.pixelCorrectionHeight)
double pYc = pYcd * rND2R; // in pixels
double [] a={this.distortionC,this.distortionB,this.distortionA,this.distortionA5,this.distortionA6,this.distortionA7,this.distortionA8};
double [] pXY = new double[2];
// calculate for aux (this) camera
double [][] aux_offset = getAuxOffsetAndDerivatives(gc_main);
// non-distorted XY of the shifted location of the individual sensor
double pXci = pXc - disparity * aux_offset[0][0]; // in pixels
double pYci = pYc - disparity * aux_offset[0][1];
// calculate back to distorted
double rNDi = Math.sqrt(pXci*pXci + pYci*pYci); // in pixels
// Rdist/R=A8*R^7+A7*R^6+A6*R^5+A5*R^4+A*R^3+B*R^2+C*R+(1-A8-A7-A6-A5-A-B-C)");
double ri = rNDi* 0.001 * this.pixelSize / this.distortionRadius; // relative to distortion radius
// double rD2rND = (1.0 - distortionA8 - distortionA7 - distortionA6 - distortionA5 - distortionA - distortionB - distortionC);
double rD2rND = 1.0;
double rri = 1.0;
for (int j = 0; j < a.length; j++){
rri *= ri;
rD2rND += a[j]*(rri - 1.0);
}
double pXid = pXci * rD2rND;
double pYid = pYci * rD2rND;
pXY[0] = c_roll * pXid + s_roll* pYid + 0.5 * this.pixelCorrectionWidth; // this.pXY0[i][0];
pXY[1] = -s_roll * pXid + c_roll* pYid + 0.5 * this.pixelCorrectionHeight; // this.pXY0[i][1];
return pXY;
}
*/
public double [][] getPortsCoordinatesIdeal(
......@@ -2500,6 +2560,30 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
return coords;
}
public double [] getRigAuxCoordinatesIdeal(
int macro_scale, // 1 for pixels, 8 - for tiles when correlating tiles instead of the pixels
GeometryCorrection gc_main,
Matrix aux_rot,
double px,
double py,
double disparity)
{
double [] xy = getRigAuxCoordinatesAndDerivatives(
gc_main, // GeometryCorrection gc_main,
aux_rot, // Matrix aux_rot,
null, // Matrix [] aux_rot_derivs,
null, // double [][] aux_offset_derivs,
null, // double [][] pXYderiv, // if not null, should be double[6][]
px * macro_scale, // double px,
py * macro_scale, // double py,
disparity); // double disparity);
double [] coords = {xy[0]/macro_scale,xy[1]/macro_scale};
return coords;
}
// Copied from PixelMapping
/**
......
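For reference, a small self-contained sketch (illustration only, mirroring the commented-out loop in getAuxCoordinatesRigIdeal above) of the Rdist/R polynomial: the incremental form 1 + sum(a[j]*(r^(j+1) - 1)) equals the direct polynomial A8*r^7 + ... + C*r + (1 - A8 - ... - C) from the comment, because the constant terms collect to 1 - sum(a[j]).
// Sketch only: distorted-to-non-distorted radius ratio, a = {C, B, A, A5, A6, A7, A8}
static double rDistOverR(double r, double [] a) {
    double rD2rND = 1.0;
    double rri = 1.0;
    for (int j = 0; j < a.length; j++) {
        rri *= r;                        // rri == r^(j+1)
        rD2rND += a[j] * (rri - 1.0);    // accumulate a[j]*(r^(j+1) - 1)
    }
    return rD2rND;
}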
......@@ -201,25 +201,26 @@ public class PixelMapping {
public float [] getBayerFlatFieldFloat(
int channel,
int width,
int height,
// int width,
// int height,
int [][] bayer){ //{{1,0},{2,1}} GR/BG
if ((this.sensors == null) || (channel < 0) || (channel >= this.sensors.length)) return null;
// width = this.sensors[channel]
return this.sensors[channel].getBayerFlatFieldFloat(
width,
height,
// width,
// height,
bayer);
}
public double [] getBayerFlatField(
int channel,
int width,
int height,
// int width,
// int height,
int [][] bayer){ //{{1,0},{2,1}} GR/BG
if ((this.sensors == null) || (channel < 0) || (channel >= this.sensors.length)) return null;
return this.sensors[channel].getBayerFlatField(
width,
height,
// width,
// height,
bayer);
}
......@@ -15645,8 +15646,8 @@ public class PixelMapping {
public double [][] pixelCorrection= null; // x,y, alpha, add flat, color, etc.
// public double [] sensorMask= null;
public int pixelCorrectionDecimation= 1;
public int pixelCorrectionWidth= 2592;
public int pixelCorrectionHeight= 1936;
public int pixelCorrectionWidth= -1; // 2592;
public int pixelCorrectionHeight= -1; // 1936;
public double entrancePupilForward= 0.0;
......@@ -15673,6 +15674,11 @@ public class PixelMapping {
public double [][] r_xyod=null; //{x0,y0,ortho, diagonal}
public int [] getSensorWH() {
int [] wh = {this.pixelCorrectionWidth, this.pixelCorrectionHeight};
return wh;
}
public SensorData (String channelPath , boolean ok ){
createEquirectangularMap(channelPath);
......@@ -15880,9 +15886,11 @@ public class PixelMapping {
}
public double [] getBayerFlatField(
int width,
int height,
// int width,
// int height,
int [][] bayer){ //{{1,0},{2,1}} GR/BG
int width = this.pixelCorrectionWidth; // width,
int height= this.pixelCorrectionHeight; // int height,
double [] corrScale = new double [width*height];
for (int y=0;y<height;y++) for (int x=0;x<width;x++){
......@@ -15908,6 +15916,13 @@ public class PixelMapping {
return corrScale;
}
public float [] getBayerFlatFieldFloat(
int [][] bayer){ //{{1,0},{2,1}} GR/BG
return _getBayerFlatFieldFloat(
this.pixelCorrectionWidth, // width,
this.pixelCorrectionHeight, // int height,
bayer);
}
private float [] _getBayerFlatFieldFloat(
int width,
int height,
int [][] bayer){ //{{1,0},{2,1}} GR/BG
......
......@@ -26,6 +26,7 @@
import java.awt.Rectangle;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
......@@ -38,6 +39,7 @@ import ij.WindowManager;
//import ij.gui.Overlay;
import ij.io.FileSaver;
import ij.process.ColorProcessor;
import ij.process.FloatProcessor;
import ij.process.ImageProcessor;
......@@ -2629,6 +2631,10 @@ public class QuadCLT {
eyesisCorrections.JP4_INSTANCE.decodeProperiesFromInfo(imp_srcs[srcChannel]); // decode existent properties from info
if (debugLevel>0) System.out.println("Processing "+sourceFiles[nFile]);
}
imp_srcs[srcChannel] = padBayerToFullSize(
imp_srcs[srcChannel], // ImagePlus imp_src,
eyesisCorrections.pixelMapping.sensors[srcChannel].getSensorWH(),
true); // boolean replicate);
scaleExposures[srcChannel] = 1.0;
if (!Double.isNaN(referenceExposures[nFile]) && (imp_srcs[srcChannel].getProperty("EXPOSURE")!=null)){
scaleExposures[srcChannel] = referenceExposures[nFile]/Double.parseDouble((String) imp_srcs[srcChannel].getProperty("EXPOSURE"));
......@@ -3189,6 +3195,81 @@ public class QuadCLT {
return nf;
}
/**
* Pad an acquired Bayer image to the full sensor width/height. Used when the optical center pixel coordinates do not match between channels
* and the WOI is adjusted during image capture to avoid ERS mismatch between horizontal pairs
* @param imp_src source image with WOI specified as properties (sizes and offsets should be even)
* @param wh {sensor_width, sensor_height} in pixels
* @param replicate fill gaps by replicating existing pixels
* @return full size image
*/
ImagePlus padBayerToFullSize(
ImagePlus imp_src,
int [] wh,
boolean replicate) {
int woi_top = Integer.parseInt((String) imp_src.getProperty("WOI_TOP")); // enforce even
int woi_left = Integer.parseInt((String) imp_src.getProperty("WOI_LEFT"));
int woi_width = imp_src.getWidth(); // Integer.parseInt((String) imp_src.getProperty("WOI_WIDTH"));
int woi_height = imp_src.getHeight(); // Integer.parseInt((String) imp_src.getProperty("WOI_HEIGHT"));
Properties properties = imp_src.getProperties();
if ((woi_top == 0) && (woi_left == 0) && (woi_width == wh[0]) && (woi_height == wh[1])){
return imp_src; // good as is
}
float [] full_pixels = new float [wh[0]*wh[1]];
float [] pixels=(float []) imp_src.getProcessor().getPixels();
int dst_col = woi_left;
int copy_width = woi_width;
if ((dst_col + copy_width) > wh[0]) {
copy_width = wh[0] - dst_col;
}
for (int src_row = 0; src_row < woi_height; src_row++) {
int dst_row = src_row + woi_top;
if (dst_row < wh[1]) {
System.arraycopy( pixels, src_row * woi_width, full_pixels, dst_row * wh[0] + dst_col, copy_width);
}
}
if (replicate) {
// replicate top
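// woi_top is assumed even, so woi_top + (dst_row & 1) preserves the Bayer row phase of each padded row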
for (int dst_row = 0; dst_row < woi_top; dst_row++) {
int src_row = woi_top + (dst_row & 1);
System.arraycopy( full_pixels, src_row * wh[0] + dst_col, full_pixels, dst_row * wh[0] + dst_col, copy_width);
}
// replicate bottom
for (int dst_row = woi_top + woi_height; dst_row < wh[1]; dst_row++) {
int src_row = woi_top + woi_height - 2 + (dst_row & 1);
System.arraycopy( full_pixels, src_row * wh[0] + dst_col, full_pixels, dst_row * wh[0] + dst_col, copy_width);
}
// left/right gaps are unlikely, as there is no need for horizontal WOI adjustment - horizontal mismatch does not influence ERS
for (int col = 0; col < woi_left; col++) {
for (int row = 0; row < wh[1]; row++) {
full_pixels[row*wh[0] + col] = full_pixels[row*wh[0] + woi_left + (col & 1)];
}
}
for (int col = woi_left + woi_width; col < wh[0]; col++) {
for (int row = 0; row < wh[1]; row++) {
full_pixels[row*wh[0] + col] = full_pixels[row*wh[0] + woi_left + woi_width - 2 +(col & 1)];
}
}
}
ImageProcessor ip = new FloatProcessor(wh[0],wh[1]);
ip.setPixels(full_pixels);
ip.resetMinAndMax(); // is it needed here?
ImagePlus imp = new ImagePlus(imp_src.getTitle(),ip); // OK to have the same name?
for (Map.Entry<?, ?> entry: properties.entrySet()) {
String key = (String) entry.getKey();
String value = (String) entry.getValue();
imp.setProperty(key, value);
}
imp.setProperty("WOI_WIDTH", wh[0]+"");
imp.setProperty("WOI_HEIGHTH", wh[1]+"");
imp.setProperty("WOI_TOP", "0");
imp.setProperty("WOI_LEFT", "0");
return imp;
}
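A minimal usage sketch (not part of the commit; the WOI offsets and sensor size below are hypothetical) illustrating the property convention padBayerToFullSize() expects - even offsets stored as WOI_TOP/WOI_LEFT properties, the cropped Bayer mosaic as float pixels, and the full sensor size passed separately:
// Hypothetical example: a 2592x1920 capture cropped 8 rows from the top of a 2592x1936 sensor
FloatProcessor fp = new FloatProcessor(2592, 1920);     // cropped Bayer mosaic (float pixels)
ImagePlus cropped = new ImagePlus("cropped", fp);
cropped.setProperty("WOI_TOP",  "8");                   // even vertical offset of the WOI
cropped.setProperty("WOI_LEFT", "0");                   // even horizontal offset of the WOI
ImagePlus full = padBayerToFullSize(
        cropped,                   // ImagePlus imp_src,
        new int [] {2592, 1936},   // int [] wh - full sensor {width, height}
        true);                     // boolean replicate - fill the gaps by replicating edge rows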
/**
* Conditions images for a single image set
......@@ -3215,6 +3296,7 @@ public class QuadCLT {
ImagePlus [] imp_srcs = new ImagePlus[channelFiles.length];
// double [] scaleExposures = new double[channelFiles.length]; //
double [][] dbg_dpixels = new double [channelFiles.length][];
// int [] fullWindowWH = geometryCorrection.getSensorWH();
for (int srcChannel=0; srcChannel < channelFiles.length; srcChannel++){
int nFile=channelFiles[srcChannel]; // channelFiles[srcChannel];
......@@ -3246,7 +3328,12 @@ public class QuadCLT {
eyesisCorrections.JP4_INSTANCE.decodeProperiesFromInfo(imp_srcs[srcChannel]); // decode existent properties from info
if (debugLevel>0) System.out.println("Processing "+sourceFiles[nFile]);
}
// imp_srcs[srcChannel].show(); // REMOVE ME!
imp_srcs[srcChannel] = padBayerToFullSize(
imp_srcs[srcChannel], // ImagePlus imp_src,
eyesisCorrections.pixelMapping.sensors[srcChannel].getSensorWH(),
true); // boolean replicate);
scaleExposures[srcChannel] = 1.0;
if (!Double.isNaN(referenceExposures[nFile]) && (imp_srcs[srcChannel].getProperty("EXPOSURE")!=null)){
scaleExposures[srcChannel] = referenceExposures[nFile]/Double.parseDouble((String) imp_srcs[srcChannel].getProperty("EXPOSURE"));
......@@ -4736,6 +4823,10 @@ public class QuadCLT {
eyesisCorrections.JP4_INSTANCE.decodeProperiesFromInfo(imp_srcs[srcChannel]); // decode existent properties from info
if (debugLevel>0) System.out.println("Processing "+sourceFiles[nFile]);
}
imp_srcs[srcChannel] = padBayerToFullSize(
imp_srcs[srcChannel], // ImagePlus imp_src,
eyesisCorrections.pixelMapping.sensors[srcChannel].getSensorWH(),
true); // boolean replicate);
scaleExposures[srcChannel] = 1.0;
if (!Double.isNaN(referenceExposures[nFile]) && (imp_srcs[srcChannel].getProperty("EXPOSURE")!=null)){
scaleExposures[srcChannel] = referenceExposures[nFile]/Double.parseDouble((String) imp_srcs[srcChannel].getProperty("EXPOSURE"));
......@@ -8373,7 +8464,7 @@ public class QuadCLT {
}
public ImagePlus [] conditionImageSet(
public ImagePlus [] conditionImageSetBatch( // used in batchCLT3d
final int nSet, // index of the 4-image set
final EyesisCorrectionParameters.CLTParameters clt_parameters,
final int [][] fileIndices, // =new int [numImagesToProcess][2]; // file index, channel number
......@@ -8431,7 +8522,10 @@ public class QuadCLT {
eyesisCorrections.JP4_INSTANCE.decodeProperiesFromInfo(imp_srcs[srcChannel]); // decode existent properties from info
if (debugLevel>0) System.out.println("Processing "+sourceFiles[nFile]);
}
imp_srcs[srcChannel] = padBayerToFullSize(
imp_srcs[srcChannel], // ImagePlus imp_src,
eyesisCorrections.pixelMapping.sensors[srcChannel].getSensorWH(),
true); // boolean replicate);
scaleExposures[srcChannel] = 1.0;
if (!Double.isNaN(referenceExposures[nFile]) && (imp_srcs[srcChannel].getProperty("EXPOSURE")!=null)){
scaleExposures[srcChannel] = referenceExposures[nFile]/Double.parseDouble((String) imp_srcs[srcChannel].getProperty("EXPOSURE"));
......@@ -8712,7 +8806,7 @@ public class QuadCLT {
this.startSetTime = System.nanoTime();
boolean [][] saturation_imp = (clt_parameters.sat_level > 0.0)? new boolean[QUAD][] : null;
double [] scaleExposures = new double[QUAD]; //
ImagePlus [] imp_srcs = conditionImageSet(
ImagePlus [] imp_srcs = conditionImageSetBatch(
nSet, // final int nSet, // index of the 4-image set
clt_parameters, // final EyesisCorrectionParameters.CLTParameters clt_parameters,
fileIndices, // final int [][] fileIndices, // =new int [numImagesToProcess][2]; // file index, channel number
......
import java.awt.Rectangle;
import ij.*;
import ij.IJ;
import ij.ImagePlus;
import ij.ImageStack;
import ij.gui.GenericDialog;
import ij.gui.Roi;
import ij.process.*;
import ij.process.ColorProcessor;
import ij.process.FloatProcessor;
import ij.process.ImageProcessor;
public class showDoubleFloatArrays {
// defaults for color conversion
......@@ -92,14 +96,18 @@ import ij.process.*;
if (asStack) {
float [] fpixels;
ImageStack array_stack=new ImageStack(width,height);
boolean not_empty = false;
for (i=0;i<pixels.length;i++) if (pixels[i]!=null) {
not_empty = true;
fpixels=new float[pixels[i].length];
for (j=0;j<fpixels.length;j++) fpixels[j]=(float)pixels[i][j];
array_stack.addSlice(titles[i], fpixels);
}
if (not_empty) {
ImagePlus imp_stack = new ImagePlus(title, array_stack);
imp_stack.getProcessor().resetMinAndMax();
imp_stack.show();
}
return;
} else showArrays(pixels, width, height, titles);
}
......@@ -111,7 +119,7 @@ import ij.process.*;
ImageStack array_stack=new ImageStack(width,height);
for (i=0;i<pixels.length;i++) if (pixels[i]!=null) {
fpixels=new float[pixels[i].length];
for (j=0;j<fpixels.length;j++) fpixels[j]=(float)pixels[i][j];
for (j=0;j<fpixels.length;j++) fpixels[j]=pixels[i][j];
array_stack.addSlice(titles[i], fpixels);
}
ImagePlus imp_stack = new ImagePlus(title, array_stack);
......@@ -242,7 +250,7 @@ import ij.process.*;
ImagePlus[] imp=new ImagePlus[pixels.length];
for (i=0;i<pixels.length;i++) if (pixels[i]!=null) {
fpixels=new float[pixels[i].length];
for (j=0;j<fpixels.length;j++) fpixels[j]=(float)pixels[i][j];
for (j=0;j<fpixels.length;j++) fpixels[j]=pixels[i][j];
ip[i]=new FloatProcessor(width,height);
ip[i].setPixels(fpixels);
ip[i].resetMinAndMax();
......@@ -299,7 +307,7 @@ import ij.process.*;
float [] fpixels;
if (pixels!=null) {
fpixels=new float[pixels.length];
for (j=0;j<pixels.length;j++) fpixels[j]=(float)pixels[j];
for (j=0;j<pixels.length;j++) fpixels[j]=pixels[j];
ImageProcessor ip=new FloatProcessor(width,height);
ip.setPixels(fpixels);
ip.resetMinAndMax();
......@@ -330,7 +338,7 @@ import ij.process.*;
float [] fpixels;
if (pixels!=null) {
fpixels=new float[pixels.length];
for (j=0;j<pixels.length;j++) fpixels[j]=(float)pixels[j];
for (j=0;j<pixels.length;j++) fpixels[j]=pixels[j];
ImageProcessor ip=new FloatProcessor(width,height);
ip.setPixels(fpixels);
ip.resetMinAndMax();
......