Commit f2987dd0 authored by Andrey Filippov

Merge remote-tracking branch 'origin/dct'

parents 3dd608b9 96038858
......@@ -100,7 +100,7 @@ public class CorrectionColorProc {
}
//public void showArrays(double[][] pixels, int width, int height, boolean asStack, String title) {
//public void showArrays(double[][] pixels, int width, int height, boolean asStack, String title) {
......@@ -138,7 +138,7 @@ public class CorrectionColorProc {
/* Convert to YPbPr */
double Y,Pb,Pr;
// double Kg=1.0-colorProcParameters.kr-colorProcParameters.kb;
// double Kg=1.0-colorProcParameters.kr-colorProcParameters.kb;
double Sb=0.5/(1.0-colorProcParameters.kb)*colorProcParameters.saturationBlue;
double Sr=0.5/(1.0-colorProcParameters.kr)*colorProcParameters.saturationRed;
double Yr,Yg,Yb,Wr,Wg,Wb,S;
......@@ -271,7 +271,7 @@ public class CorrectionColorProc {
stack.addSlice("Pr", fpixels_pr);
stack.addSlice("Pb", fpixels_pb);
stack.addSlice("Y", fpixels_y);
stack.addSlice("Y0", fpixels_y0); // not filtered by low-pass, preliminary (for comaprison only)
stack.addSlice("Y0", fpixels_y0); // not filtered by low-pass, preliminary (for comparison only)
if (debugLevel>2) {
stack.addSlice("Yr",fpixels_yR);
stack.addSlice("Yg",fpixels_yG);
......
......@@ -1773,40 +1773,64 @@ public class EyesisCorrectionParameters {
}
}
public static class DCTParameters {
public int dct_size = 32; //
public int asym_size = 6; //
public int asym_pixels = 10; // maximal number of non-zero pixels in direct convolution kernel
public int asym_distance = 2; // how far to try a new asym kernel pixel from existing ones
public int dct_size = 8; //
public int asym_size = 15; //
public int asym_pixels = 4; // maximal number of non-zero pixels in direct convolution kernel
public int asym_distance = 1; // how far to try a new asym kernel pixel from existing ones
public int dct_window = 1; // currently only 3 types of windows - 0 (none), 1 and 2
public int LMA_steps = 100;
public double fact_precision=0.003; // stop iterations if error rms less than this part of target kernel rms
public double compactness = 0.02;
public double sym_compactness = 0.01;
public double dc_weight = 10; // importance of dc relative to rms_pure
public int asym_tax_free = 5; // "compactness" does not apply to pixels with |x|<=asym_tax_free and |y| <= asym_tax_free
public int seed_size = 8; // number of initial cells in asym_kernel - should be 4*b + 1 (X around center cell) or 4*n + 0 (X around between cells)
public double asym_random; // initialize asym_kernel with random numbers
public double dbg_x =0;
public double dbg_y =0;
public double dbg_x1 =0;
public double dbg_y1 =0;
public double dbg_sigma =2.0;
public double fact_precision= 0.003; // stop iterations if error rms less than this part of target kernel rms
public double compactness = 0.0;
public double sym_compactness = 0.5;
public double dc_weight = 1.0; // importance of dc relative to rms_pure
public int asym_tax_free = 0; // "compactness" does not apply to pixels with |x|<=asym_tax_free and |y| <= asym_tax_free
public int seed_size = 1; // number of initial cells in asym_kernel - should be 4*b + 1 (X around center cell) or 4*n + 0 (X around between cells)
public double asym_random = -1; // initialize asym_kernel with random numbers
public double dbg_x = 2.7;
public double dbg_y = 0.0;
public double dbg_x1 = -1.3;
public double dbg_y1 = 2.0;
public double dbg_sigma = 0.8;
public String dbg_mask = ".........:::::::::.........:::::::::......*..:::::*:::.........:::::::::.........";
public int dbg_mode = 1; // 0 - old LMA, 1 - new LMA - *** not used anymore ***
public int dbg_window_mode = 2; // 0 - none, 1 - square, 2 - sin, 3 - sin^2
public int dbg_window_mode = 1; // 0 - none, 1 - square, 2 - sin, 3 - sin^2. Now _should_ be square!!!
public boolean centerWindowToTarget = true;
// parameters to extract a kernel from the kernel image file
public int color_channel = 2; // green (<0 - use simulated kernel, also will use simulated if kernels are not set)
public int decimation = 2; // decimate original kernel this much in each direction
public double decimateSigma = 0.4; // what is the optimal value for each decimation?
public double decimateSigma = -1.0; // special mode for 2:1 decimation
public int tileX = 82; // number of kernel tile (0..163)
public int tileY = 62; // number of kernel tile (0..122)
public boolean subtract_dc = false;//subtract/restore dc
public int kernel_chn = -1; //camera channel calibration to use for aberration correction ( < 0 - no correction)
public int kernel_chn = -1; // camera channel calibration to use for aberration correction ( < 0 - no correction)
public boolean normalize = true; //normalize both sym and asym kernels (asym to have sum==1, sym to have sum = dct_size)
public boolean normalize_sym = true; //normalize sym kernels separately
public boolean skip_sym = false; // do not apply symmetrical correction
public boolean convolve_direct = false; // convolve directly with symmetrical kernel (debug feature)
public double vignetting_max = 0.4; // value in vignetting data to correspond to 1x in the kernel
public double vignetting_range = 5.0; // do not try to correct vignetting less than vignetting_max/vignetting_range
public boolean color_DCT = true; // false - use old color processing mode
public double sigma_rb = 0.9; // additional (to G) blur for R and B
public double sigma_y = 0.7; // blur for G contribution to Y
public double sigma_color = 0.7; // blur for Pb and Pr in addition to that of Y
public double line_thershold = 1.0; // line detection amplitude to apply line enhancement - not used?
public boolean nonlin = true; // enable nonlinear processing (including denoise)
public double nonlin_max_y = 1.0; // maximal amount of nonlinear line/edge emphasis for Y component
public double nonlin_max_c = 1.0; // maximal amount of nonlinear line/edge emphasis for C component
public double nonlin_y = 0.1; // amount of nonlinear line/edge emphasis for Y component
public double nonlin_c = 0.01; // amount of nonlinear line/edge emphasis for C component
public double nonlin_corn = 0.5; // relative weight for nonlinear corner elements
public boolean denoise = true; // suppress noise during nonlinear processing
public double denoise_y = 1.0; // maximal total smoothing of the Y post-kernel (will compete with edge emphasis)
public double denoise_c = 1.0; // maximal total smoothing of the color differences post-kernel (will compete with edge emphasis)
public double denoise_y_corn = 0.3; // weight of the 4 corner pixels during denoise y (straight - 1-denoise_y_corn)
public double denoise_c_corn = 0.3; // weight of the 4 corner pixels during denoise color (straight - 1-denoise_c_corn)
public DCTParameters(){}
public DCTParameters(
int dct_size,
int asym_size,
......@@ -1856,8 +1880,31 @@ public class EyesisCorrectionParameters {
properties.setProperty(prefix+"subtract_dc", this.subtract_dc+"");
properties.setProperty(prefix+"kernel_chn", this.kernel_chn+"");
properties.setProperty(prefix+"normalize", this.normalize+"");
properties.setProperty(prefix+"normalize_sym", this.normalize_sym+"");
properties.setProperty(prefix+"skip_sym", this.skip_sym+"");
properties.setProperty(prefix+"convolve_direct", this.convolve_direct+"");
properties.setProperty(prefix+"vignetting_max", this.vignetting_max+"");
properties.setProperty(prefix+"vignetting_range", this.vignetting_range+"");
properties.setProperty(prefix+"color_DCT", this.color_DCT+"");
properties.setProperty(prefix+"sigma_rb", this.sigma_rb+"");
properties.setProperty(prefix+"sigma_y", this.sigma_y+"");
properties.setProperty(prefix+"sigma_color", this.sigma_color+"");
properties.setProperty(prefix+"line_thershold", this.line_thershold+"");
properties.setProperty(prefix+"nonlin", this.nonlin+"");
properties.setProperty(prefix+"nonlin_max_y", this.nonlin_max_y+"");
properties.setProperty(prefix+"nonlin_max_c", this.nonlin_max_c+"");
properties.setProperty(prefix+"nonlin_y", this.nonlin_y+"");
properties.setProperty(prefix+"nonlin_c", this.nonlin_c+"");
properties.setProperty(prefix+"nonlin_corn", this.nonlin_corn+"");
properties.setProperty(prefix+"denoise", this.denoise+"");
properties.setProperty(prefix+"denoise_y", this.denoise_y+"");
properties.setProperty(prefix+"denoise_c", this.denoise_c+"");
properties.setProperty(prefix+"denoise_y_corn", this.denoise_y_corn+"");
properties.setProperty(prefix+"denoise_c_corn", this.denoise_c_corn+"");
}
public void getProperties(String prefix,Properties properties){
......@@ -1891,8 +1938,29 @@ public class EyesisCorrectionParameters {
if (properties.getProperty(prefix+"subtract_dc")!=null) this.subtract_dc=Boolean.parseBoolean(properties.getProperty(prefix+"subtract_dc"));
if (properties.getProperty(prefix+"kernel_chn")!=null) this.kernel_chn=Integer.parseInt(properties.getProperty(prefix+"kernel_chn"));
if (properties.getProperty(prefix+"normalize")!=null) this.normalize=Boolean.parseBoolean(properties.getProperty(prefix+"normalize"));
if (properties.getProperty(prefix+"normalize_sym")!=null) this.normalize_sym=Boolean.parseBoolean(properties.getProperty(prefix+"normalize_sym"));
if (properties.getProperty(prefix+"skip_sym")!=null) this.skip_sym=Boolean.parseBoolean(properties.getProperty(prefix+"skip_sym"));
if (properties.getProperty(prefix+"convolve_direct")!=null) this.convolve_direct=Boolean.parseBoolean(properties.getProperty(prefix+"convolve_direct"));
if (properties.getProperty(prefix+"vignetting_max")!=null) this.vignetting_max=Double.parseDouble(properties.getProperty(prefix+"vignetting_max"));
if (properties.getProperty(prefix+"vignetting_range")!=null) this.vignetting_range=Double.parseDouble(properties.getProperty(prefix+"vignetting_range"));
if (properties.getProperty(prefix+"color_DCT")!=null) this.color_DCT=Boolean.parseBoolean(properties.getProperty(prefix+"color_DCT"));
if (properties.getProperty(prefix+"sigma_rb")!=null) this.sigma_rb=Double.parseDouble(properties.getProperty(prefix+"sigma_rb"));
if (properties.getProperty(prefix+"sigma_y")!=null) this.sigma_y=Double.parseDouble(properties.getProperty(prefix+"sigma_y"));
if (properties.getProperty(prefix+"sigma_color")!=null) this.sigma_color=Double.parseDouble(properties.getProperty(prefix+"sigma_color"));
if (properties.getProperty(prefix+"line_thershold")!=null) this.line_thershold=Double.parseDouble(properties.getProperty(prefix+"line_thershold"));
if (properties.getProperty(prefix+"nonlin")!=null) this.nonlin=Boolean.parseBoolean(properties.getProperty(prefix+"nonlin"));
if (properties.getProperty(prefix+"nonlin_max_y")!=null) this.nonlin_max_y=Double.parseDouble(properties.getProperty(prefix+"nonlin_max_y"));
if (properties.getProperty(prefix+"nonlin_max_c")!=null) this.nonlin_max_c=Double.parseDouble(properties.getProperty(prefix+"nonlin_max_c"));
if (properties.getProperty(prefix+"nonlin_y")!=null) this.nonlin_y=Double.parseDouble(properties.getProperty(prefix+"nonlin_y"));
if (properties.getProperty(prefix+"nonlin_c")!=null) this.nonlin_c=Double.parseDouble(properties.getProperty(prefix+"nonlin_c"));
if (properties.getProperty(prefix+"nonlin_corn")!=null) this.nonlin_corn=Double.parseDouble(properties.getProperty(prefix+"nonlin_corn"));
if (properties.getProperty(prefix+"denoise")!=null) this.denoise=Boolean.parseBoolean(properties.getProperty(prefix+"denoise"));
if (properties.getProperty(prefix+"denoise_y")!=null) this.denoise_y=Double.parseDouble(properties.getProperty(prefix+"denoise_y"));
if (properties.getProperty(prefix+"denoise_c")!=null) this.denoise_c=Double.parseDouble(properties.getProperty(prefix+"denoise_c"));
if (properties.getProperty(prefix+"denoise_y_corn")!=null) this.denoise_y_corn=Double.parseDouble(properties.getProperty(prefix+"denoise_y_corn"));
if (properties.getProperty(prefix+"denoise_c_corn")!=null) this.denoise_c_corn=Double.parseDouble(properties.getProperty(prefix+"denoise_c_corn"));
}
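A minimal usage sketch of the new Properties round-trip, assuming the matching public void setProperties(String prefix, Properties properties) whose body is shown in the hunk above; the prefix string "DCT_PARAMETERS." is only an illustrative placeholder:

    Properties props = new Properties();
    EyesisCorrectionParameters.DCTParameters dctp = new EyesisCorrectionParameters.DCTParameters();
    dctp.setProperties("DCT_PARAMETERS.", props);     // each field is written as a prefixed string value
    EyesisCorrectionParameters.DCTParameters restored = new EyesisCorrectionParameters.DCTParameters();
    restored.getProperties("DCT_PARAMETERS.", props); // keys absent from props leave the defaults untouched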
public boolean showDialog() {
......@@ -1927,9 +1995,30 @@ public class EyesisCorrectionParameters {
gd.addCheckbox ("Subtract avarege before dct, restore after idct", this.subtract_dc);
gd.addNumericField("Calibration channel to use for aberration ( <0 - no correction)",this.kernel_chn, 0);
gd.addCheckbox ("Normalize both sym and asym kernels ", this.normalize);
gd.addCheckbox ("Normalize sym kernels separately", this.normalize_sym);
gd.addCheckbox ("Do not apply symmetrical (DCT) correction ", this.skip_sym);
gd.addCheckbox ("Convolve directly with symmetrical kernel (debug feature) ", this.convolve_direct);
gd.addNumericField("Value (max) in vignetting data to correspond to 1x in the kernel",this.vignetting_max, 3);
gd.addNumericField("Do not try to correct vignetting smaller than this fraction of max",this.vignetting_range, 3);
gd.addCheckbox ("Use DCT-base color conversion", this.color_DCT );
gd.addNumericField("Gaussian sigma to apply to R and B (in addition to G), pix", this.sigma_rb, 3);
gd.addNumericField("Gaussian sigma to apply to Y in the MDCT domain, pix", this.sigma_y, 3);
gd.addNumericField("Gaussian sigma to apply to Pr and Pb in the MDCT domain, pix", this.sigma_color, 3);
gd.addNumericField("Threshold for line detection (not yet used)", this.line_thershold, 3);
gd.addCheckbox ("Use non-linear line emphasis and denoise", this.nonlin );
gd.addNumericField("Maximal amount of non-linear emphasis for linear edges for Y component", this.nonlin_max_y,3);
gd.addNumericField("Maximal amount of non-linear emphasis for linear edges for color diffs.", this.nonlin_max_c,3);
gd.addNumericField("Sensitivity of non-linear emphasis for linear edges for Y component", this.nonlin_y, 3);
gd.addNumericField("Sensitivity of non-linear emphasis for linear edges for color diffs.", this.nonlin_c, 3);
gd.addNumericField("Corretion for diagonal/corner emphasis elements", this.nonlin_corn, 3);
gd.addCheckbox ("Suppress noise during nonlinear processing", this.denoise );
gd.addNumericField("Maximal total smoothing of the Y post-kernel (will compete with edge emphasis)", this.denoise_y,3);
gd.addNumericField("Maximal total smoothing of the color differences post-kernel (will compete with edge emphasis)", this.denoise_c,3);
gd.addNumericField("Weight of the 4 corner pixels during denoising y (straight - 1.0-denoise_y_corn)", this.denoise_y_corn,3);
gd.addNumericField("Weight of the 4 corner pixels during denoising color ((straight - 1.0-denoise_c_corn))", this.denoise_c_corn,3);
WindowTools.addScrollBars(gd);
gd.showDialog();
if (gd.wasCanceled()) return false;
......@@ -1963,8 +2052,30 @@ public class EyesisCorrectionParameters {
this.subtract_dc= gd.getNextBoolean();
this.kernel_chn= (int) gd.getNextNumber();
this.normalize= gd.getNextBoolean();
this.normalize_sym= gd.getNextBoolean();
this.skip_sym= gd.getNextBoolean();
this.convolve_direct= gd.getNextBoolean();
this.vignetting_max= gd.getNextNumber();
this.vignetting_range= gd.getNextNumber();
this.color_DCT= gd.getNextBoolean();
this.sigma_rb= gd.getNextNumber();
this.sigma_y= gd.getNextNumber();
this.sigma_color= gd.getNextNumber();
this.line_thershold= gd.getNextNumber();
this.nonlin= gd.getNextBoolean();
this.nonlin_max_y= gd.getNextNumber();
this.nonlin_max_c= gd.getNextNumber();
this.nonlin_y= gd.getNextNumber();
this.nonlin_c= gd.getNextNumber();
this.nonlin_corn= gd.getNextNumber();
this.denoise= gd.getNextBoolean();
this.denoise_y= gd.getNextNumber();
this.denoise_c= gd.getNextNumber();
this.denoise_y_corn= gd.getNextNumber();
this.denoise_c_corn= gd.getNextNumber();
// MASTER_DEBUG_LEVEL= (int) gd.getNextNumber();
return true;
}
......
......@@ -1390,7 +1390,8 @@ public class EyesisCorrections {
/* ======================================================================== */
private boolean fixSliceSequence (
// private boolean fixSliceSequence (
public boolean fixSliceSequence ( // for EyesisDCT
ImageStack stack,
int debugLevel){
int i,j;
......@@ -2234,7 +2235,8 @@ public class EyesisCorrections {
return mask;
}
/* ======================================================================== */
private void saveAndShow(
// private void saveAndShow(
public void saveAndShow(
ImagePlus imp,
EyesisCorrectionParameters.CorrectionParameters correctionsParameters){
saveAndShowEnable( imp, correctionsParameters , true, true);
......@@ -2253,7 +2255,8 @@ public class EyesisCorrections {
correctionsParameters.JPEG_quality);
}
private void saveAndShow(
void saveAndShow(
// public void saveAndShow(
ImagePlus imp,
EyesisCorrectionParameters.CorrectionParameters correctionsParameters,
boolean save,
......@@ -2261,7 +2264,7 @@ public class EyesisCorrections {
saveAndShow(imp, correctionsParameters, save, show, -1);
}
private void saveAndShow(
void saveAndShow(
ImagePlus imp,
EyesisCorrectionParameters.CorrectionParameters correctionsParameters,
boolean save,
......
......@@ -23,10 +23,15 @@
*/
import java.util.concurrent.atomic.AtomicInteger;
import ij.CompositeImage;
import ij.IJ;
import ij.ImagePlus;
import ij.ImageStack;
import ij.Prefs;
import ij.io.FileSaver;
import ij.process.FloatProcessor;
import ij.process.ImageProcessor;
public class EyesisDCT {
......@@ -35,6 +40,8 @@ public class EyesisDCT {
public EyesisCorrectionParameters.DCTParameters dctParameters = null;
public DCTKernels [] kernels = null;
public ImagePlus eyesisKernelImage = null;
public long startTime;
public EyesisDCT(
EyesisCorrections eyesisCorrections,
EyesisCorrectionParameters.CorrectionParameters correctionsParameters,
......@@ -72,8 +79,15 @@ public class EyesisDCT {
return eyesisKernelImage != null;
}
public boolean DCTKernelsAvailable(){
return kernels != null;
}
public boolean createDCTKernels(
EyesisCorrectionParameters.DCTParameters dct_parameters,
/*
PixelMapping pixelMapping,
*/
int srcKernelSize,
int threadsMax, // maximal number of threads to launch
boolean updateStatus,
......@@ -95,6 +109,7 @@ public class EyesisDCT {
}
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
for (int chn=0;chn<eyesisCorrections.usedChannels.length;chn++){
if (eyesisCorrections.usedChannels[chn] && (sharpKernelPaths[chn]!=null) && (kernels[chn]==null)){
ImagePlus imp_kernel_sharp=new ImagePlus(sharpKernelPaths[chn]);
......@@ -115,11 +130,55 @@ public class EyesisDCT {
debugLevel); // update status info
int sym_width = kernels.numHor * kernels.dct_size;
int sym_height = kernels.sym_kernels[0].length /sym_width;
sdfa_instance.showArrays(kernels.sym_kernels, sym_width, sym_height, true, imp_kernel_sharp.getTitle()+"-sym");
// save files
String [] symNames = {"red_sym","blue_sym","green_sym"};
String [] asymNames = {"red_asym","blue_asym","green_asym"};
ImageStack symStack = sdfa_instance.makeStack(
kernels.sym_kernels,
sym_width,
sym_height,
symNames);
String symPath=correctionsParameters.dctKernelDirectory+
Prefs.getFileSeparator()+
correctionsParameters.dctKernelPrefix+
String.format("%02d",chn)+
correctionsParameters.dctSymSuffix;
String msg="Saving symmetrical convolution kernels to "+symPath;
IJ.showStatus(msg);
if (debugLevel>0) System.out.println(msg);
ImagePlus imp_sym=new ImagePlus(imp_kernel_sharp.getTitle()+"-sym",symStack);
if (debugLevel > 1) {
imp_sym.getProcessor().resetMinAndMax();
imp_sym.show();
}
FileSaver fs=new FileSaver(imp_sym);
fs.saveAsTiffStack(symPath);
// sdfa_instance.showArrays(kernels.sym_kernels, sym_width, sym_height, true, imp_kernel_sharp.getTitle()+"-sym");
int asym_width = kernels.numHor * kernels.asym_size;
int asym_height = kernels.asym_kernels[0].length /asym_width;
sdfa_instance.showArrays(kernels.asym_kernels, asym_width, asym_height, true, imp_kernel_sharp.getTitle()+"-asym");
ImageStack asymStack = sdfa_instance.makeStack(
kernels.asym_kernels,
asym_width,
asym_height,
asymNames);
String asymPath=correctionsParameters.dctKernelDirectory+
Prefs.getFileSeparator()+
correctionsParameters.dctKernelPrefix+
String.format("%02d",chn)+
correctionsParameters.dctAsymSuffix;
msg="Saving asymmetrical convolution kernels "+asymPath;
IJ.showStatus(msg);
if (debugLevel>0) System.out.println(msg);
ImagePlus imp_asym=new ImagePlus(imp_kernel_sharp.getTitle()+"-asym",asymStack);
if (debugLevel > 1) {
imp_asym.getProcessor().resetMinAndMax();
imp_asym.show();
}
fs=new FileSaver(imp_asym);
fs.saveAsTiffStack(asymPath);
// sdfa_instance.showArrays(kernels.asym_kernels, asym_width, asym_height, true, imp_kernel_sharp.getTitle()+"-asym");
}
}
return true;
......@@ -130,6 +189,12 @@ public class EyesisDCT {
final ImageStack kernelStack, // first stack with 3 colors/slices convolution kernels
final int kernelSize, // 64
final EyesisCorrectionParameters.DCTParameters dct_parameters,
/*
final double [][] vignetting,
int vign_width,
int vign_height,
int vign_decimation,
*/
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int globalDebugLevel) // update status info
......@@ -155,9 +220,17 @@ public class EyesisDCT {
final int preTargetSize = 4 * dct_size;
final int targetSize = 2 * dct_size; // normally 16
final double [] anitperiodic_window = createAntiperiodicWindow(dct_size);
/*
final int vgn_step = dct_kernel.img_step/vign_decimation;
final int vgn_width = vign_width/vign_decimation;
final int vgn_height = vign_height/vign_decimation;
final int vgn_left = (vign_width - (dct_kernel.img_step * (kernelNumHor-1)))/(2* vign_decimation); // in decimated pixels
final int vgn_top = (vign_height -(dct_kernel.img_step * (kernelNumVert-1)))/(2* vign_decimation); // in decimated pixels
final double vgn_max = dct_parameters.vignetting_max;
final double vgn_min = vgn_max/dct_parameters.vignetting_range;
*/
final int chn_green = 2; // all others multiply by 4 as there is 1 in 4 Bayer pixels for those, green - by 2
final long startTime = System.nanoTime();
System.out.println("calculateDCTKernel():numberOfKernels="+numberOfKernels);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
......@@ -189,6 +262,20 @@ public class EyesisDCT {
chn=nTile/numberOfKernelsInChn;
tileY =(nTile % numberOfKernelsInChn)/kernelNumHor;
tileX = nTile % kernelNumHor;
/*
if (vignetting != null){
int vh = vgn_left + vgn_step * tileX;
if (vh < 0) vh = 0;
if (vh >= vgn_width) vh = vgn_width -1;
int vv = vgn_top + vgn_step * tileY;
if (vv < 0) vv = 0;
if (vv >= vgn_height) vh = vgn_height -1;
kscale = vignetting[chn][vv*vgn_width+vh];
if (kscale < vgn_min) kscale = vgn_min;
}
kscale = vgn_max/kscale * ((chn == chn_green)? 2:4);
*/
//// kscale = (chn == chn_green)? 2:4;
if (tileX==0) {
if (updateStatus) IJ.showStatus("Processing kernels, channel "+(chn+1)+" of "+nChn+", row "+(tileY+1)+" of "+kernelNumVert);
if (globalDebugLevel>2) System.out.println("Processing kernels, channel "+(chn+1)+" of "+nChn+", row "+(tileY+1)+" of "+kernelNumVert+" : "+IJ.d2s(0.000000001*(System.nanoTime()-startTime),3));
......@@ -224,8 +311,9 @@ public class EyesisDCT {
for (int i = 0; i < pre_target_kernel.length; i++){
s+=pre_target_kernel[i];
}
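// normalize pre_target_kernel to unit sum (multiply by the reciprocal instead of dividing per element)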
s = 1.0 / s;
for (int i = 0; i < pre_target_kernel.length; i++){
pre_target_kernel[i]/=s;
pre_target_kernel[i] *= s;
}
if (globalDebugLevel > 1){ // was already close to 1.0
System.out.println(tileX+"/"+tileY+ " s="+s);
......@@ -289,110 +377,8 @@ public class EyesisDCT {
// ImageStack outStack=new ImageStack(kernelNumHor,kernelNumVert);
return dct_kernel;
}
/*
* final int kernelNumHor=kernelWidth/kernelSize;
final int kernelNumVert=kernelStack.getHeight()/kernelSize;
final int nChn=kernelStack.getSize();
dct_kernel.sym_kernels = new double [nChn][kernelNumHor*kernelNumVert*dct_parameters.dct_size * dct_parameters.dct_size];
dct_kernel.asym_kernels = new double [nChn][kernelNumHor*kernelNumVert*dct_parameters.asym_size * dct_parameters.asym_size];
*
System.arraycopy(currentfX, 0, convolved, 0, convolved.length);
DCT_PARAMETERS.fact_precision,
DCT_PARAMETERS.asym_pixels,
DCT_PARAMETERS.asym_distance,
DCT_PARAMETERS.seed_size);
*/
//processChannelImage
//convolveStackWithKernelStack
// Remove later, copied here as a reference
public ImageStack calculateKernelsNoiseGains (
final ImageStack kernelStack1, // first stack with 3 colors/slices convolution kernels
final ImageStack kernelStack2, // second stack with 3 colors/slices convolution kernels (or null)
final int size, // 128 - fft size, kernel size should be size/2
final double blurSigma,
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int globalDebugLevel) // update status info
{
if (kernelStack1==null) return null;
final boolean useDiff= (kernelStack2 != null);
final int kernelSize=size/2;
final int kernelWidth=kernelStack1.getWidth();
final int kernelNumHor=kernelWidth/(size/2);
final int kernelNumVert=kernelStack1.getHeight()/(size/2);
final int length=kernelNumHor*kernelNumVert;
final int nChn=kernelStack1.getSize();
final float [][] outPixles=new float[nChn][length]; // GLOBAL same as input
int i,j;
for (i=0;i<nChn;i++) for (j=0;j<length;j++) outPixles[i][j]=0.0f;
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
final int numberOfKernels= kernelNumHor*kernelNumVert*nChn;
final int numberOfKernelsInChn=kernelNumHor*kernelNumVert;
final long startTime = System.nanoTime();
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
DoubleGaussianBlur gb=null;
if (blurSigma>0) gb=new DoubleGaussianBlur();
float [] kernelPixels1= null; // will be initialized at first use
float [] kernelPixels2= null; // will be initialized at first use
double [] kernel1= new double[kernelSize*kernelSize];
double [] kernel2= new double[kernelSize*kernelSize];
int chn,tileY,tileX;
int chn0=-1;
int i;
double sum;
for (int nTile = ai.getAndIncrement(); nTile < numberOfKernels; nTile = ai.getAndIncrement()) {
chn=nTile/numberOfKernelsInChn;
tileY =(nTile % numberOfKernelsInChn)/kernelNumHor;
tileX = nTile % kernelNumHor;
if (tileX==0) {
if (updateStatus) IJ.showStatus("Processing kernels, channel "+(chn+1)+" of "+nChn+", row "+(tileY+1)+" of "+kernelNumVert);
if (globalDebugLevel>2) System.out.println("Processing kernels, channel "+(chn+1)+" of "+nChn+", row "+(tileY+1)+" of "+kernelNumVert+" : "+IJ.d2s(0.000000001*(System.nanoTime()-startTime),3));
}
if (chn!=chn0) {
kernelPixels1=(float[]) kernelStack1.getPixels(chn+1);
if (useDiff) kernelPixels2=(float[]) kernelStack2.getPixels(chn+1);
chn0=chn;
}
/* read convolution kernel */
extractOneKernel(kernelPixels1, // array of combined square kernels, each
kernel1, // will be filled, should have correct size before call
kernelNumHor, // number of kernels in a row
tileX, // horizontal number of kernel to extract
tileY); // vertical number of kernel to extract
/* optionally read the second convolution kernel */
if (useDiff) {extractOneKernel(kernelPixels2, // array of combined square kernels, each
kernel2, // will be filled, should have correct size before call
kernelNumHor, // number of kernels in a row
tileX, // horizontal number of kernel to extract
tileY); // vertical number of kernel to extract
for (i=0; i<kernel1.length;i++) kernel1[i]-=kernel2[i];
}
if (blurSigma>0) gb.blurDouble(kernel1, kernelSize, kernelSize, blurSigma, blurSigma, 0.01);
/* Calculate sum of squared kernel1 elements */
sum=0.0;
for (i=0; i<kernel1.length;i++) sum+=kernel1[i]*kernel1[i];
outPixles[chn][tileY*kernelNumHor+tileX]= (float) (Math.sqrt(sum));
// System.out.println("Processing kernels, channel "+(chn+1)+" of "+nChn+", row "+(tileY+1)+" of "+kernelNumVert+" sum="+sum);
}
}
};
}
ImageDtt.startAndJoin(threads);
if (globalDebugLevel > 1) System.out.println("Threads done at "+IJ.d2s(0.000000001*(System.nanoTime()-startTime),3));
/* prepare result stack to return */
ImageStack outStack=new ImageStack(kernelNumHor,kernelNumVert);
for (i=0;i<nChn;i++) {
outStack.addSlice(kernelStack1.getSliceLabel(i+1), outPixles[i]);
}
return outStack;
}
// mostly for testing
//eyesisKernelImage
......@@ -645,6 +631,8 @@ public class EyesisDCT {
kernels[chn] = null;
}
}
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
DttRad2 dtt = new DttRad2(dct_parameters.dct_size);
......@@ -666,8 +654,10 @@ public class EyesisDCT {
ImageStack kernel_sym_stack= imp_kernel_sym.getStack();
ImageStack kernel_asym_stack= imp_kernel_asym.getStack();
System.out.println( " kernel_asym_stack.getWidth() = "+kernel_asym_stack.getWidth()+
if (debugLevel>0){
System.out.println(" kernel_asym_stack.getWidth() = "+kernel_asym_stack.getWidth()+
" kernel_asym_stack.getHeight() = "+kernel_asym_stack.getHeight());
}
int nColors = kernel_sym_stack.getSize();
kernels[chn]= new DCTKernels();
kernels[chn].size = dct_parameters.dct_size;
......@@ -707,6 +697,17 @@ public class EyesisDCT {
kernels[chn].asym_indx = new int [nColors][numVert][numHor][asym_nonzero];
int sym_kernel_inc_index = numHor * dct_size;
int asym_kernel_inc_index = numHor * asym_size;
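// norm_sym_weights below holds cos(PI*i/(2*dct_size))*cos(PI*j/(2*dct_size)), doubled for every
// non-zero frequency index; it is used further down (when dct_parameters.normalize_sym is set) to
// form scale_sym as a weighted sum of the DCT-domain kernel coefficients and normalize the sym kernels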
double [] norm_sym_weights = new double [dct_size*dct_size];
for (int i = 0; i < dct_size; i++){
for (int j = 0; j < dct_size; j++){
double d = Math.cos(Math.PI*i/(2*dct_size))*Math.cos(Math.PI*j/(2*dct_size));
if (i > 0) d*= 2.0;
if (j > 0) d*= 2.0;
norm_sym_weights[i*dct_size+j] = d;
}
}
if (debugLevel>0) {
System.out.println("readDCTKernels() debugLevel = "+debugLevel+
" kernels["+chn+"].size = "+kernels[chn].size+
" kernels["+chn+"].img_step = "+kernels[chn].img_step+
......@@ -714,7 +715,7 @@ public class EyesisDCT {
" nColors = "+ nColors+
" numVert = "+ numVert+
" numHor = "+ numHor);
}
for (int nc = 0; nc < nColors; nc++){
for (int tileY = 0; tileY < numVert; tileY++){
for (int tileX = 0; tileX < numHor; tileX++){
......@@ -725,7 +726,7 @@ public class EyesisDCT {
for (int j = 0; (j < dct_parameters.asym_size) && (indx < asym_nonzero); j++){
double v = kernels[chn].asym_kernels[nc][asym_kernel_start_index + i * asym_kernel_inc_index +j];
if (v!=0.0){
if ((tileY==67) && (tileX==125)) {
if ((debugLevel>0) && (tileY==67) && (tileX==125)) {
System.out.println("i="+i+" j="+j+" v="+v+" indx="+indx+" i * asym_size + j="+(i * asym_size + j));
System.out.println("asym_kernel_start_index + i * asym_kernel_inc_index +j="+(asym_kernel_start_index + i * asym_kernel_inc_index +j));
}
......@@ -735,7 +736,7 @@ public class EyesisDCT {
}
}
}
if ((tileY==67) && (tileX==125)) {
if ((debugLevel>0) && (tileY==67) && (tileX==125)) {
for (int i=0; i<kernels[chn].asym_indx[nc][tileY][tileX].length; i++){
System.out.println("kernels["+chn+"].asym_val["+nc+"]["+tileY+"]["+tileX+"]["+i+"]="+kernels[chn].asym_val[nc][tileY][tileX][i]);
System.out.println("kernels["+chn+"].asym_indx["+nc+"]["+tileY+"]["+tileX+"]["+i+"]="+kernels[chn].asym_indx[nc][tileY][tileX][i]);
......@@ -746,15 +747,17 @@ public class EyesisDCT {
if (dct_parameters.normalize) {
for (int i = 0; i < kernels[chn].asym_val[nc][tileY][tileX].length;i++){
scale_asym += kernels[chn].asym_val[nc][tileY][tileX][i];
if ((tileY==67) && (tileX==125)) {
if ((debugLevel>0) && (tileY==67) && (tileX==125)) {
System.out.println("i="+i+", sum="+scale_asym);
}
}
double k = ((nc == 2)? 1.0:2.0) / scale_asym;
for (int i = 0; i < kernels[chn].asym_val[nc][tileY][tileX].length;i++){
kernels[chn].asym_val[nc][tileY][tileX][i] /= scale_asym;
// kernels[chn].asym_val[nc][tileY][tileX][i] /= scale_asym;
kernels[chn].asym_val[nc][tileY][tileX][i] *= k; // includes correction for different number of pixels in r,b(1/4) and G (2/4)
}
if ((tileY==67) && (tileX==125)) {
System.out.println("sum="+scale_asym+", normalized:");
if ((debugLevel>0) && (tileY==67) && (tileX==125)) {
System.out.println("nc="+nc+" sum="+scale_asym+", normalized:");
for (int i=0; i<kernels[chn].asym_indx[nc][tileY][tileX].length; i++){
System.out.println("kernels["+chn+"].asym_val["+nc+"]["+tileY+"]["+tileX+"]["+i+"]="+kernels[chn].asym_val[nc][tileY][tileX][i]);
......@@ -780,8 +783,16 @@ public class EyesisDCT {
kernels[chn].st_kernels[nc][tileY][tileX][i] *= scale_asym;
}
}
if (dct_parameters.dbg_mode == 0){ // normalize sym kernel regardless of asym:
// +++++++++++++++++++++++++++++++++++++++++
if (dct_parameters.normalize_sym){ // normalize sym kernel regardless of asym:
double scale_sym = 0.0;
for (int i = 0; i< norm_sym_weights.length; i++){
scale_sym += norm_sym_weights[i]*kernels[chn].st_kernels[nc][tileY][tileX][i];
}
/*
for (int i = 0; i< dct_size; i++){
for (int j = 0; j< dct_size; j++){
double d = kernels[chn].st_kernels[nc][tileY][tileX][i*dct_size+j];
......@@ -790,9 +801,14 @@ public class EyesisDCT {
scale_sym +=d;
}
}
*/
for (int i=0; i < kernels[chn].st_kernels[nc][tileY][tileX].length;i++) {
kernels[chn].st_kernels[nc][tileY][tileX][i] /= scale_sym;
}
if ((debugLevel > 0) && (tileY== dct_parameters.tileY) && (tileX==dct_parameters.tileX)) {
System.out.println("chn="+chn+" tileY="+tileY+", tileX"+tileY+" scale_sym="+scale_sym);
}
}
// Make a copy of direct kernels (debug feature, may be removed later)
for (int i = 0; i < dct_size;i++){
......@@ -815,8 +831,10 @@ public class EyesisDCT {
}
}
// Debug output, will be removed later
if (debugLevel > 0) {
sdfa_instance.showArrays(kernels[chn].sym_kernels, sym_width, sym_height, true, symKernelPaths[chn]);
sdfa_instance.showArrays(kernels[chn].asym_kernels, asym_width, asym_height, true, asymKernelPaths[chn]);
}
kernels[chn].sym_kernels = null; // not needed anymore
kernels[chn].asym_kernels = null; // not needed anymore
}
......@@ -882,11 +900,13 @@ public class EyesisDCT {
for (int i=0;i<asym_kernel.length; i++) asym_kernel[i] = 0.0;
for (int i = 0; i<kernels[chn].asym_indx[nc][tileY][tileX].length; i++){
asym_kernel[kernels[chn].asym_indx[nc][tileY][tileX][i]] = kernels[chn].asym_val[nc][tileY][tileX][i];
/*
if ((tileY==67) && (tileX==125)) {
System.out.println("kernels["+chn+"].asym_val["+nc+"]["+tileY+"]["+tileX+"]["+i+"]="+kernels[chn].asym_val[nc][tileY][tileX][i]);
System.out.println("kernels["+chn+"].asym_indx["+nc+"]["+tileY+"]["+tileX+"]["+i+"]="+kernels[chn].asym_indx[nc][tileY][tileX][i]);
System.out.println("asym_kernel["+(kernels[chn].asym_indx[nc][tileY][tileX][i])+"]="+asym_kernel[kernels[chn].asym_indx[nc][tileY][tileX][i]]);
}
*/
}
int asym_kernel_start_index = (asym_kernel_inc_index * tileY + tileX)* asym_size;
for (int i = 0; i < asym_size;i++){
......@@ -915,4 +935,890 @@ public class EyesisDCT {
kernels[chn].asym_kernels = null; // not needed anymore
kernels[chn].sym_direct = null; // not needed anymore
}
// public boolean isChannelEnabled(int channel){
// return ((channel>=0) && (channel<this.usedChannels.length) && this.usedChannels[channel]);
// }
public void processDCTChannelImages(
// EyesisCorrectionParameters.SplitParameters splitParameters,
EyesisCorrectionParameters.DCTParameters dct_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
EyesisCorrectionParameters.NonlinParameters nonlinParameters,
EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CorrectionColorProc.ColorGainsParameters channelGainParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
EyesisCorrectionParameters.EquirectangularParameters equirectangularParameters,
int convolveFFTSize, // 128 - fft size, kernel size should be size/2
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel)
{
this.startTime=System.nanoTime();
String [] sourceFiles=correctionsParameters.getSourcePaths();
boolean [] enabledFiles=new boolean[sourceFiles.length];
for (int i=0;i<enabledFiles.length;i++) enabledFiles[i]=false;
int numFilesToProcess=0;
int numImagesToProcess=0;
for (int nFile=0;nFile<enabledFiles.length;nFile++){
if ((sourceFiles[nFile]!=null) && (sourceFiles[nFile].length()>1)) {
int [] channels={correctionsParameters.getChannelFromSourceTiff(sourceFiles[nFile])};
if (correctionsParameters.isJP4()){
int subCamera= channels[0]- correctionsParameters.firstSubCamera; // to match those in the sensor files
channels=this.eyesisCorrections.pixelMapping.channelsForSubCamera(subCamera);
}
if (channels!=null){
for (int i=0;i<channels.length;i++) if (eyesisCorrections.isChannelEnabled(channels[i])){
if (!enabledFiles[nFile]) numFilesToProcess++;
enabledFiles[nFile]=true;
numImagesToProcess++;
}
}
}
}
if (numFilesToProcess==0){
System.out.println("No files to process (of "+sourceFiles.length+")");
return;
} else {
if (debugLevel>0) System.out.println(numFilesToProcess+ " files to process (of "+sourceFiles.length+"), "+numImagesToProcess+" images to process");
}
double [] referenceExposures=eyesisCorrections.calcReferenceExposures(debugLevel); // multiply each image by this and divide by individual (if not NaN)
int [][] fileIndices=new int [numImagesToProcess][2]; // file index, channel number
int index=0;
for (int nFile=0;nFile<enabledFiles.length;nFile++){
if ((sourceFiles[nFile]!=null) && (sourceFiles[nFile].length()>1)) {
int [] channels={correctionsParameters.getChannelFromSourceTiff(sourceFiles[nFile])};
if (correctionsParameters.isJP4()){
int subCamera= channels[0]- correctionsParameters.firstSubCamera; // to match those in the sensor files
channels=eyesisCorrections.pixelMapping.channelsForSubCamera(subCamera);
}
if (channels!=null){
for (int i=0;i<channels.length;i++) if (eyesisCorrections.isChannelEnabled(channels[i])){
fileIndices[index ][0]=nFile;
fileIndices[index++][1]=channels[i];
}
}
}
}
for (int iImage=0;iImage<fileIndices.length;iImage++){
int nFile=fileIndices[iImage][0];
ImagePlus imp_src=null;
// int srcChannel=correctionsParameters.getChannelFromSourceTiff(sourceFiles[nFile]);
int srcChannel=fileIndices[iImage][1];
if (correctionsParameters.isJP4()){
int subchannel=eyesisCorrections.pixelMapping.getSubChannel(srcChannel);
if (this.correctionsParameters.swapSubchannels01) {
switch (subchannel){
case 0: subchannel=1; break;
case 1: subchannel=0; break;
}
}
if (debugLevel>0) System.out.println("Processing channel "+fileIndices[iImage][1]+" - subchannel "+subchannel+" of "+sourceFiles[nFile]);
ImagePlus imp_composite=eyesisCorrections.JP4_INSTANCE.open(
"", // path,
sourceFiles[nFile],
"", //arg - not used in JP46 reader
true, // un-apply camera color gains
null, // new window
false); // do not show
imp_src=eyesisCorrections.JP4_INSTANCE.demuxImage(imp_composite, subchannel);
if (imp_src==null) imp_src=imp_composite; // not a composite image
// do we need to add any properties?
} else {
imp_src=new ImagePlus(sourceFiles[nFile]);
// (new JP46_Reader_camera(false)).decodeProperiesFromInfo(imp_src); // decode existent properties from info
eyesisCorrections.JP4_INSTANCE.decodeProperiesFromInfo(imp_src); // decode existent properties from info
if (debugLevel>0) System.out.println("Processing "+sourceFiles[nFile]);
}
double scaleExposure=1.0;
if (!Double.isNaN(referenceExposures[nFile]) && (imp_src.getProperty("EXPOSURE")!=null)){
scaleExposure=referenceExposures[nFile]/Double.parseDouble((String) imp_src.getProperty("EXPOSURE"));
// imp_src.setProperty("scaleExposure", scaleExposure); // it may already have channel
if (debugLevel>0) System.out.println("Will scale intensity (to compensate for exposure) by "+scaleExposure);
}
imp_src.setProperty("name", correctionsParameters.getNameFromSourceTiff(sourceFiles[nFile]));
imp_src.setProperty("channel", srcChannel); // it may already have channel
imp_src.setProperty("path", sourceFiles[nFile]); // it may already have channel
// ImagePlus result=processChannelImage( // returns ImagePlus, but it already should be saved/shown
processDCTChannelImage( // returns ImagePlus, but it already should be saved/shown
imp_src, // should have properties "name"(base for saving results), "channel","path"
// splitParameters,
dct_parameters,
debayerParameters,
nonlinParameters,
colorProcParameters,
channelGainParameters,
rgbParameters,
convolveFFTSize, // 128 - fft size, kernel size should be size/2
scaleExposure,
threadsMax, // maximal number of threads to launch
updateStatus,
debugLevel);
// warp result (add support for different color modes)
if (this.correctionsParameters.equirectangular){
if (equirectangularParameters.clearFullMap) eyesisCorrections.pixelMapping.deleteEquirectangularMapFull(srcChannel); // save memory? //removeUnusedSensorData - no, use equirectangular specific settings
if (equirectangularParameters.clearAllMaps) eyesisCorrections.pixelMapping.deleteEquirectangularMapAll(srcChannel); // save memory? //removeUnusedSensorData - no, use equirectangular specific settings
}
//pixelMapping
Runtime.getRuntime().gc();
if (debugLevel>0) System.out.println("Processing image "+(iImage+1)+" (of "+fileIndices.length+") finished at "+
IJ.d2s(0.000000001*(System.nanoTime()-this.startTime),3)+" sec, --- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
if (eyesisCorrections.stopRequested.get()>0) {
System.out.println("User requested stop");
return;
}
}
}
public ImagePlus processDCTChannelImage(
ImagePlus imp_src, // should have properties "name"(base for saving results), "channel","path"
// EyesisCorrectionParameters.SplitParameters splitParameters, // will not be used !
EyesisCorrectionParameters.DCTParameters dct_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
EyesisCorrectionParameters.NonlinParameters nonlinParameters,
EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CorrectionColorProc.ColorGainsParameters channelGainParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
int convolveFFTSize, // 128 - fft size, kernel size should be size/2
double scaleExposure,
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel){
boolean advanced=this.correctionsParameters.zcorrect || this.correctionsParameters.equirectangular;
boolean crop= advanced? true: this.correctionsParameters.crop;
boolean rotate= advanced? false: this.correctionsParameters.rotate;
double JPEG_scale= advanced? 1.0: this.correctionsParameters.JPEG_scale;
boolean toRGB= advanced? true: this.correctionsParameters.toRGB;
// may use this.StartTime to report intermediate steps execution times
String name=(String) imp_src.getProperty("name");
// int channel= Integer.parseInt((String) imp_src.getProperty("channel"));
int channel= (Integer) imp_src.getProperty("channel");
String path= (String) imp_src.getProperty("path");
if (this.correctionsParameters.pixelDefects && (eyesisCorrections.defectsXY!=null)&& (eyesisCorrections.defectsXY[channel]!=null)){
// apply pixel correction
int numApplied= eyesisCorrections.correctDefects(
imp_src,
channel,
debugLevel);
if ((debugLevel>0) && (numApplied>0)) { // reduce verbosity after verified defect correction works
System.out.println("Corrected "+numApplied+" pixels in "+path);
}
}
if (this.correctionsParameters.vignetting){
if ((eyesisCorrections.channelVignettingCorrection==null) || (channel<0) || (channel>=eyesisCorrections.channelVignettingCorrection.length) || (eyesisCorrections.channelVignettingCorrection[channel]==null)){
System.out.println("No vignetting data for channel "+channel);
return null;
}
float [] pixels=(float []) imp_src.getProcessor().getPixels();
if (pixels.length!=eyesisCorrections.channelVignettingCorrection[channel].length){
System.out.println("Vignetting data for channel "+channel+" has "+eyesisCorrections.channelVignettingCorrection[channel].length+" pixels, image "+path+" has "+pixels.length);
return null;
}
// TODO: Move to do it once:
double min_non_zero = 0.0;
for (int i=0;i<pixels.length;i++){
double d = eyesisCorrections.channelVignettingCorrection[channel][i];
if ((d > 0.0) && ((min_non_zero == 0) || (min_non_zero > d))){
min_non_zero = d;
}
}
double max_vign_corr = dct_parameters.vignetting_range*min_non_zero;
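// limit the per-pixel correction to vignetting_range times the smallest non-zero value found above,
// so strongly vignetted pixels are not over-amplified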
System.out.println("Vignetting data: channel="+channel+", min = "+min_non_zero);
for (int i=0;i<pixels.length;i++){
double d = eyesisCorrections.channelVignettingCorrection[channel][i];
if (d > max_vign_corr) d = max_vign_corr;
pixels[i]*=d;
}
}
String title=name+"-"+String.format("%02d", channel);
ImagePlus result=imp_src;
if (debugLevel>1) System.out.println("processing: "+path);
result.setTitle(title+"RAW");
if (!this.correctionsParameters.split){
eyesisCorrections.saveAndShow(result, this.correctionsParameters);
return result;
}
// Generate split parameters for DCT processing mode
EyesisCorrectionParameters.SplitParameters splitParameters = new EyesisCorrectionParameters.SplitParameters(
(dct_parameters.decimation == 2)?1:2, // oversample; // currently source kernels are oversampled
dct_parameters.dct_size/2, // addLeft
dct_parameters.dct_size/2, // addTop
dct_parameters.dct_size/2, // addRight
dct_parameters.dct_size/2 // addBottom
);
// Split into Bayer components, oversample, increase canvas
ImageStack stack= eyesisCorrections.bayerToStack(
result, // source Bayer image, linearized, 32-bit (float))
splitParameters);
String titleFull=title+"-SPLIT";
if (!this.correctionsParameters.debayer) {
result= new ImagePlus(titleFull, stack);
eyesisCorrections.saveAndShow(result, this.correctionsParameters);
return result;
}
// =================
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
if (this.correctionsParameters.deconvolve) { // process with DCT, otherwise use simple debayer
ImageDtt image_dtt = new ImageDtt();
double [][][][] dctdc_data = image_dtt.mdctDcStack(
stack,
dct_parameters,
this,
threadsMax,
debugLevel,
updateStatus);
System.out.println("dctdc_data.length="+dctdc_data.length+" dctdc_data[0].length="+dctdc_data[0].length
+" dctdc_data[0][0].length="+dctdc_data[0][0].length+" dctdc_data[0][0][0].length="+dctdc_data[0][0][0].length);
if (dct_parameters.color_DCT){ // convert RBG -> YPrPb
dctdc_data = image_dtt.dct_color_convert(
dctdc_data,
colorProcParameters.kr,
colorProcParameters.kb,
dct_parameters.sigma_rb, // blur of channels 0,1 (r,b) in addition to 2 (g)
dct_parameters.sigma_y, // blur of Y from G
dct_parameters.sigma_color, // blur of Pr, Pb in addition to Y
threadsMax,
debugLevel);
} else { // just LPF RGB
for (int chn = 0; chn < dctdc_data.length; chn++) {
image_dtt.dct_lpf(
dct_parameters.dbg_sigma,
dctdc_data[chn],
threadsMax,
debugLevel);
}
}
int tilesY = stack.getHeight()/dct_parameters.dct_size - 1;
int tilesX = stack.getWidth()/dct_parameters.dct_size - 1;
if (debugLevel>0){
System.out.println("--tilesX="+tilesX);
System.out.println("--tilesY="+tilesY);
}
double [][] dct_dc = new double [dctdc_data.length][];
double [][] dct_ac = new double [dctdc_data.length][];
for (int chn = 0; chn < dct_dc.length; chn++) {
if (!dct_parameters.color_DCT){ // convert RBG -> YPrPb
dct_dc[chn] = image_dtt.lapped_dct_dcac(
false, // out_ac, // false - output DC, true - output AC
dctdc_data [chn],
threadsMax,
debugLevel);
}
dct_ac[chn] = image_dtt.lapped_dct_dcac(
true, // out_ac, // false - output DC, true - output AC
dctdc_data [chn],
threadsMax,
debugLevel);
}
// System.out.println("dct_dc.length="+dct_dc.length+" dct_ac.length="+dct_ac.length);
if (debugLevel > 0){
sdfa_instance.showArrays(dct_ac,
tilesX*dct_parameters.dct_size,
tilesY*dct_parameters.dct_size,
true,
result.getTitle()+"-DCT_AC");
if (!dct_parameters.color_DCT){ // convert RBG -> YPrPb
sdfa_instance.showArrays(dct_dc,
tilesX,
tilesY,
true,
result.getTitle()+"-DCT_DC");
}
}
double [][] idct_data = new double [dctdc_data.length][];
for (int chn=0; chn<idct_data.length;chn++){
idct_data[chn] = image_dtt.lapped_idctdc(
dctdc_data[chn], // scanline representation of dcd data, organized as dct_size x dct_size tiles
dct_parameters.dct_size, // final int
dct_parameters.dct_window, //window_type
threadsMax,
debugLevel);
}
if (dct_parameters.color_DCT){ // convert RBG -> YPrPb
sdfa_instance.showArrays(
idct_data,
(tilesX + 1) * dct_parameters.dct_size,
(tilesY + 1) * dct_parameters.dct_size,
true,
result.getTitle()+"-IDCTDC-YPrPb");
if (dct_parameters.nonlin && ((dct_parameters.nonlin_y != 0.0) || (dct_parameters.nonlin_c != 0.0))) {
System.out.println("Applying edge emphasis, nonlin_y="+dct_parameters.nonlin_y+
", nonlin_c="+dct_parameters.nonlin_c+", nonlin_corn="+dct_parameters.nonlin_corn);
idct_data = edge_emphasis(
idct_data, // final double [][] yPrPb,
(tilesX + 1) * dct_parameters.dct_size, // final int width,
dct_parameters.dct_size, // final int step, //(does not need to be this) // just for multi-threading efficiency?
dct_parameters.nonlin_max_y, // final double nonlin_max_y = 1.0; // maximal amount of nonlinear line/edge emphasis for Y component
dct_parameters.nonlin_max_c, // final double nonlin_max_c = 1.0; // maximal amount of nonlinear line/edge emphasis for C component
dct_parameters.nonlin_y, // final double nonlin_y, // = 0.01; // amount of nonlinear line/edge emphasis for Y component
dct_parameters.nonlin_c, // final double nonlin_c, // = 0.01; // amount of nonlinear line/edge emphasis for C component
dct_parameters.nonlin_corn, // final double nonlin_corn, // = 0.5; // relative weight for nonlinear corner elements
(dct_parameters.denoise? dct_parameters.denoise_y:0.0), // final double denoise_y, // = 1.0; // maximal total smoothing of the Y post-kernel (will compete with edge emphasis)
(dct_parameters.denoise? dct_parameters.denoise_c:0.0), // final double denoise_c, // = 1.0; // maximal total smoothing of the color differences post-kernel (will compete with edge emphasis)
dct_parameters.denoise_y_corn, // final double denoise_y_corn, // = 0.5; // weight of the 4 corner pixels during denoise y (relative to 4 straight)
dct_parameters.denoise_c_corn, // final double denoise_c_corn, // = 0.5; // weight of the 4 corner pixels during denoise color (relative to 4 straight)
threadsMax, // final int threadsMax, // maximal number of threads to launch
debugLevel); // final int globalDebugLevel)
sdfa_instance.showArrays(
idct_data,
(tilesX + 1) * dct_parameters.dct_size,
(tilesY + 1) * dct_parameters.dct_size,
true,
result.getTitle()+"-EMPH-"+dct_parameters.nonlin_y+"_"+dct_parameters.nonlin_c+"_"+dct_parameters.nonlin_corn);
}
// temporary convert back to RGB
idct_data = YPrPbToRBG(idct_data,
colorProcParameters.kr, // 0.299;
colorProcParameters.kb, // 0.114;
(tilesX + 1) * dct_parameters.dct_size);
} else {
System.out.println("Bypassing nonlinear correction");
}
sdfa_instance.showArrays(idct_data,
(tilesX + 1) * dct_parameters.dct_size,
(tilesY + 1) * dct_parameters.dct_size,
true,
result.getTitle()+"-IDCTDC-RGB");
// convert to ImageStack of 3 slices
String [] sliceNames = {"red", "blue", "green"};
stack = sdfa_instance.makeStack(
idct_data,
(tilesX + 1) * dct_parameters.dct_size,
(tilesY + 1) * dct_parameters.dct_size,
sliceNames); // or use null to get chn-nn slice names
} else { // if (this.correctionsParameters.deconvolve) - here use a simple debayer
System.out.println("Bypassing DCTR-based aberration correction");
debayer_rbg(stack, 0.25); // simple standard 3x3 kernel debayer
}
if (!this.correctionsParameters.colorProc){
result= new ImagePlus(titleFull, stack);
eyesisCorrections.saveAndShow(
result,
this.correctionsParameters);
return result;
}
System.out.println("before colors.1");
//Processing colors - changing stack sequence to r-g-b (was r-b-g)
if (!eyesisCorrections.fixSliceSequence(
stack,
debugLevel)){
if (debugLevel > -1) System.out.println("fixSliceSequence() returned false");
return null;
}
System.out.println("before colors.2");
if (debugLevel > -1){
ImagePlus imp_dbg=new ImagePlus(imp_src.getTitle()+"-"+channel+"-preColors",stack);
eyesisCorrections.saveAndShow(
imp_dbg,
this.correctionsParameters);
}
System.out.println("before colors.3, scaleExposure="+scaleExposure+" scale = "+(255.0/eyesisCorrections.psfSubpixelShouldBe4/eyesisCorrections.psfSubpixelShouldBe4/scaleExposure));
CorrectionColorProc correctionColorProc=new CorrectionColorProc(eyesisCorrections.stopRequested);
double [][] yPrPb=new double [3][];
// if (dct_parameters.color_DCT){
// need to get YPbPr - not RGB here
// } else {
correctionColorProc.processColorsWeights(stack, // just gamma convert? TODO: Cleanup? Convert directly form the linear YPrPb
// 255.0/this.psfSubpixelShouldBe4/this.psfSubpixelShouldBe4, // double scale, // initial maximal pixel value (16))
// 255.0/eyesisCorrections.psfSubpixelShouldBe4/eyesisCorrections.psfSubpixelShouldBe4/scaleExposure, // double scale, // initial maximal pixel value (16))
// 255.0/2/2/scaleExposure, // double scale, // initial maximal pixel value (16))
255.0/scaleExposure, // double scale, // initial maximal pixel value (16))
colorProcParameters,
channelGainParameters,
channel,
null, //correctionDenoise.getDenoiseMask(),
this.correctionsParameters.blueProc,
debugLevel);
if (debugLevel > -1) System.out.println("Processed colors to YPbPr, total number of slices="+stack.getSize());
if (debugLevel > 0){
ImagePlus imp_dbg=new ImagePlus("procColors",stack);
eyesisCorrections.saveAndShow(
imp_dbg,
this.correctionsParameters);
}
float [] fpixels;
int [] slices_YPrPb = {8,6,7};
yPrPb=new double [3][];
for (int n = 0; n < slices_YPrPb.length; n++){
fpixels = (float[]) stack.getPixels(slices_YPrPb[n]);
yPrPb[n] = new double [fpixels.length];
for (int i = 0; i < fpixels.length; i++) yPrPb[n][i] = fpixels[i];
}
// }
/*
String [] slice_names_YPrPb={"Y","Pr","Pb"};
sdfa_instance.showArrays(idct_data,
stack.getWidth(), // (tilesX + 1) * dct_parameters.dct_size,
stack.getHeight(), // (tilesY + 1) * dct_parameters.dct_size,
true,
result.getTitle()+"-YPrPb",
slice_names_YPrPb);
*/
if (toRGB) {
System.out.println("correctionColorProc.YPrPbToRGB");
stack = YPrPbToRGB(yPrPb,
colorProcParameters.kr, // 0.299;
colorProcParameters.kb, // 0.114;
stack.getWidth());
/*
correctionColorProc.YPrPbToRGB(stack,
colorProcParameters.kr, // 0.299;
colorProcParameters.kb, // 0.114;
colorProcParameters.useFirstY?9:8, // int sliceY,
6, // int slicePr,
7// int slicePb
);
*/
title=titleFull; // including "-DECONV" or "-COMBO"
titleFull=title+"-RGB-float";
//Trim stack to just first 3 slices
if (debugLevel > 0){ // 2){
ImagePlus imp_dbg=new ImagePlus("YPrPbToRGB",stack);
eyesisCorrections.saveAndShow(
imp_dbg,
this.correctionsParameters);
}
while (stack.getSize() > 3) stack.deleteLastSlice();
if (debugLevel > -1) System.out.println("Trimming color stack");
} else {
title=titleFull; // including "-DECONV" or "-COMBO"
titleFull=title+"-YPrPb"; // including "-DECONV" or "-COMBO"
if (debugLevel > -1) System.out.println("Using full stack, including YPbPr");
}
result= new ImagePlus(titleFull, stack);
// Crop image to match original one (scaled to oversampling)
if (crop){ // always crop if equirectangular
System.out.println("cropping");
stack = eyesisCorrections.cropStack32(stack,splitParameters);
if (debugLevel > -1) { // 2){
ImagePlus imp_dbg=new ImagePlus("cropped",stack);
eyesisCorrections.saveAndShow(
imp_dbg,
this.correctionsParameters);
}
}
// rotate the result
if (rotate){ // never rotate for equirectangular
stack=eyesisCorrections.rotateStack32CW(stack);
}
if (!toRGB && !this.correctionsParameters.jpeg){ // toRGB set for equirectangular
System.out.println("!toRGB && !this.correctionsParameters.jpeg");
eyesisCorrections.saveAndShow(result, this.correctionsParameters);
return result;
} else { // that's not the end result, save if required
System.out.println("!toRGB && !this.correctionsParameters.jpeg - else");
eyesisCorrections.saveAndShow(result,
eyesisCorrections.correctionsParameters,
eyesisCorrections.correctionsParameters.save32,
false,
eyesisCorrections.correctionsParameters.JPEG_quality); // save, no show
}
// convert to RGB48 (16 bits per color component)
ImagePlus imp_RGB;
stack=eyesisCorrections.convertRGB32toRGB16Stack(
stack,
rgbParameters);
titleFull=title+"-RGB48";
result= new ImagePlus(titleFull, stack);
// ImagePlus imp_RGB24;
result.updateAndDraw();
System.out.println("result.updateAndDraw(), "+titleFull+"-RGB48");
CompositeImage compositeImage=eyesisCorrections.convertToComposite(result);
if (!this.correctionsParameters.jpeg && !advanced){ // RGB48 was the end result
System.out.println("if (!this.correctionsParameters.jpeg && !advanced)");
eyesisCorrections.saveAndShow(compositeImage, this.correctionsParameters);
return result;
} else { // that's not the end result, save if required
System.out.println("if (!this.correctionsParameters.jpeg && !advanced) - else");
eyesisCorrections.saveAndShow(compositeImage, this.correctionsParameters, this.correctionsParameters.save16, false); // save, no show
// eyesisCorrections.saveAndShow(compositeImage, this.correctionsParameters, this.correctionsParameters.save16, true); // save, no show
}
imp_RGB=eyesisCorrections.convertRGB48toRGB24(
stack,
title+"-RGB24",
0, 65536, // r range 0->0, 65536->256
0, 65536, // g range
0, 65536);// b range
if (JPEG_scale!=1.0){
ImageProcessor ip=imp_RGB.getProcessor();
ip.setInterpolationMethod(ImageProcessor.BICUBIC);
ip=ip.resize((int)(ip.getWidth()*JPEG_scale),(int) (ip.getHeight()*JPEG_scale));
imp_RGB= new ImagePlus(imp_RGB.getTitle(),ip);
imp_RGB.updateAndDraw();
}
eyesisCorrections.saveAndShow(imp_RGB, this.correctionsParameters);
return result;
}
public ImageStack YPrPbToRGB(double [][] yPrPb,
double Kr, // 0.299;
double Kb, // 0.114;
int width
) {
int length = yPrPb[0].length;
int height = length/width;
float [] fpixels_r= new float [length];
float [] fpixels_g= new float [length];
float [] fpixels_b= new float [length];
double Kg=1.0-Kr-Kb;
int i;
/**
R = Y + Pr*2.0*(1-Kr)
B = Y + Pb*2.0*(1-Kb)
G = Y + Pr*(-2*Kr*(1-Kr))/Kg + Pb*(-2*Kb*(1-Kb))/Kg
*/
double KPrR= 2.0*(1-Kr);
double KPbB= 2.0*(1-Kb);
double KPrG= -2.0*Kr*(1-Kr)/Kg;
double KPbG= -2.0*Kb*(1-Kb)/Kg;
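// Editor's note: with the ITU-R BT.601 values quoted in the argument comments (Kr=0.299, Kb=0.114,
// so Kg=0.587) these constants work out to KPrR=1.402, KPbB=1.772, KPrG~=-0.714136, KPbG~=-0.344136,
// i.e. the standard YPbPr-to-RGB conversion coefficients.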
double Y,Pr,Pb;
for (i=0;i<length;i++) {
Pb=yPrPb[2][i];
Pr=yPrPb[1][i];
Y =yPrPb[0][i];
fpixels_r[i]=(float) (Y+ Pr*KPrR);
fpixels_b[i]=(float) (Y+ Pb*KPbB);
fpixels_g[i]=(float) (Y+ Pr*KPrG + Pb*KPbG);
}
ImageStack stack=new ImageStack(width,height);
stack.addSlice("red", fpixels_r);
stack.addSlice("green", fpixels_g);
stack.addSlice("blue", fpixels_b);
return stack;
}
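// Hedged usage sketch (editor's illustration, not part of the original commit): converting a YPrPb
// buffer back to an RGB float stack with the BT.601 coefficients assumed above. "yPrPb" and "width"
// stand for data already available in the caller.
/*
double [][] yPrPb = ...;                                 // [3][width*height]: Y, Pr, Pb planes
ImageStack rgb = YPrPbToRGB(yPrPb, 0.299, 0.114, width);
new ImagePlus("YPrPb-RGB", rgb).show();                  // slices "red", "green", "blue"
*/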
public double [][] YPrPbToRBG(double [][] yPrPb,
double Kr, // 0.299;
double Kb, // 0.114;
int width
) {
int length = yPrPb[0].length;
// int height = length/width;
double [][]rbg = new double[3][length];
double Kg=1.0-Kr-Kb;
int i;
/**
R = Y + Pr*2.0*(1-Kr)
B = Y + Pb*2.0*(1-Kb)
G = Y + Pr*(-2*Kr*(1-Kr))/Kg + Pb*(-2*Kb*(1-Kb))/Kg
*/
double KPrR= 2.0*(1-Kr);
double KPbB= 2.0*(1-Kb);
double KPrG= -2.0*Kr*(1-Kr)/Kg;
double KPbG= -2.0*Kb*(1-Kb)/Kg;
double Y,Pr,Pb;
for (i=0;i<length;i++) {
Pb=yPrPb[2][i];
Pr=yPrPb[1][i];
Y =yPrPb[0][i];
rbg[0][i] = Y + Pr*KPrR;
rbg[1][i] = Y + Pb*KPbB;
rbg[2][i] = Y + Pr*KPrG + Pb*KPbG;
}
return rbg;
}
public double [][] edge_emphasis(
final double [][] yPrPb,
final int width,
final int step, // just for multi-threading efficiency?
final double nonlin_max_y, // = 1.0; // maximal amount of nonlinear line/edge emphasis for Y component
final double nonlin_max_c, // = 1.0; // maximal amount of nonlinear line/edge emphasis for C component
final double nonlin_y, // = 0.01; // amount of nonlinear line/edge emphasis for Y component
final double nonlin_c, // = 0.01; // amount of nonlinear line/edge emphasis for C component
final double nonlin_corn, // = 0.5; // relative weight for nonlinear corner elements
final double denoise_y, // = 1.0; // maximal total smoothing of the Y post-kernel (will compete with edge emphasis)
final double denoise_c, // = 1.0; // maximal total smoothing of the color differences post-kernel (will compete with edge emphasis)
final double denoise_y_corn, // = 0.3; // weight of the 4 corner pixels during denoise y (relative to 4 straight)
final double denoise_c_corn, // = 0.3; // weight of the 4 corner pixels during denoise c (relative to 4 straight)
final int threadsMax, // maximal number of threads to launch
final int globalDebugLevel)
{
final double [][] yPrPb_new = new double [yPrPb.length][yPrPb[0].length];
final int height = yPrPb[0].length/width;
final int tilesY = (height + step - 1) / step;
final int tilesX = (width + step - 1) / step;
final int nTiles=tilesX*tilesY;
final int [][] probes = {{1,7},{3,5},{2,6},{0,8}}; // indices in [012/345/678] 3x3 square to calculate squared sums of differences
final int [][] kerns = {{ 1, 3, 5, 7}, {1, 3, 5, 7}, {0, 2, 6, 8}, {0, 2, 6, 8}}; // indices in [012/345/678] 3x3 square to convolve data
final double [][] kernsw = {{-1.0,1.0,1.0,-1.0},{1.0,-1.0,-1.0,1.0},{1.0,-1.0,-1.0,1.0},{-1.0,1.0,1.0,-1.0}}; // weights of kern elements
final int [] neib_indices = {-width-1,-width,-width+1,-1,0,1,width-1,width,width+1};
final double [][] kernsw_y = new double [kernsw.length][];
final double [][] kernsw_c = new double [kernsw.length][];
final double [] denoise_kern_y = {
0.125*denoise_y*denoise_y_corn, 0.125*denoise_y*(1.0-denoise_y_corn), 0.125*denoise_y*denoise_y_corn,
0.125*denoise_y*(1.0-denoise_y_corn), -0.5* denoise_y, 0.125*denoise_y*(1.0-denoise_y_corn),
0.125*denoise_y*denoise_y_corn, 0.125*denoise_y*(1.0-denoise_y_corn), 0.125*denoise_y*denoise_y_corn};
final double [] denoise_kern_c = {
0.125*denoise_c*denoise_c_corn, 0.125*denoise_c*(1.0-denoise_c_corn), 0.125*denoise_c*denoise_c_corn,
0.125*denoise_c*(1.0-denoise_c_corn), -0.5* denoise_c, 0.125*denoise_c*(1.0-denoise_c_corn),
0.125*denoise_c*denoise_c_corn, 0.125*denoise_c*(1.0-denoise_c_corn), 0.125*denoise_c*denoise_c_corn};
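// Editor's note: each denoise kernel sums to zero (4 corner + 4 straight weights total 0.5*denoise,
// center is -0.5*denoise), so flat areas are left unchanged and only local deviations are smoothed.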
for (int n = 0; n < kernsw.length; n++){
kernsw_y[n] = new double [kernsw[n].length];
kernsw_c[n] = new double [kernsw[n].length];
for (int i = 0; i < kernsw[n].length; i++){
double dy = nonlin_y * ((n>=2)? nonlin_corn: 1.0);
double dc = nonlin_c * ((n>=2)? nonlin_corn: 1.0);
kernsw_y[n][i] = kernsw[n][i] * dy* dy;
kernsw_c[n][i] = kernsw[n][i] * dc* dc;
}
}
if (globalDebugLevel > 0){
System.out.println("edge_emphasis(): nonlin_y="+nonlin_y+
", nonlin_c="+nonlin_c+", nonlin_corn="+nonlin_corn);
for (int n=0; n<kernsw_y.length; n++){
System.out.print("kernsw_y["+n+"={");
for (int i = 0; i < kernsw_y[n].length; i++){
System.out.print(kernsw_y[n][i]);
if (i == (kernsw_y[n].length - 1)){
System.out.println("}");
} else {
System.out.print(", ");
}
}
}
for (int n=0; n<kernsw_c.length; n++){
System.out.print("kernsw_c["+n+"={");
for (int i = 0; i < kernsw_c[n].length; i++){
System.out.print(kernsw_c[n][i]);
if (i == (kernsw_c[n].length - 1)){
System.out.println("}");
} else {
System.out.print(", ");
}
}
}
}
final Thread[] threads = ImageDtt.newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
int tileY,tileX;
double [] neibs = new double[9]; // pixels around current, first Y, then each color diff
double [] kern_y = new double[9]; // weights of neighbors to add to the current for Y
double [] kern_c = new double[9]; // weights of neighbors to add to the current for colors diffs
int center_index = 4;
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
tileY = nTile/tilesX;
tileX = nTile - tileY * tilesX;
int y1 = (tileY +1) * step;
if (y1 > height) y1 = height;
int x1 = (tileX +1) * step;
if (x1 > width) x1 = width;
for (int y = tileY * step; y < y1; y++) {
for (int x = tileX * step; x < x1; x++) {
int indx = y*width + x;
for (int n = 0; n < yPrPb.length; n++) {
yPrPb_new[n][indx] = yPrPb[n][indx]; // default - just copy old value
}
if ((y > 0) && (y < (height - 1)) && (x > 0) && (x < (width - 1))) { // only correct those not on the edge
// read Y pixels around current
for (int i = 0; i < neibs.length; i++){
neibs[i] = yPrPb[0][indx+neib_indices[i]];
}
for (int i = 0; i < kern_y.length; i++){
kern_y[i] = 0.0;
kern_c[i] = 0.0;
}
// calculate amount of each pattern
for (int n = 0; n < probes.length; n++){
double squared=0.0;
for (int i = 0; i < probes[n].length; i++){
squared += neibs[probes[n][i]];
}
squared -= probes[n].length * neibs[center_index];
squared *= squared; // now it is the square of the sum of differences from the center
for (int i = 0; i < kernsw[n].length; i++){
int ni = kerns[n][i];
kern_y[ni]+= squared*kernsw_y[n][i];
kern_c[ni]+= squared*kernsw_c[n][i];
}
}
if (denoise_y > 0.0){
for (int i = 0; i < kern_y.length; i++){
kern_y[i]+= denoise_kern_y[i];
}
}
if (denoise_c > 0.0){
for (int i = 0; i < kern_c.length; i++){
kern_c[i]+= denoise_kern_c[i];
}
}
if (nonlin_max_y != 0){
double sum = 0;
for (int i = 0; i < kern_y.length; i++){
// excluding the center (i == 4) from the sum just allows a larger total denoise amount
if (i != 4) sum += Math.abs(kern_y[i]);
}
if (sum > nonlin_max_y){
sum = nonlin_max_y/sum;
for (int i = 0; i < kern_y.length; i++){
kern_y[i] *= sum;
}
}
}
if (nonlin_max_c != 0){
double sum = 0;
for (int i = 0; i < kern_c.length; i++){
sum += Math.abs(kern_c[i]);
}
if (sum > nonlin_max_c){
sum = nonlin_max_c/sum;
for (int i = 0; i < kern_c.length; i++){
kern_c[i] *= sum;
}
}
}
for (int i = 0; i<kern_y.length; i++){
yPrPb_new[0][indx] += neibs[i]*kern_y[i];
}
for (int n = 1;n < 3; n++){ //color components
for (int i = 0; i < neibs.length; i++){
yPrPb_new[n][indx] += yPrPb[n][indx+neib_indices[i]]*kern_c[i]; // convolve this chroma plane's own neighborhood
}
}
}
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
return yPrPb_new;
}
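// Hedged usage sketch (editor's illustration): the numeric values are those suggested by the
// parameter comments above; step=64 is an assumption, and "yPrPb", "width", "threadsMax" are
// assumed caller data.
/*
double [][] yPrPbSharp = edge_emphasis(
yPrPb, width,
64,          // step (tile size, for multi-threading)
1.0, 1.0,    // nonlin_max_y, nonlin_max_c
0.01, 0.01,  // nonlin_y, nonlin_c
0.5,         // nonlin_corn
1.0, 1.0,    // denoise_y, denoise_c
0.3, 0.3,    // denoise_y_corn, denoise_c_corn
threadsMax, 0);
*/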
public void debayer_rbg(
ImageStack stack_rbg){
debayer_rbg(stack_rbg, 1.0);
}
// Simple in-place debayer by (bi)linear approximation; assumes [0R/00], [00/B0], [G0/0G] slices (R, B, G order)
public void debayer_rbg(
ImageStack stack_rbg,
double scale)
{
int width = stack_rbg.getWidth();
int height = stack_rbg.getHeight();
float [] fpixels_r = (float[]) stack_rbg.getPixels(1);
float [] fpixels_b = (float[]) stack_rbg.getPixels(2);
float [] fpixels_g = (float[]) stack_rbg.getPixels(3);
int [][][] av_row = {
{{1,1},{-1,1},{-1,-1}},
{{1,1},{-1,1},{-1,-1}},
{{1,1},{-1,1},{-1,-1}}};
int [][][] av_col = {
{{ width, width},{ width, width},{ width, width}},
{{-width, width},{-width, width},{-width, width}},
{{-width,-width},{-width,-width},{-width,-width}}};
int [][][] av_xcross = {
{{ width+1, width+1, width+1, width+1}, { width-1, width+1, width-1, width+1}, { width-1, width-1, width-1, width-1}},
{{-width+1, width+1,-width+1, width+1}, {-width-1,-width+1, width-1, width+1}, {-width-1,-width-1, width-1, width-1}},
{{-width+1,-width+1,-width+1,-width+1}, {-width-1,-width+1,-width-1,-width+1}, {-width-1,-width-1,-width-1,-width-1}}};
int [][][] av_plus = {
{{ width, 1, 1, width}, { width, -1, 1, width }, { width, -1, -1, width }},
{{-width, 1, 1, width}, {-width, -1, 1, width }, {-width, -1, -1, width }},
{{-width, 1, 1,-width}, {-width, -1, 1,-width }, {-width, -1, -1,-width }}};
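// Editor's note: row_type/col_type (0 = first row/column, 1 = interior, 2 = last) select per-border
// variants of these offset tables, so averaging near an edge only reads pixels that exist: the
// unavailable neighbor's offset is replaced by one pointing back into the image.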
for (int y = 0; y < height; y++){
boolean odd_row = (y & 1) != 0;
int row_type = (y==0)? 0: ((y==(height-1))?2:1);
for (int x = 0; x < width; x++){
int indx = y*width+x;
boolean odd_col = (x & 1) != 0;
int col_type = (x==0)? 0: ((x==(width-1))?2:1);
if (odd_row){
if (odd_col){ // GB site
fpixels_r[indx] = 0.5f*(
fpixels_r[indx+av_col[row_type][col_type][0]]+
fpixels_r[indx+av_col[row_type][col_type][1]]);
fpixels_b[indx] = 0.5f*(
fpixels_b[indx+av_row[row_type][col_type][0]]+
fpixels_b[indx+av_row[row_type][col_type][1]]);
} else { // !odd col // B site
fpixels_r[indx] = 0.25f*(
fpixels_r[indx+av_xcross[row_type][col_type][0]]+
fpixels_r[indx+av_xcross[row_type][col_type][1]]+
fpixels_r[indx+av_xcross[row_type][col_type][2]]+
fpixels_r[indx+av_xcross[row_type][col_type][3]]);
fpixels_g[indx] = 0.25f*(
fpixels_g[indx+av_plus[row_type][col_type][0]]+
fpixels_g[indx+av_plus[row_type][col_type][1]]+
fpixels_g[indx+av_plus[row_type][col_type][2]]+
fpixels_g[indx+av_plus[row_type][col_type][3]]);
}
} else { // !odd_row
if (odd_col){ // R site
fpixels_b[indx] = 0.25f*(
fpixels_b[indx+av_xcross[row_type][col_type][0]]+
fpixels_b[indx+av_xcross[row_type][col_type][1]]+
fpixels_b[indx+av_xcross[row_type][col_type][2]]+
fpixels_b[indx+av_xcross[row_type][col_type][3]]);
fpixels_g[indx] = 0.25f*(
fpixels_g[indx+av_plus[row_type][col_type][0]]+
fpixels_g[indx+av_plus[row_type][col_type][1]]+
fpixels_g[indx+av_plus[row_type][col_type][2]]+
fpixels_g[indx+av_plus[row_type][col_type][3]]);
} else { // !odd col // G site
fpixels_r[indx] = 0.5f*(
fpixels_r[indx+av_row[row_type][col_type][0]]+
fpixels_r[indx+av_row[row_type][col_type][1]]);
fpixels_b[indx] = 0.5f*(
fpixels_b[indx+av_col[row_type][col_type][0]]+
fpixels_b[indx+av_col[row_type][col_type][1]]);
}
}
}
}
if (scale !=1.0){
for (int i = 0; i< fpixels_r.length; i++){
fpixels_r[i] *= scale;
fpixels_b[i] *= scale;
fpixels_g[i] *= scale;
}
}
}
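// Hedged usage sketch (editor's illustration): the stack is expected to hold three full-size float
// slices in R, B, G order, each populated only at its Bayer positions as described above; after the
// call every slice is densely filled. "stack_rbg" is an assumed caller variable.
/*
debayer_rbg(stack_rbg);        // plain (bi)linear fill-in
debayer_rbg(stack_rbg, 0.25);  // same, additionally scaling all three channels by 0.25
*/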
}
......@@ -499,6 +499,8 @@ private Panel panel1,panel2,panel3,panel4,panel5,panel5a, panel6,panel7,panelPos
addButton("Select kernels image", panelDct1, color_configure);
addButton("Create DCT kernels", panelDct1, color_process);
addButton("Read DCT kernels", panelDct1, color_process);
addButton("Setup DCT parameters", panelDct1, color_configure);
addButton("DCT process files", panelDct1, color_process);
add(panelDct1);
}
pack();
......@@ -2790,6 +2792,7 @@ private Panel panel1,panel2,panel3,panel4,panel5,panel5a, panel6,panel7,panelPos
DEBUG_LEVEL);
}
// System.out.println("dct_dc.length="+dct_dc.length+" dct_ac.length="+dct_ac.length);
if (DEBUG_LEVEL > 0){
SDFA_INSTANCE.showArrays(dct_ac,
tilesX*DCT_PARAMETERS.dct_size,
tilesY*DCT_PARAMETERS.dct_size,
......@@ -2800,6 +2803,7 @@ private Panel panel1,panel2,panel3,panel4,panel5,panel5a, panel6,panel7,panelPos
tilesY,
true,
DBG_IMP.getTitle()+"-DCT_DC");
}
double [][] idct_data = new double [dctdc_data.length][];
for (int chn=0; chn<idct_data.length;chn++){
idct_data[chn] = image_dtt.lapped_idctdc(
......@@ -2815,8 +2819,110 @@ private Panel panel1,panel2,panel3,panel4,panel5,panel5a, panel6,panel7,panelPos
true,
DBG_IMP.getTitle()+"-IDCTDC");
return;
/* ======================================================================== */
} else if (label.equals("Setup DCT parameters")) {
DCT_PARAMETERS.showDialog();
return;
/* ======================================================================== */
} else if (label.equals("DCT process files")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
if (EYESIS_DCT == null){
EYESIS_DCT = new EyesisDCT (
EYESIS_CORRECTIONS,
CORRECTION_PARAMETERS,
DCT_PARAMETERS);
if (DEBUG_LEVEL > 0){
System.out.println("Created new EyesisDCT instance, will need to read DCT kernels");
}
}
String configPath=null;
if (EYESIS_CORRECTIONS.correctionsParameters.saveSettings) {
configPath=EYESIS_CORRECTIONS.correctionsParameters.selectResultsDirectory(
true,
true);
if (configPath==null){
String msg="No results directory selected, command aborted";
System.out.println("Warning: "+msg);
IJ.showMessage("Warning",msg);
return;
}
configPath+=Prefs.getFileSeparator()+"autoconfig";
try {
saveTimestampedProperties(
configPath, // full path or null
null, // use as default directory if path==null
true,
PROPERTIES);
} catch (Exception e){
String msg="Failed to save configuration to "+configPath+", command aborted";
System.out.println("Error: "+msg);
IJ.showMessage("Error",msg);
return;
}
}
EYESIS_CORRECTIONS.initSensorFiles(DEBUG_LEVEL);
int numChannels=EYESIS_CORRECTIONS.getNumChannels();
NONLIN_PARAMETERS.modifyNumChannels(numChannels);
CHANNEL_GAINS_PARAMETERS.modifyNumChannels(numChannels);
/*
if (CORRECTION_PARAMETERS.deconvolve && (NONLIN_PARAMETERS.noiseGainPower!=0)) {
EYESIS_CORRECTIONS.updateImageNoiseGains(
NONLIN_PARAMETERS, //EyesisCorrectionParameters.NonlinParameters nonlinParameters,
CONVOLVE_FFT_SIZE, //int fftSize, // 128 - fft size, kernel size should be size/2
THREADS_MAX, // int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, // boolean updateStatus,
DEBUG_LEVEL); //int globalDebugLevel){
}
*/
if (!EYESIS_DCT.DCTKernelsAvailable()){
if (DEBUG_LEVEL > 0){
System.out.println("Reading/converting DCT kernels");
}
EYESIS_DCT.readDCTKernels(
DCT_PARAMETERS,
CONVOLVE_FFT_SIZE/2,
THREADS_MAX,
UPDATE_STATUS, // update status info
DEBUG_LEVEL);
if (DEBUG_LEVEL > 1){
EYESIS_DCT.showKernels(); // show restored kernels
}
}
// EYESIS_CORRECTIONS.processChannelImages(
EYESIS_DCT.processDCTChannelImages(
// SPLIT_PARAMETERS, // EyesisCorrectionParameters.SplitParameters splitParameters,
DCT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
NONLIN_PARAMETERS, //EyesisCorrectionParameters.NonlinParameters nonlinParameters,
COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CHANNEL_GAINS_PARAMETERS, //CorrectionColorProc.ColorGainsParameters channelGainParameters,
RGB_PARAMETERS, //EyesisCorrectionParameters.RGBParameters rgbParameters,
EQUIRECTANGULAR_PARAMETERS, // EyesisCorrectionParameters.EquirectangularParameters equirectangularParameters,
CONVOLVE_FFT_SIZE, //int convolveFFTSize, // 128 - fft size, kernel size should be size/2
THREADS_MAX, //final int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, //final boolean updateStatus,
DEBUG_LEVEL); //final int debugLevel);
if (configPath!=null) {
saveTimestampedProperties( // save config again
configPath, // full path or null
null, // use as default directory if path==null
true,
PROPERTIES);
}
return;
/* ======================================================================== */
} else if (label.equals("DCT test 3")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
int n = 32;
......@@ -3431,6 +3537,9 @@ private Panel panel1,panel2,panel3,panel4,panel5,panel5a, panel6,panel7,panelPos
EYESIS_DCT.createDCTKernels(
DCT_PARAMETERS,
/*
EYESIS_CORRECTIONS.pixelMapping,
*/
CONVOLVE_FFT_SIZE/2,
THREADS_MAX,
UPDATE_STATUS, // update status info
......
......@@ -129,7 +129,7 @@ public class ImageDtt {
final int tilesY=dct_data.length/(dct_width*dct_size);
final int width= (tilesX+1)*dct_size;
final int height= (tilesY+1)*dct_size;
if (globalDebugLevel > 0) {
System.out.println("lapped_idct():dct_width="+dct_width);
System.out.println("lapped_idct():tilesX= "+tilesX);
System.out.println("lapped_idct():tilesY= "+tilesY);
......@@ -141,6 +141,7 @@ public class ImageDtt {
}
debug0 = Math.sqrt(debug0)/dct_data.length;
System.out.println("lapped_idct():debug0= "+debug0+" (dct_data.length= "+dct_data.length+")");
}
final double [] dpixels = new double[width*height];
final Thread[] threads = newThreadArray(threadsMax);
......@@ -282,20 +283,16 @@ public class ImageDtt {
dtt0.set_window(window_type);
final double [] dciii = dtt0.dttt_iii (dc, dct_size);
final double [] dciiie = dtt0.dttt_iiie (dc, dct_size);
if (color ==2) {
if ((globalDebugLevel > 0) && (color ==2)) {
double [][]dcx = {dc,dciii,dciiie, dtt0.dttt_ii(dc, dct_size),dtt0.dttt_iie(dc, dct_size)};
showDoubleFloatArrays sdfa_instance0 = new showDoubleFloatArrays(); // just for debugging?
sdfa_instance0.showArrays(dcx, dct_size, dct_size, true, "dcx");
}
/*
tile_out=dtt.dttt_iv (tile_folded, dct_mode, dct_size);
*/
if (globalDebugLevel > 0) {
System.out.println("lapped_dctdc(): width="+width+" height="+height);
}
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
......@@ -325,7 +322,7 @@ public class ImageDtt {
int asym_center = dct_kernels.asym_size/2; // 7 for 15
kernelTileY = kernel_margin + (tileY * dct_size) / dct_kernels.img_step;
kernelTileX = kernel_margin + (tileX * dct_size) / dct_kernels.img_step;
if ((tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2)) {
if ((globalDebugLevel > 0) && (tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2)) {
System.out.println("kernelTileY="+kernelTileY+" kernelTileX="+kernelTileX+" width="+width);
}
for (int i = 0; i < n2; i++){
......@@ -336,7 +333,7 @@ public class ImageDtt {
double [] asym_val = dct_kernels.asym_val[color][kernelTileY][kernelTileX];
for (int indx = 0; indx < asym_indx.length; indx++){
int xy = asym_indx[indx];
if ((tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2)) {
if ((globalDebugLevel > 0) && (tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2)) {
System.out.println("i="+i+" j="+j+" indx="+indx+" xy="+xy);
}
if (xy >= 0) {
......@@ -349,7 +346,7 @@ public class ImageDtt {
if (y >= height) y = (height - 2) + (y & 1);
if (x >= width) x = (width - 2) + (x & 1);
tile_in[i*n2 + j] += asym_val[indx] * dpixels[ y * width + x];
if ((tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2)) {
if ((globalDebugLevel > 0) && (tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2)) {
System.out.println("dy= "+dy+" dx="+dx+" x = "+x+" y="+y+" y*width + x="+(y*width + x));
System.out.println("asym_val["+indx+"]="+asym_val[indx]+
" dpixels["+(y * width + x)+"]="+ dpixels[ y * width + x]+
......@@ -371,26 +368,6 @@ public class ImageDtt {
int ady = (dy>=0)?dy:-dy;
int sgny = 1;
int y = i - dy;
/*
if (y < 0){
y = -1 -y;
sgny = -sgny;
if (y < 0) {
y = 0;
sgny = 0;
}
}
if (y >= n2){
y = 2*n2 - y;
sgny = -sgny;
if (y >= n2) {
y = n2-1;
sgny = 0;
}
}
*/
if (y < 0){
y = -1 -y;
sgny = -sgny;
......@@ -403,25 +380,6 @@ public class ImageDtt {
int adx = (dx >= 0)? dx:-dx;
int sgn = sgny;
int x = j - dx;
/*
if (x < 0){
x = -1 -x;
sgn = -sgn;
if (x <0) {
x = 0;
sgn = 0;
}
}
if (x >= n2){
x = 2*n2 - x;
sgn = -sgn;
if (x >= n2) {
x = n2-1;
sgn = 0;
}
}
*/
if (x < 0){
x = -1 -x;
sgn = -sgn;
......@@ -432,7 +390,7 @@ public class ImageDtt {
}
sym_conv[indx] += sgn*dir_sym[ady * dct_size + adx] * tile_in[y * n2 + x];
s0+=dir_sym[ady * dct_size + adx];
if ((tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2) &&
if ((globalDebugLevel > 0) && (tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2) &&
(i == dct_size) && (j== dct_size)) {
System.out.println("i="+i+" j="+j+" dy="+dy+" dx="+dx+" ady="+ady+" adx="+adx+
" y="+y+" x="+x+" sgny="+sgny+" sgn="+sgn+
......@@ -447,7 +405,7 @@ public class ImageDtt {
}
if ((tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2)) {
if ((globalDebugLevel > 0) && (tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2)) {
// if ((tileY == debug_tileY) && (tileX == debug_tileX)) {
double [][] pair = {tile_in, sym_conv};
sdfa_instance.showArrays(pair, n2, n2, true, "dconv-X"+tileX+"Y"+tileY+"C"+color);
......@@ -529,12 +487,13 @@ public class ImageDtt {
}
if ((tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2)) {
if ((dct_kernels!=null) && (tileY == debug_tileY) && (tileX == debug_tileX) && (color == 2)) {
double [][] dbg_tile = {
dct_kernels.st_direct[color][kernelTileY][kernelTileX],
dct_kernels.st_kernels[color][kernelTileY][kernelTileX],
tile_out_copy,
tile_out};
if (globalDebugLevel > 0){
sdfa_instance.showArrays(tile_in, n2, n2, "tile_in-X"+tileX+"Y"+tileY+"C"+color);
sdfa_instance.showArrays(dbg_tile, dct_size, dct_size, true, "dbg-X"+tileX+"Y"+tileY+"C"+color);
System.out.println("tileY="+tileY+" tileX="+tileX+" kernelTileY="+kernelTileY+" kernelTileX="+kernelTileX);
......@@ -552,6 +511,7 @@ public class ImageDtt {
}
System.out.println("s0="+s0+" s1="+s1+" s2="+s2+" s3="+s3);
}
}
System.arraycopy(tile_out, 0, dctdc_data[tileY][tileX], 0, tile_out.length);
dctdc_data[tileY][tileX][tile_out.length] = dc;
}
......@@ -623,7 +583,22 @@ public class ImageDtt {
}
}
}
if (globalDebugLevel>2) {
// normalize
double sum = 0;
for (int i = 0; i < dct_size; i++){
for (int j = 0; j < dct_size; j++){
double d = filter_direct[i*dct_size+j];
d*=Math.cos(Math.PI*i/(2*dct_size))*Math.cos(Math.PI*j/(2*dct_size));
if (i > 0) d*= 2.0;
if (j > 0) d*= 2.0;
sum +=d;
}
}
for (int i = 0; i<filter_direct.length; i++){
filter_direct[i] /= sum;
}
if (globalDebugLevel > -1) {
for (int i=0; i<filter_direct.length;i++){
System.out.println("dct_lpf_psf() "+i+": "+filter_direct[i]);
}
......@@ -631,14 +606,16 @@ public class ImageDtt {
DttRad2 dtt = new DttRad2(dct_size);
// final double [] filter= dtt.dttt_iii(filter_direct);
final double [] filter= dtt.dttt_iiie(filter_direct);
final double [] dbg_filter= dtt.dttt_ii(filter);
for (int i=0; i < filter.length;i++) filter[i] *= dct_size;
if (globalDebugLevel>2) {
if (globalDebugLevel > -1) {
for (int i=0; i<filter.length;i++){
System.out.println("dct_lpf_psf() "+i+": "+filter[i]);
}
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
double [][] ff = {filter_direct,filter};
double [][] ff = {filter_direct,filter,dbg_filter};
sdfa_instance.showArrays(ff, dct_size,dct_size, true, "filter_lpf");
}
......@@ -662,6 +639,119 @@ public class ImageDtt {
startAndJoin(threads);
}
public double [][][][] dct_color_convert(
final double [][][][] dctdc_data,
final double kr,
final double kb,
final double sigma_rb, // blur of channels 0,1 (r,b) in addition to 2 (g)
final double sigma_y, // blur of Y from G
final double sigma_color, // blur of Pr, Pb in addition to Y
final int threadsMax, // maximal number of threads to launch
final int globalDebugLevel)
{
final int tilesY=dctdc_data[0].length;
final int tilesX=dctdc_data[0][0].length;
final int nTiles=tilesX*tilesY;
final int dct_size = (int) Math.round(Math.sqrt(dctdc_data[0][0][0].length-1));
final int dct_len = dct_size*dct_size;
final double [][][][] yPrPb = new double [3][tilesY][tilesX][dct_len];
final double [][][] filters = new double [3][3][dct_len];
final double kg = 1.0 - kr - kb;
final double [][] filters_proto_direct = new double[3][dct_len];
final double [][] filters_proto = new double[3][];
System.out.println("dct_color_convert(): kr="+kr+" kg="+kg+" kb="+kb);
final double [] sigmas = {sigma_rb,sigma_y,sigma_color};
for (int n = 0; n<3; n++) {
double s = 0.0;
for (int i = 0; i < dct_size; i++){
for (int j = 0; j < dct_size; j++){
double d;
if (sigmas[n] == 0.0) d = ((i == 0) && (j==0))? 1.0:0.0;
else d = Math.exp(-(i*i+j*j)/(2*sigmas[n]));
filters_proto_direct[n][i*dct_size+j] = d;
if (i > 0) d*=2;
if (j > 0) d*=2;
s += d;
}
}
System.out.println("dct_color_convert(): sigmas["+n+"]="+sigmas[n]+", sum="+s);
for (int i = 0; i < dct_len; i++){
filters_proto_direct[n][i] /=s;
}
}
DttRad2 dtt = new DttRad2(dct_size);
for (int i = 0; i < filters_proto.length; i++){
filters_proto[i] = dtt.dttt_iiie(filters_proto_direct[i]);
System.out.println("filters_proto.length="+filters_proto.length+" filters_proto["+i+"].length="+filters_proto[i].length+" dct_len="+dct_len+" dct_size="+dct_size);
for (int j=0; j < dct_len; j++) filters_proto[i][j] *= 2*dct_size;
}
if (globalDebugLevel > 0) {
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
double [][] ff = {filters_proto_direct[0],filters_proto_direct[1],filters_proto_direct[2],filters_proto[0],filters_proto[1],filters_proto[2]};
sdfa_instance.showArrays(ff, dct_size,dct_size, true, "filters_proto");
}
double [][] coeff_arr ={
{ kr, kb, kg }, // Y = R*Kr + B*Kb + G*Kg (column order: R, B, G)
{ 0.5, -kb/(2.0*(1-kr)), -kg/(2.0*(1-kr))}, // Pr = R* 0.5 - G* Kg/(2.0*(1-Kr)) - B *Kb/(2.0*(1-Kr))
{-kr/(2.0*(1-kb)), 0.5, -kg/(2.0*(1-kb))}}; // Pb = B* 0.5 - G* Kg/(2.0*(1-Kb)) - R *Kr/(2.0*(1-Kb))
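// Editor's note: with the BT.601 values kr=0.299, kb=0.114 (so kg=0.587) this matrix becomes,
// in (R, B, G) column order:
//   Y  = { 0.299,     0.114,     0.587    }
//   Pr = { 0.5,      -0.081312, -0.418688 }
//   Pb = {-0.168736,  0.5,      -0.331264 }
// i.e. the standard RGB-to-PrPb coefficients.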
for (int k = 0; k < dct_len; k++){
for (int i = 0; i < coeff_arr.length; i++){
for (int j = 0; j < coeff_arr.length; j++){
filters[i][j][k] = coeff_arr[i][j]* filters_proto[1][k]; // minimal blur - for all sigma_y
if (i > 0){
filters[i][j][k] *= filters_proto[2][k]; // for Pr, Pb sigma_color
}
if (j <2){ // all but green
filters[i][j][k] *= filters_proto[0][k]; // for R,B sigma_rb
}
}
}
}
if (globalDebugLevel > 0) {
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
double [][] ff = {
filters[0][0], filters[0][1], filters[0][2],
filters[1][0], filters[1][1], filters[1][2],
filters[2][0], filters[2][1], filters[2][2]};
sdfa_instance.showArrays(ff, dct_size,dct_size, true, "filters");
}
final Thread[] threads = newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
int tileY,tileX;
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
tileY = nTile/tilesX;
tileX = nTile - tileY * tilesX;
for (int i = 0; i < filters.length; i++){
for (int k = 0; k <dct_len; k++){
yPrPb[i][tileY][tileX][k]=0.0;
for (int j = 0; j < filters[i].length; j++){
yPrPb[i][tileY][tileX][k] += filters[i][j][k] * dctdc_data[j][tileY][tileX][k];
// yPrPb[i][tileY][tileX][k] += filters[i][j][k] * dctdc_data[2][tileY][tileX][k]; // make it grey level (r=g=b)
}
}
}
}
}
};
}
startAndJoin(threads);
return yPrPb;
}
// Restore DC
......@@ -679,11 +769,12 @@ public class ImageDtt {
final int width= (tilesX+1)*dct_size;
final int height= (tilesY+1)*dct_size;
if (globalDebugLevel > 0) {
System.out.println("lapped_idct():tilesX= "+tilesX);
System.out.println("lapped_idct():tilesY= "+tilesY);
System.out.println("lapped_idct():width= "+width);
System.out.println("lapped_idct():height= "+height);
}
final double [] dpixels = new double[width*height];
final Thread[] threads = newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
......@@ -718,7 +809,8 @@ public class ImageDtt {
tileX = tiles_list[nser.get()][nTile][0];
tileY = tiles_list[nser.get()][nTile][1];
System.arraycopy(dctdc_data[tileY][tileX], 0, tile_in, 0, tile_in.length);
double dc = dctdc_data[tileY][tileX][tile_in.length];
// will get rid of the DC eventually
double dc = (dctdc_data[tileY][tileX].length > tile_in.length)?dctdc_data[tileY][tileX][tile_in.length]:0.0;
tile_dct=dtt.dttt_iv (tile_in, 0, dct_size);
for (int i = 0; i<tile_dct.length; i++) tile_dct[i] += dc;
tile_out=dtt.unfold_tile(tile_dct, dct_size);
......
......@@ -175,7 +175,28 @@ import ij.process.*;
}
}
}
public ImageStack makeStack(double[][] pixels, int width, int height) {
return makeStack(pixels, width,height, null);
}
public ImageStack makeStack(double[][] pixels, int width, int height, String [] titles) {
float [] fpixels;
ImageStack array_stack=new ImageStack(width,height);
for (int i=0;i<pixels.length;i++) if (pixels[i]!=null) {
if (pixels[i].length!=(width*height)){
System.out.println("showArrays(): pixels["+i+"].length="+pixels[i].length+" != width (+"+width+") * height("+height+")="+(width*height));
return null;
}
fpixels=new float[pixels[i].length];
for (int j=0;j<fpixels.length;j++) fpixels[j]=(float)pixels[i][j];
if (titles!=null){
array_stack.addSlice(titles[i], fpixels);
} else {
array_stack.addSlice("chn-"+i, fpixels);
}
}
return array_stack;
}
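// Hedged usage sketch (editor's illustration; "yPrPb", "width" and "height" are assumed caller data):
/*
String [] titles = {"Y", "Pr", "Pb"};
ImageStack st = makeStack(yPrPb, width, height, titles);
if (st != null) new ImagePlus("YPrPb", st).show();
*/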
public ImagePlus [] makeArrays(double[][] pixels, int width, int height, String title) {
int i,j;
......