Commit 3e7f255e authored by Andrey Filippov

aberration correction with clt

parent 2e50bfb7
...@@ -1880,13 +1880,22 @@ public class EyesisCorrectionParameters {
public int ishift_x = 0; // debug feature - shift source image by this pixels left
public int ishift_y = 0; // debug feature - shift source image by this pixels down
public double fat_zero = 0.0; // modify phase correlation to prevent division by very small numbers
public double corr_sigma = 0.8; // LPF correlation sigma
public boolean norm_kern = true; // normalize kernels
public double novignetting_r = 0.2644; // red gain in the center of sensor calibration R (instead of vignetting)
public double novignetting_g = 0.3733; // green gain in the center of sensor calibration G
public double novignetting_b = 0.2034; // blue gain in the center of sensor calibration B
public double scale_r = 1.0; // extra gain correction after vignetting or nonvignetting, before other processing
public double scale_g = 1.0;
public double scale_b = 1.0;
public double vignetting_max = 0.4; // value in vignetting data to correspond to 1x in the kernel
public double vignetting_range = 5.0; // do not try to correct vignetting less than vignetting_max/vignetting_range
public int kernel_step = 16; // source kernels step in pixels (have 1 kernel margin on each side)
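The fixed ("no vignetting") calibration gains and the extra per-color scales above are combined into per-channel multipliers further down, in processCLTChannelImage(). A minimal sketch of that relationship, not from this commit, using only the field values defined here:

static double[] fixedColorGains(double novignetting_r, double novignetting_g, double novignetting_b,
                                double scale_r, double scale_g, double scale_b) {
    // Normalize by the center-of-sensor calibration gain of each color, then apply the
    // extra correction (e.g. for light temperature) on top of it.
    double kr = scale_r / novignetting_r;
    double kg = scale_g / novignetting_g;
    double kb = scale_b / novignetting_b;
    return new double[] {kr, kg, kb};
}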
public CLTParameters(){}
public void setProperties(String prefix,Properties properties){
properties.setProperty(prefix+"transform_size", this.transform_size+"");
properties.setProperty(prefix+"clt_window", this.clt_window+"");
properties.setProperty(prefix+"shift_x", this.shift_x+"");
properties.setProperty(prefix+"shift_y", this.shift_y+"");
...@@ -1898,6 +1907,17 @@ public class EyesisCorrectionParameters {
properties.setProperty(prefix+"ishift_y", this.ishift_y+"");
properties.setProperty(prefix+"fat_zero", this.fat_zero+"");
properties.setProperty(prefix+"corr_sigma", this.corr_sigma+"");
properties.setProperty(prefix+"norm_kern", this.norm_kern+"");
properties.setProperty(prefix+"novignetting_r", this.novignetting_r+"");
properties.setProperty(prefix+"novignetting_g", this.novignetting_g+"");
properties.setProperty(prefix+"novignetting_b", this.novignetting_b+"");
properties.setProperty(prefix+"scale_r", this.scale_r+"");
properties.setProperty(prefix+"scale_g", this.scale_g+"");
properties.setProperty(prefix+"scale_b", this.scale_b+"");
properties.setProperty(prefix+"vignetting_max", this.vignetting_max+"");
properties.setProperty(prefix+"vignetting_range", this.vignetting_range+"");
properties.setProperty(prefix+"kernel_step", this.kernel_step+"");
}
public void getProperties(String prefix,Properties properties){
if (properties.getProperty(prefix+"transform_size")!=null) this.transform_size=Integer.parseInt(properties.getProperty(prefix+"transform_size"));
...@@ -1912,10 +1932,21 @@ public class EyesisCorrectionParameters {
if (properties.getProperty(prefix+"ishift_y")!=null) this.ishift_y=Integer.parseInt(properties.getProperty(prefix+"ishift_y"));
if (properties.getProperty(prefix+"fat_zero")!=null) this.fat_zero=Double.parseDouble(properties.getProperty(prefix+"fat_zero"));
if (properties.getProperty(prefix+"corr_sigma")!=null) this.corr_sigma=Double.parseDouble(properties.getProperty(prefix+"corr_sigma"));
if (properties.getProperty(prefix+"norm_kern")!=null) this.norm_kern=Boolean.parseBoolean(properties.getProperty(prefix+"norm_kern"));
if (properties.getProperty(prefix+"novignetting_r")!=null) this.novignetting_r=Double.parseDouble(properties.getProperty(prefix+"novignetting_r"));
if (properties.getProperty(prefix+"novignetting_g")!=null) this.novignetting_g=Double.parseDouble(properties.getProperty(prefix+"novignetting_g"));
if (properties.getProperty(prefix+"novignetting_b")!=null) this.novignetting_b=Double.parseDouble(properties.getProperty(prefix+"novignetting_b"));
if (properties.getProperty(prefix+"scale_r")!=null) this.scale_r=Double.parseDouble(properties.getProperty(prefix+"scale_r"));
if (properties.getProperty(prefix+"scale_g")!=null) this.scale_g=Double.parseDouble(properties.getProperty(prefix+"scale_g"));
if (properties.getProperty(prefix+"scale_b")!=null) this.scale_b=Double.parseDouble(properties.getProperty(prefix+"scale_b"));
if (properties.getProperty(prefix+"vignetting_max")!=null) this.vignetting_max=Double.parseDouble(properties.getProperty(prefix+"vignetting_max"));
if (properties.getProperty(prefix+"vignetting_range")!=null) this.vignetting_range=Double.parseDouble(properties.getProperty(prefix+"vignetting_range"));
if (properties.getProperty(prefix+"kernel_step")!=null) this.kernel_step=Integer.parseInt(properties.getProperty(prefix+"kernel_step"));
}
public boolean showDialog() {
GenericDialog gd = new GenericDialog("Set DCT parameters");
gd.addNumericField("DCT size", this.transform_size, 0); gd.addNumericField("Transform size (default 8)", this.transform_size, 0);
gd.addNumericField("Lapped transform window type (0- rectangular, 1 - sinus)", this.clt_window, 0);
gd.addNumericField("shift_x", this.shift_x, 4);
gd.addNumericField("shift_y", this.shift_y, 4);
...@@ -1927,6 +1958,17 @@ public class EyesisCorrectionParameters {
gd.addNumericField("ishift_y: shift source image by this pixels down", this.ishift_y, 0);
gd.addNumericField("Modify phase correlation to prevent division by very small numbers", this.fat_zero, 4);
gd.addNumericField("LPF correlation sigma ", this.corr_sigma, 3);
gd.addCheckbox ("Normalize kernels ", this.norm_kern);
gd.addNumericField("Red gain in the center of sensor calibration R (instead of vignetting)", this.novignetting_r, 4);
gd.addNumericField("Green gain in the center of sensor calibration G (instead of vignetting)",this.novignetting_g, 4);
gd.addNumericField("Blue gain in the center of sensor calibration B (instead of vignetting)", this.novignetting_b, 4);
gd.addNumericField("Extra red correction to compensate for light temperature", this.scale_r, 4);
gd.addNumericField("Extra green correction to compensate for light temperature", this.scale_g, 4);
gd.addNumericField("Extra blue correction to compensate for light temperature", this.scale_b, 4);
gd.addNumericField("Value (max) in vignetting data to correspond to 1x in the kernel", this.vignetting_max, 3);
gd.addNumericField("Do not try to correct vignetting smaller than this fraction of max", this.vignetting_range, 3);
gd.addNumericField("Kernel step in pixels (has 1 kernel margin on each side)", this.kernel_step, 0);
WindowTools.addScrollBars(gd);
gd.showDialog();
...@@ -1943,6 +1985,17 @@ public class EyesisCorrectionParameters {
this.ishift_y= (int) gd.getNextNumber();
this.fat_zero = gd.getNextNumber();
this.corr_sigma = gd.getNextNumber();
this.norm_kern= gd.getNextBoolean();
this.novignetting_r= gd.getNextNumber();
this.novignetting_g= gd.getNextNumber();
this.novignetting_b= gd.getNextNumber();
this.scale_r= gd.getNextNumber();
this.scale_g= gd.getNextNumber();
this.scale_b= gd.getNextNumber();
this.vignetting_max= gd.getNextNumber();
this.vignetting_range= gd.getNextNumber();
this.kernel_step= (int) gd.getNextNumber();
return true;
}
}
...@@ -2272,17 +2325,16 @@ public class EyesisCorrectionParameters {
this.antiwindow= gd.getNextBoolean();
this.skip_sym= gd.getNextBoolean();
this.convolve_direct= gd.getNextBoolean();
this.novignetting_r= gd.getNextNumber();
this.novignetting_g= gd.getNextNumber();
this.novignetting_b= gd.getNextNumber();
this.scale_r= gd.getNextNumber();
this.scale_g= gd.getNextNumber();
this.scale_b= gd.getNextNumber();
this.vignetting_max= gd.getNextNumber();
this.vignetting_range= gd.getNextNumber();
this.post_debayer= gd.getNextBoolean();
this.color_DCT= gd.getNextBoolean();
this.sigma_rb= gd.getNextNumber();
...
...@@ -2078,6 +2078,51 @@ public class EyesisCorrections {
return outStack;
}
// return 2-d double array instead of the stack
public double [][]bayerToDoubleStack(ImagePlus imp, // source bayer image, linearized, 32-bit (float))
EyesisCorrectionParameters.SplitParameters splitParameters){ // if null - no margins, no oversample
if (imp==null) return null;
boolean adv = splitParameters != null;
int oversample = adv? splitParameters.oversample : 1;
int addTop= adv?splitParameters.addTop: 0;
int addLeft= adv?splitParameters.addLeft: 0;
int addBottom= adv?splitParameters.addBottom: 0;
int addRight= adv?splitParameters.addRight: 0;
String [] chnNames={"Red","Blue","Green"}; //Different sequence than RGB!!
int nChn=chnNames.length;
ImageProcessor ip=imp.getProcessor();
int inWidth=imp.getWidth();
int inHeight=imp.getHeight();
int outHeight=inHeight*oversample + addTop + addBottom;
int outWidth= inWidth* oversample + addLeft + addRight;
int outLength=outWidth*outHeight;
double [][] outPixels=new double[nChn][outLength];
float [] pixels = (float[]) ip.getPixels();
int chn,y,x,i,index;
int bayerPeriod=2*oversample;
int ovrWidth= inWidth * oversample;
int ovrHeight= inHeight * oversample;
for (chn=0;chn<nChn;chn++) for (i=0;i<outPixels[chn].length;i++) outPixels[chn][i]=0.0f;
/* Can be optimized - currently it calculates the input address even for the zero elements */
for (index=0; index<outLength; index++) {
y=(index / outWidth) - addTop;
x=(index % outWidth) - addLeft;
if (y<0) y= (bayerPeriod-((-y) % bayerPeriod))%bayerPeriod;
else if (y>=ovrHeight) y= ovrHeight-bayerPeriod +((y-ovrHeight) % bayerPeriod);
if (x<0) x= (bayerPeriod-((-x) % bayerPeriod))%bayerPeriod;
else if (x>=ovrWidth) x= ovrWidth-bayerPeriod +((x-ovrWidth) % bayerPeriod);
if (((y% oversample)==0) && ((x% oversample)==0)) {
x/=oversample;
y/=oversample;
chn=((x&1)==(y&1))?2:(((x&1)!=0)?0:1);
outPixels[chn][index]=pixels[y*inWidth+x];
}
}
return outPixels;
}
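The channel index expression above maps a GR/BG Bayer mosaic into the {"Red","Blue","Green"} plane order used by this class. A minimal standalone restatement of that rule, not from this commit, assuming oversample == 1:

// Returns 0 for Red, 1 for Blue, 2 for Green - same order as chnNames = {"Red","Blue","Green"}.
static int bayerChannel(int x, int y) {
    if ((x & 1) == (y & 1)) return 2;   // (even,even) and (odd,odd) positions are green
    return ((x & 1) != 0) ? 0 : 1;      // odd column on an even row is red, even column on an odd row is blue
}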
//double [] DENOISE_MASK=null;
...
...@@ -39,6 +39,7 @@ public class EyesisDCT {
public EyesisCorrectionParameters.DCTParameters dctParameters = null;
public DCTKernels [] kernels = null;
double [][][][][][] clt_kernels = null;
public int extra_items = 8; // number of extra items saved with kernels (center offset: partial, full, derivatives)
public ImagePlus eyesisKernelImage = null;
public long startTime;
...@@ -83,6 +84,11 @@ public class EyesisDCT {
return kernels != null;
}
public boolean CLTKernelsAvailable(){
return clt_kernels != null;
}
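For orientation, a sketch (not from this commit) of how the clt_kernels array appears to be organized, based on the allocation code and debug printouts below; the meaning of extra items 4..7 is an assumption taken from the "derivatives" remark in the extra_items comment.

// clt_kernels[color][tileY][tileX][q][i]
//   q = 0..3 : the four symmetrized DTT kernels (SS, AS, SA, AA per clt_symmetrize_kernel), dtt_size*dtt_size values each
//   q = 4    : extra_items values: [0] center_x, [1] center_y (residual shift to the nearest 1/2 pixel),
//              [2] full_dx, [3] full_dy (full center shift in sensor pixels), [4..7] derivatives (assumed)
static double[][][][][] allocateCltKernels(int colors, int tilesY, int tilesX, int dtt_size, int extra_items) {
    double[][][][][] k = new double[colors][tilesY][tilesX][5][];
    for (int c = 0; c < colors; c++)
        for (int ty = 0; ty < tilesY; ty++)
            for (int tx = 0; tx < tilesX; tx++) {
                for (int q = 0; q < 4; q++) k[c][ty][tx][q] = new double[dtt_size * dtt_size];
                k[c][ty][tx][4] = new double[extra_items];
            }
    return k;
}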
public DCTKernels calculateDCTKernel (
final ImageStack kernelStack, // first stack with 3 colors/slices convolution kernels
final int kernelSize, // 64
...@@ -335,7 +341,6 @@ public class EyesisDCT {
return true;
}
public double [][][][][] calculateCLTKernel ( // per color/per tileY/ per tileX/per quadrant (plus offset as 5-th)/per pixel
final ImageStack kernelStack, // first stack with 3 colors/slices convolution kernels
final int kernelSize, // 64
...@@ -359,7 +364,7 @@ public class EyesisDCT {
for (int n = 0; n<4; n++){
clt_kernels[chn][tileY][tileX][n] = new double [dtt_len];
}
clt_kernels[chn][tileY][tileX][4] = new double [2]; clt_kernels[chn][tileY][tileX][4] = new double [extra_items];
}
}
}
...@@ -378,14 +383,14 @@ public class EyesisDCT {
}
final long startTime = System.nanoTime();
System.out.println("calculateDCTKernel():numberOfKernels="+numberOfKernels); System.out.println("calculateCLTKernel():numberOfKernels="+numberOfKernels);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
float [] kernelPixels= null; // will be initialized at first use NOT yet?
double [] kernel= new double[kernelSize*kernelSize];
int centered_len = (2*dtt_size-1) * (2*dtt_size-1);
double [] kernel_centered = new double [centered_len +2]; double [] kernel_centered = new double [centered_len + extra_items];
ImageDtt image_dtt = new ImageDtt();
int chn,tileY,tileX;
DttRad2 dtt = new DttRad2(dtt_size);
...@@ -412,22 +417,31 @@ public class EyesisDCT {
if ((globalDebugLevel > 0) && (tileY == clt_parameters.tileY/2) && (tileX == clt_parameters.tileX/2)) {
int length=kernel.length;
int size=(int) Math.sqrt(length);
double s =0.0;
for (int i=0;i<kernel.length;i++) s+=kernel[i];
System.out.println("calculateCLTKernel(): sum(kernel_raw)="+s);
sdfa_instance.showArrays(
kernel,
size,
size,
"raw_kernel-"+chn+"-X"+(clt_parameters.tileX/2)+"-Y"+(clt_parameters.tileY/2));
}
// now has 64x64
image_dtt.clt_convert_double_kernel( // converts double resolution kernel
kernel, // double [] src_kernel, //
kernel_centered, // double [] dst_kernel, // should be (2*dtt_size-1) * (2*dtt_size-1) +2 size - kernel and dx, dy to the nearest 1/2 pixels kernel_centered, // double [] dst_kernel, // should be (2*dtt_size-1) * (2*dtt_size-1) + extra_items size - kernel and dx, dy to the nearest 1/2 pixels
// also actual full center shifts in sensor pixels
kernelSize, // int src_size, // 64
dtt_size); // 8
if ((globalDebugLevel > 0) && (tileY == clt_parameters.tileY/2) && (tileX == clt_parameters.tileX/2)) {
int length=kernel_centered.length;
int size=(int) Math.sqrt(length);
double s =0.0;
int klen = (2*dtt_size-1) * (2*dtt_size-1);
for (int i = 0; i < klen; i++) s += kernel_centered[i];
System.out.println("calculateCLTKernel(): sum(kernel_centered)="+s);
sdfa_instance.showArrays(
kernel_centered,
size,
...@@ -437,25 +451,32 @@ public class EyesisDCT {
if (norm_sym_weights != null) {
image_dtt.clt_normalize_kernel( //
kernel_centered, // double [] kernel, // should be (2*dtt_size-1) * (2*dtt_size-1) +2 size (last 2 are not modified) kernel_centered, // double [] kernel, // should be (2*dtt_size-1) * (2*dtt_size-1) + extra_items size (last (2*dtt_size-1) are not modified)
norm_sym_weights, // double [] window, // normalizes result kernel * window to have sum of elements == 1.0
dtt_size); // 8 dtt_size,
(globalDebugLevel > 0) && (tileY == clt_parameters.tileY/2) && (tileX == clt_parameters.tileX/2)); // 8
if ((globalDebugLevel > 0) && (tileY == clt_parameters.tileY/2) && (tileX == clt_parameters.tileX/2)) {
int length=kernel_centered.length;
int size=(int) Math.sqrt(length);
double s =0.0;
int klen = (2*dtt_size-1) * (2*dtt_size-1);
for (int i = 0; i < klen; i++) s += kernel_centered[i];
System.out.println("calculateCLTKernel(): sum(kernel_normalized)="+s);
sdfa_instance.showArrays(
kernel_centered,
size,
size,
"kernel_normalized-"+chn+"-X"+(clt_parameters.tileX/2)+"-Y"+(clt_parameters.tileY/2));
}
}
image_dtt.clt_symmetrize_kernel( //
kernel_centered, // double [] kernel, // should be (2*dtt_size-1) * (2*dtt_size-1) +2 size (last 2 are not modified) kernel_centered, // double [] kernel, // should be (2*dtt_size-1) * (2*dtt_size-1) +4 size (last 4 are not modified)
clt_kernels[chn][tileY][tileX], // double [][] sym_kernels, // set of 4 SS, AS, SA, AA kernels, each dtt_size * dtt_size (may have 5-th with center shift
dtt_size); // 8
clt_kernels[chn][tileY][tileX][4][0] = kernel_centered [centered_len + 0]; for (int i = 0; i < extra_items; i++){
clt_kernels[chn][tileY][tileX][4][1] = kernel_centered [centered_len + 1]; clt_kernels[chn][tileY][tileX][4][i] = kernel_centered [centered_len + i];
}
if ((globalDebugLevel > 0) && (tileY == clt_parameters.tileY/2) && (tileX == clt_parameters.tileX/2)) {
double [][] dbg_clt = {
clt_kernels[chn][tileY][tileX][0],
...@@ -497,7 +518,9 @@ public class EyesisDCT {
"tileX = "+clt_parameters.tileX+" ("+(clt_parameters.tileX/2)+") "+
"tileY = "+clt_parameters.tileY+" ("+(clt_parameters.tileY/2)+") "+
"center_x = "+clt_kernels[chn][tileY][tileX][4][0]+", "+
"center_y = "+clt_kernels[chn][tileY][tileX][4][1]); "center_y = "+clt_kernels[chn][tileY][tileX][4][1]+", "+
"full_dx = "+clt_kernels[chn][tileY][tileX][4][2]+", "+
"full_dy = "+clt_kernels[chn][tileY][tileX][4][3]);
}
}
}
...@@ -506,11 +529,17 @@ public class EyesisDCT {
ImageDtt.startAndJoin(threads);
if (globalDebugLevel > 1) System.out.println("Threads done at "+IJ.d2s(0.000000001*(System.nanoTime()-startTime),3));
System.out.println("1.Threads done at "+IJ.d2s(0.000000001*(System.nanoTime()-startTime),3));
/* prepare result stack to return */ // Calculate differential offsets to interpolate for tiles between kernel centers
ImageDtt image_dtt = new ImageDtt();
image_dtt.clt_fill_coord_corr(
clt_parameters.kernel_step, // final int kern_step, // distance between kernel centers, in pixels.
clt_kernels, // final double [][][][] clt_data,
threadsMax, // maximal number of threads to launch
globalDebugLevel);
return clt_kernels;
}
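clt_fill_coord_corr() is called above to prepare "differential offsets to interpolate for tiles between kernel centers"; its implementation is not in this hunk. As a rough illustration only (an assumption, not the actual method), an offset for a tile that falls between kernel grid nodes spaced kern_step pixels apart could be obtained by bilinear interpolation:

// Rough illustration only - not the actual clt_fill_coord_corr() code.
// node_dx holds one offset component per kernel grid node; px, py are pixel coordinates
// strictly inside the grid, so ix+1 and iy+1 are valid node indices.
static double interpolateOffset(double[][] node_dx, double px, double py, int kern_step) {
    int ix = (int) Math.floor(px / kern_step);
    int iy = (int) Math.floor(py / kern_step);
    double fx = px / kern_step - ix;
    double fy = py / kern_step - iy;
    return (1 - fy) * ((1 - fx) * node_dx[iy][ix]     + fx * node_dx[iy][ix + 1]) +
                 fy * ((1 - fx) * node_dx[iy + 1][ix] + fx * node_dx[iy + 1][ix + 1]);
}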
public double [][] flattenCLTKernels ( // per color, save 4 kernels and displacement as (2*dtt_size+1)*(2*dtt_size) tiles in an image (last row - 4 values shift x,y)
final double [][][][][] kernels, // per color/per tileY/ per tileX/per quadrant (plus offset as 5-th)/per pixel
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
...@@ -575,7 +604,7 @@ public class EyesisDCT {
0,
clt_flat[chn],
(tileY*tileHeight + 2 * dtt_size) * width + (tileX * tileWidth),
2); extra_items);
}
}
};
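The address used above for the extra items, (tileY*tileHeight + 2*dtt_size)*width + tileX*tileWidth, together with the read-back loop in the next hunk, suggests the flattened layout below; tileWidth = 2*dtt_size and tileHeight = 2*dtt_size + 1 are assumptions consistent with the "(2*dtt_size+1)*(2*dtt_size) tiles" comment, not definitions taken from this commit.

// Sketch of one flattened kernel tile (assumed layout). With dtt_size = 8 each tile is 16 wide
// and 17 tall: rows 0..15 hold the four dtt_size x dtt_size quadrants, row 16 holds extra_items.
static int extraItemAddress(int tileX, int tileY, int i, int dtt_size, int width) {
    int tileWidth  = 2 * dtt_size;       // two quadrants side by side
    int tileHeight = 2 * dtt_size + 1;   // two quadrant rows plus one row of extra items
    return (tileY * tileHeight + 2 * dtt_size) * width + tileX * tileWidth + i;
}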
...@@ -654,7 +683,7 @@ public class EyesisDCT {
for (int n = 0; n<4; n++){
clt_kernels[chn][tileY][tileX][n] = new double [dtt_len];
}
clt_kernels[chn][tileY][tileX][4] = new double [2]; clt_kernels[chn][tileY][tileX][4] = new double [extra_items];
}
}
}
...@@ -685,8 +714,9 @@ public class EyesisDCT {
clt_kernels[chn][tileY][tileX][3][indx] = flat_kernels[chn][baddr + dtt_size * width + dtt_size];
}
}
clt_kernels[chn][tileY][tileX][4][0] = flat_kernels[chn][(tileY*tileHeight + 2 * dtt_size) * width + (tileX * tileWidth)]; for (int i = 0; i < extra_items; i ++) {
clt_kernels[chn][tileY][tileX][4][1] = flat_kernels[chn][(tileY*tileHeight + 2 * dtt_size) * width + (tileX * tileWidth) + 1]; clt_kernels[chn][tileY][tileX][4][i] = flat_kernels[chn][(tileY*tileHeight + 2 * dtt_size) * width + (tileX * tileWidth) + i];
}
}
}
};
...@@ -697,15 +727,6 @@ public class EyesisDCT {
/* prepare result stack to return */
return clt_kernels;
}
/*
System.out.println("calculateCLTKernel() chn="+chn+" "+
"tileX = "+clt_parameters.tileX+" ("+(clt_parameters.tileX/2)+") "+
"tileY = "+clt_parameters.tileY+" ("+(clt_parameters.tileY/2)+") "+
"center_x = "+clt_kernels[chn][tileY][tileX][4][0]+", "+
"center_y = "+clt_kernels[chn][tileY][tileX][4][1]);
*/
public boolean createCLTKernels( public boolean createCLTKernels(
...@@ -750,6 +771,7 @@ public class EyesisDCT {
threadsMax, // maximal number of threads to launch
updateStatus,
debugLevel); // update status info
double [][] flat_kernels = flattenCLTKernels ( // per color, save 4 kernels and displacement as (2*dtt_size+1)*(2*dtt_size) tiles in an image (last row - shift x,y)
kernels, // per color/per tileY/ per tileX/per quadrant (plus offset as 5-th)/per pixel
threadsMax, // maximal number of threads to launch
...@@ -1261,7 +1283,7 @@ public class EyesisDCT {
} else {
scale_asym = 1.0;
}
// Compensate for Bayer pattern where there are half as many R,B pixels as G (green, red, blue)
double k = ((nc == 2)? 1.0:2.0) / scale_asym;
for (int i = 0; i < kernels[chn].asym_val[nc][tileY][tileX].length;i++){
// kernels[chn].asym_val[nc][tileY][tileX][i] /= scale_asym;
...@@ -2463,6 +2485,657 @@ public class EyesisDCT {
}
}
public void processCLTChannelImages(
// EyesisCorrectionParameters.DCTParameters dct_parameters,
EyesisCorrectionParameters.CLTParameters clt_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
EyesisCorrectionParameters.NonlinParameters nonlinParameters,
EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CorrectionColorProc.ColorGainsParameters channelGainParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
EyesisCorrectionParameters.EquirectangularParameters equirectangularParameters,
int convolveFFTSize, // 128 - fft size, kernel size should be size/2
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel)
{
this.startTime=System.nanoTime();
String [] sourceFiles=correctionsParameters.getSourcePaths();
boolean [] enabledFiles=new boolean[sourceFiles.length];
for (int i=0;i<enabledFiles.length;i++) enabledFiles[i]=false;
int numFilesToProcess=0;
int numImagesToProcess=0;
for (int nFile=0;nFile<enabledFiles.length;nFile++){
if ((sourceFiles[nFile]!=null) && (sourceFiles[nFile].length()>1)) {
int [] channels={correctionsParameters.getChannelFromSourceTiff(sourceFiles[nFile])};
if (correctionsParameters.isJP4()){
int subCamera= channels[0]- correctionsParameters.firstSubCamera; // to match those in the sensor files
// removeUnusedSensorData should be off!?
channels=this.eyesisCorrections.pixelMapping.channelsForSubCamera(subCamera);
}
if (channels!=null){
for (int i=0;i<channels.length;i++) if (eyesisCorrections.isChannelEnabled(channels[i])){
if (!enabledFiles[nFile]) numFilesToProcess++;
enabledFiles[nFile]=true;
numImagesToProcess++;
}
}
}
}
if (numFilesToProcess==0){
System.out.println("No files to process (of "+sourceFiles.length+")");
return;
} else {
if (debugLevel>0) System.out.println(numFilesToProcess+ " files to process (of "+sourceFiles.length+"), "+numImagesToProcess+" images to process");
}
double [] referenceExposures=eyesisCorrections.calcReferenceExposures(debugLevel); // multiply each image by this and divide by individual (if not NaN)
int [][] fileIndices=new int [numImagesToProcess][2]; // file index, channel number
int index=0;
for (int nFile=0;nFile<enabledFiles.length;nFile++){
if ((sourceFiles[nFile]!=null) && (sourceFiles[nFile].length()>1)) {
int [] channels={correctionsParameters.getChannelFromSourceTiff(sourceFiles[nFile])};
if (correctionsParameters.isJP4()){
int subCamera= channels[0]- correctionsParameters.firstSubCamera; // to match those in the sensor files
channels=eyesisCorrections.pixelMapping.channelsForSubCamera(subCamera);
}
if (channels!=null){
for (int i=0;i<channels.length;i++) if (eyesisCorrections.isChannelEnabled(channels[i])){
fileIndices[index ][0]=nFile;
fileIndices[index++][1]=channels[i];
}
}
}
}
for (int iImage=0;iImage<fileIndices.length;iImage++){
int nFile=fileIndices[iImage][0];
ImagePlus imp_src=null;
// int srcChannel=correctionsParameters.getChannelFromSourceTiff(sourceFiles[nFile]);
int srcChannel=fileIndices[iImage][1];
if (correctionsParameters.isJP4()){
int subchannel=eyesisCorrections.pixelMapping.getSubChannel(srcChannel);
if (this.correctionsParameters.swapSubchannels01) {
switch (subchannel){
case 0: subchannel=1; break;
case 1: subchannel=0; break;
}
}
if (debugLevel>0) System.out.println("Processing channel "+fileIndices[iImage][1]+" - subchannel "+subchannel+" of "+sourceFiles[nFile]);
ImagePlus imp_composite=eyesisCorrections.JP4_INSTANCE.open(
"", // path,
sourceFiles[nFile],
"", //arg - not used in JP46 reader
true, // un-apply camera color gains
null, // new window
false); // do not show
imp_src=eyesisCorrections.JP4_INSTANCE.demuxImage(imp_composite, subchannel);
if (imp_src==null) imp_src=imp_composite; // not a composite image
// do we need to add any properties?
} else {
imp_src=new ImagePlus(sourceFiles[nFile]);
// (new JP46_Reader_camera(false)).decodeProperiesFromInfo(imp_src); // decode existent properties from info
eyesisCorrections.JP4_INSTANCE.decodeProperiesFromInfo(imp_src); // decode existent properties from info
if (debugLevel>0) System.out.println("Processing "+sourceFiles[nFile]);
}
double scaleExposure=1.0;
if (!Double.isNaN(referenceExposures[nFile]) && (imp_src.getProperty("EXPOSURE")!=null)){
scaleExposure=referenceExposures[nFile]/Double.parseDouble((String) imp_src.getProperty("EXPOSURE"));
// imp_src.setProperty("scaleExposure", scaleExposure); // it may already have channel
if (debugLevel>0) System.out.println("Will scale intensity (to compensate for exposure) by "+scaleExposure);
}
imp_src.setProperty("name", correctionsParameters.getNameFromSourceTiff(sourceFiles[nFile]));
imp_src.setProperty("channel", srcChannel); // it may already have channel
imp_src.setProperty("path", sourceFiles[nFile]); // it may already have channel
// ImagePlus result=processChannelImage( // returns ImagePlus, but it already should be saved/shown
processCLTChannelImage( // returns ImagePlus, but it already should be saved/shown
imp_src, // should have properties "name"(base for saving results), "channel","path"
clt_parameters,
debayerParameters,
nonlinParameters,
colorProcParameters,
channelGainParameters,
rgbParameters,
convolveFFTSize, // 128 - fft size, kernel size should be size/2
scaleExposure,
threadsMax, // maximal number of threads to launch
updateStatus,
debugLevel);
// warp result (add support for different color modes)
if (this.correctionsParameters.equirectangular){
if (equirectangularParameters.clearFullMap) eyesisCorrections.pixelMapping.deleteEquirectangularMapFull(srcChannel); // save memory? //removeUnusedSensorData - no, use equirectangular specific settings
if (equirectangularParameters.clearAllMaps) eyesisCorrections.pixelMapping.deleteEquirectangularMapAll(srcChannel); // save memory? //removeUnusedSensorData - no, use equirectangular specific settings
}
//pixelMapping
Runtime.getRuntime().gc();
if (debugLevel >-1) System.out.println("Processing image "+(iImage+1)+" (of "+fileIndices.length+") finished at "+
IJ.d2s(0.000000001*(System.nanoTime()-this.startTime),3)+" sec, --- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
if (eyesisCorrections.stopRequested.get()>0) {
System.out.println("User requested stop");
return;
}
}
System.out.println("Processing "+fileIndices.length+" files finished at "+
IJ.d2s(0.000000001*(System.nanoTime()-this.startTime),3)+" sec, --- Free memory="+Runtime.getRuntime().freeMemory()+" (of "+Runtime.getRuntime().totalMemory()+")");
}
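Inside the loop above each image is brought to a common scale by multiplying it by referenceExposures[nFile] divided by its own EXPOSURE property (see processCLTChannelImage below). A minimal restatement, not from this commit:

// Returns the factor an image should be multiplied by to match the reference exposure.
static double exposureScale(double referenceExposure, String exposureProperty) {
    if (Double.isNaN(referenceExposure) || (exposureProperty == null)) return 1.0; // leave unscaled
    return referenceExposure / Double.parseDouble(exposureProperty);
}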
public ImagePlus processCLTChannelImage(
ImagePlus imp_src, // should have properties "name"(base for saving results), "channel","path"
// EyesisCorrectionParameters.DCTParameters dct_parameters,
EyesisCorrectionParameters.CLTParameters clt_parameters,
EyesisCorrectionParameters.DebayerParameters debayerParameters,
EyesisCorrectionParameters.NonlinParameters nonlinParameters,
EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CorrectionColorProc.ColorGainsParameters channelGainParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
int convolveFFTSize, // 128 - fft size, kernel size should be size/2
double scaleExposure,
final int threadsMax, // maximal number of threads to launch
final boolean updateStatus,
final int debugLevel){
boolean advanced=this.correctionsParameters.zcorrect || this.correctionsParameters.equirectangular;
boolean crop= advanced? true: this.correctionsParameters.crop;
boolean rotate= advanced? false: this.correctionsParameters.rotate;
double JPEG_scale= advanced? 1.0: this.correctionsParameters.JPEG_scale;
boolean toRGB= advanced? true: this.correctionsParameters.toRGB;
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
// may use this.StartTime to report intermediate steps execution times
String name=(String) imp_src.getProperty("name");
// int channel= Integer.parseInt((String) imp_src.getProperty("channel"));
int channel= (Integer) imp_src.getProperty("channel");
String path= (String) imp_src.getProperty("path");
if (this.correctionsParameters.pixelDefects && (eyesisCorrections.defectsXY!=null)&& (eyesisCorrections.defectsXY[channel]!=null)){
// apply pixel correction
int numApplied= eyesisCorrections.correctDefects(
imp_src,
channel,
debugLevel);
if ((debugLevel>0) && (numApplied>0)) { // reduce verbosity after verified defect correction works
System.out.println("Corrected "+numApplied+" pixels in "+path);
}
}
if (this.correctionsParameters.vignetting){
if ((eyesisCorrections.channelVignettingCorrection==null) || (channel<0) || (channel>=eyesisCorrections.channelVignettingCorrection.length) || (eyesisCorrections.channelVignettingCorrection[channel]==null)){
System.out.println("No vignetting data for channel "+channel);
return null;
}
float [] pixels=(float []) imp_src.getProcessor().getPixels();
if (pixels.length!=eyesisCorrections.channelVignettingCorrection[channel].length){
System.out.println("Vignetting data for channel "+channel+" has "+eyesisCorrections.channelVignettingCorrection[channel].length+" pixels, image "+path+" has "+pixels.length);
return null;
}
// TODO: Move to do it once:
double min_non_zero = 0.0;
for (int i=0;i<pixels.length;i++){
double d = eyesisCorrections.channelVignettingCorrection[channel][i];
if ((d > 0.0) && ((min_non_zero == 0) || (min_non_zero > d))){
min_non_zero = d;
}
}
double max_vign_corr = clt_parameters.vignetting_range*min_non_zero;
System.out.println("Vignetting data: channel="+channel+", min = "+min_non_zero);
for (int i=0;i<pixels.length;i++){
double d = eyesisCorrections.channelVignettingCorrection[channel][i];
if (d > max_vign_corr) d = max_vign_corr;
pixels[i]*=d;
}
// Scale here, combine with vignetting later?
int width = imp_src.getWidth();
int height = imp_src.getHeight();
for (int y = 0; y < height-1; y+=2){
for (int x = 0; x < width-1; x+=2){
pixels[y*width+x ] *= clt_parameters.scale_g;
pixels[y*width+x+width+1] *= clt_parameters.scale_g;
pixels[y*width+x +1] *= clt_parameters.scale_r;
pixels[y*width+x+width ] *= clt_parameters.scale_b;
}
}
} else { // assuming GR/BG pattern
System.out.println("Applying fixed color gain correction parameters: Gr="+
clt_parameters.novignetting_r+", Gg="+clt_parameters.novignetting_g+", Gb="+clt_parameters.novignetting_b);
float [] pixels=(float []) imp_src.getProcessor().getPixels();
int width = imp_src.getWidth();
int height = imp_src.getHeight();
double kr = clt_parameters.scale_r/clt_parameters.novignetting_r;
double kg = clt_parameters.scale_g/clt_parameters.novignetting_g;
double kb = clt_parameters.scale_b/clt_parameters.novignetting_b;
for (int y = 0; y < height-1; y+=2){
for (int x = 0; x < width-1; x+=2){
pixels[y*width+x ] *= kg;
pixels[y*width+x+width+1] *= kg;
pixels[y*width+x +1] *= kr;
pixels[y*width+x+width ] *= kb;
}
}
}
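Both branches above touch the same 2x2 Bayer cell positions; a compact restatement of that pattern, not from this commit, with kr, kg, kb standing for whichever per-channel multipliers were selected (vignetting-derived or the fixed novignetting/scale gains):

// GR/BG Bayer cell: (even row, even col) = G, (even row, odd col) = R,
//                   (odd row, even col)  = B, (odd row, odd col)  = G
static void applyBayerGains(float[] pixels, int width, int height, double kr, double kg, double kb) {
    for (int y = 0; y < height - 1; y += 2) {
        for (int x = 0; x < width - 1; x += 2) {
            pixels[ y      * width + x    ] *= kg; // green on the red row
            pixels[(y + 1) * width + x + 1] *= kg; // green on the blue row
            pixels[ y      * width + x + 1] *= kr; // red
            pixels[(y + 1) * width + x    ] *= kb; // blue
        }
    }
}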
String title=name+"-"+String.format("%02d", channel);
ImagePlus result=imp_src;
if (debugLevel>1) System.out.println("processing: "+path);
result.setTitle(title+"RAW");
if (!this.correctionsParameters.split){
eyesisCorrections.saveAndShow(result, this.correctionsParameters);
return result;
}
// Generate split parameters for DCT processing mode
EyesisCorrectionParameters.SplitParameters splitParameters = new EyesisCorrectionParameters.SplitParameters(
1, // oversample; // currently source kernels are oversampled
clt_parameters.transform_size/2, // addLeft
clt_parameters.transform_size/2, // addTop
clt_parameters.transform_size/2, // addRight
clt_parameters.transform_size/2 // addBottom
);
// Split into Bayer components, oversample, increase canvas
double [][] double_stack = eyesisCorrections.bayerToDoubleStack(
result, // source Bayer image, linearized, 32-bit (float))
null); // no margins, no oversample
// ImageStack stack= eyesisCorrections.bayerToStack(
// result, // source Bayer image, linearized, 32-bit (float))
// splitParameters);
String titleFull=title+"-SPLIT";
if (debugLevel > -1){
double [] chn_avg = {0.0,0.0,0.0};
int width = imp_src.getWidth();
int height = imp_src.getHeight();
for (int c = 0; c < 3; c++){
for (int i = 0; i<double_stack[c].length; i++){
chn_avg[c] += double_stack[c][i];
}
}
chn_avg[0] /= width*height/4;
chn_avg[1] /= width*height/4;
chn_avg[2] /= width*height/2;
System.out.println("Split channels averages: R="+chn_avg[0]+", G="+chn_avg[2]+", B="+chn_avg[1]);
}
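The divisors above reflect how many samples each split plane actually contains: in a width x height GR/BG mosaic one quarter of the pixels are red, one quarter blue and one half green, and the split planes are zero everywhere else. A short restatement, not from this commit:

// Sample counts per split plane, in the {Red, Blue, Green} order used by chn_avg.
static double[] bayerSampleCounts(int width, int height) {
    double quarter = width * (double) height / 4.0;
    return new double[] { quarter, quarter, 2.0 * quarter };
}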
String [] rbg_titles = {"Red", "Blue", "Green"};
ImageStack stack;
if (!this.correctionsParameters.debayer) {
// showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
// ImageStack
stack = sdfa_instance.makeStack(double_stack, imp_src.getWidth(), imp_src.getHeight(), rbg_titles);
result= new ImagePlus(titleFull, stack);
eyesisCorrections.saveAndShow(result, this.correctionsParameters);
return result;
}
// =================
if (debugLevel > 0) {
System.out.println("Showing image BEFORE_CLT_PROC");
sdfa_instance.showArrays(double_stack, imp_src.getWidth(), imp_src.getHeight(), true, "BEFORE_CLT_PROC", rbg_titles);
}
if (this.correctionsParameters.deconvolve) { // process with DCT, otherwise use simple debayer
ImageDtt image_dtt = new ImageDtt();
/*
double [][][][][] clt_data = image_dtt.cltStack(
stack,
channel,
clt_parameters,
clt_parameters.ishift_x, //final int shiftX, // shift image horizontally (positive - right)
clt_parameters.ishift_y, //final int shiftY, // shift image vertically (positive - down)
threadsMax,
debugLevel,
updateStatus);
*/
for (int i =0 ; i < double_stack[0].length; i++){
// double_stack[0][i]*=2.0; // Scale red twice to compensate less pixels than green
// double_stack[1][i]*=2.0; // Scale blue twice to compensate less pixels than green
double_stack[2][i]*=0.5; // Scale green by 0.5 to compensate for having twice as many pixels as red or blue
}
double [][][][][] clt_data = image_dtt.clt_aberrations(
double_stack, // final double [][] imade_data,
imp_src.getWidth(), // final int width,
clt_kernels[channel], // final double [][][][][] clt_kernels, // [color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
clt_parameters.kernel_step,
clt_parameters.transform_size,
clt_parameters.clt_window,
clt_parameters.shift_x, // final int shiftX, // shift image horizontally (positive - right) - just for testing
clt_parameters.shift_y, // final int shiftY, // shift image vertically (positive - down)
clt_parameters.tileX, // final int debug_tileX,
clt_parameters.tileY, // final int debug_tileY,
(clt_parameters.dbg_mode & 64) != 0, // no fract shift
(clt_parameters.dbg_mode & 128) != 0, // no convolve
(clt_parameters.dbg_mode & 256) != 0, // transpose convolve
threadsMax,
debugLevel);
// updateStatus);
System.out.println("clt_data.length="+clt_data.length+" clt_data[0].length="+clt_data[0].length
+" clt_data[0][0].length="+clt_data[0][0].length+" clt_data[0][0][0].length="+
clt_data[0][0][0].length+" clt_data[0][0][0][0].length="+clt_data[0][0][0][0].length);
/*
if (dct_parameters.color_DCT){ // convert RBG -> YPrPb
dct_data = image_dtt.dct_color_convert(
dct_data,
colorProcParameters.kr,
colorProcParameters.kb,
dct_parameters.sigma_rb, // blur of channels 0,1 (r,b) in addition to 2 (g)
dct_parameters.sigma_y, // blur of Y from G
dct_parameters.sigma_color, // blur of Pr, Pb in addition to Y
threadsMax,
debugLevel);
} else { // just LPF RGB
*/
if (clt_parameters.corr_sigma > 0){ // no filter at all
for (int chn = 0; chn < clt_data.length; chn++) {
image_dtt.clt_lpf(
clt_parameters.corr_sigma,
clt_data[chn],
threadsMax,
debugLevel);
}
}
/*
}
*/
int tilesY = imp_src.getHeight()/clt_parameters.transform_size;
int tilesX = imp_src.getWidth()/clt_parameters.transform_size;
if (debugLevel > 0){
System.out.println("--tilesX="+tilesX);
System.out.println("--tilesY="+tilesY);
}
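A small sketch, not from this commit, of the dimensions reported by the clt_data printout above: per color there is one tile per transform_size pixels in each direction, and each tile carries four quadrants of transform_size*transform_size coefficients.

static void printCltShape(int width, int height, int transform_size, int colors) {
    int tilesX = width  / transform_size;
    int tilesY = height / transform_size;
    System.out.println("colors=" + colors + ", tilesY=" + tilesY + ", tilesX=" + tilesX +
            ", quadrants=4, coefficients per quadrant=" + (transform_size * transform_size));
}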
if (debugLevel > 1){
double [][] clt = new double [clt_data.length*4][];
for (int chn = 0; chn < clt_data.length; chn++) {
double [][] clt_set = image_dtt.clt_dbg(
clt_data [chn],
threadsMax,
debugLevel);
for (int ii = 0; ii < clt_set.length; ii++) clt[chn*4+ii] = clt_set[ii];
}
if (debugLevel > 0){
sdfa_instance.showArrays(clt,
tilesX*clt_parameters.transform_size,
tilesY*clt_parameters.transform_size,
true,
result.getTitle()+"-CLT");
}
}
double [][] iclt_data = new double [clt_data.length][];
for (int chn=0; chn<clt_data.length;chn++){
iclt_data[chn] = image_dtt.iclt_2d(
clt_data[chn], // scanline representation of dcd data, organized as dct_size x dct_size tiles
clt_parameters.transform_size, // final int
clt_parameters.clt_window, // window_type
15, // clt_parameters.iclt_mask, //which of 4 to transform back
0, // clt_parameters.dbg_mode, //which of 4 to transform back
threadsMax,
debugLevel);
}
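The reconstructed planes produced above are shown below with width (tilesX + 1) * transform_size and height (tilesY + 1) * transform_size, i.e. half a transform tile of margin on each side of the original image, matching the transform_size/2 margins set in splitParameters. A one-line restatement, not from this commit:

// Canvas size of an inverse-CLT plane for a given tile count.
static int icltSize(int tiles, int transform_size) {
    return (tiles + 1) * transform_size;
}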
/*
if (dct_parameters.color_DCT){ // convert RBG -> YPrPb
if (debugLevel > 0) sdfa_instance.showArrays(
idct_data,
(tilesX + 1) * dct_parameters.dct_size,
(tilesY + 1) * dct_parameters.dct_size,
true,
result.getTitle()+"-IDCT-YPrPb");
if (dct_parameters.nonlin && ((dct_parameters.nonlin_y != 0.0) || (dct_parameters.nonlin_c != 0.0))) {
System.out.println("Applying edge emphasis, nonlin_y="+dct_parameters.nonlin_y+
", nonlin_c="+dct_parameters.nonlin_c+", nonlin_corn="+dct_parameters.nonlin_corn);
idct_data = edge_emphasis(
idct_data, // final double [][] yPrPb,
(tilesX + 1) * dct_parameters.dct_size, // final int width,
dct_parameters.dct_size, // final int step, //(does not need to be this) // just for multi-threading efficiency?
dct_parameters.nonlin_max_y, // final double nonlin_max_y = 1.0; // maximal amount of nonlinear line/edge emphasis for Y component
dct_parameters.nonlin_max_c, // final double nonlin_max_c = 1.0; // maximal amount of nonlinear line/edge emphasis for C component
dct_parameters.nonlin_y, // final double nonlin_y, // = 0.01; // amount of nonlinear line/edge emphasis for Y component
dct_parameters.nonlin_c, // final double nonlin_c, // = 0.01; // amount of nonlinear line/edge emphasis for C component
dct_parameters.nonlin_corn, // final double nonlin_corn, // = 0.5; // relative weight for nonlinear corner elements
(dct_parameters.denoise? dct_parameters.denoise_y:0.0), // final double denoise_y, // = 1.0; // maximal total smoothing of the Y post-kernel (will compete with edge emphasis)
(dct_parameters.denoise? dct_parameters.denoise_c:0.0), // final double denoise_c, // = 1.0; // maximal total smoothing of the color differences post-kernel (will compete with edge emphasis)
dct_parameters.denoise_y_corn, // final double denoise_y_corn, // = 0.5; // weight of the 4 corner pixels during denoise y (relative to 4 straight)
dct_parameters.denoise_c_corn, // final double denoise_c_corn, // = 0.5; // weight of the 4 corner pixels during denoise y (relative to 4 straight)
dct_parameters.dct_size, //, // final int threadsMax, // maximal number of threads to launch
debugLevel); // final int globalDebugLevel)
if (debugLevel > 0) sdfa_instance.showArrays(
idct_data,
(tilesX + 1) * dct_parameters.dct_size,
(tilesY + 1) * dct_parameters.dct_size,
true,
result.getTitle()+"-EMPH-"+dct_parameters.nonlin_y+"_"+dct_parameters.nonlin_c+"_"+dct_parameters.nonlin_corn);
}
// temporary convert back to RGB
idct_data = YPrPbToRBG(idct_data,
colorProcParameters.kr, // 0.299;
colorProcParameters.kb, // 0.114;
(tilesX + 1) * dct_parameters.dct_size);
} else {
if (dct_parameters.post_debayer){ // post_debayer
if (debugLevel > -1) System.out.println("Applying post-debayer");
if (debugLevel > -1) sdfa_instance.showArrays(
idct_data,
(tilesX + 1) * dct_parameters.dct_size,
(tilesY + 1) * dct_parameters.dct_size,
true,
result.getTitle()+"-rbg_before");
idct_data = post_debayer( // debayer in pixel domain after aberration correction
idct_data, // final double [][] rbg, // yPrPb,
(tilesX + 1) * dct_parameters.dct_size, // final int width,
dct_parameters.dct_size, // final int step, // just for multi-threading efficiency?
dct_parameters.dct_size, // final int threadsMax, // maximal number of threads to launch
debugLevel); // final int globalDebugLevel)
// add here YPrPb conversion, then edge_emphasis
if (debugLevel > -1) sdfa_instance.showArrays(
idct_data,
(tilesX + 1) * dct_parameters.dct_size,
(tilesY + 1) * dct_parameters.dct_size,
true,
result.getTitle()+"-rbg_after");
} else {
*/
// if (debugLevel > -1) System.out.println("Applied LPF, sigma = "+dct_parameters.dbg_sigma);
if (debugLevel > -1) sdfa_instance.showArrays(
iclt_data,
(tilesX + 1) * clt_parameters.transform_size,
(tilesY + 1) * clt_parameters.transform_size,
true,
result.getTitle()+"-rbg_sigma");
/*
}
}
*/
if (debugLevel > 0) sdfa_instance.showArrays(iclt_data,
(tilesX + 1) * clt_parameters.transform_size,
(tilesY + 1) * clt_parameters.transform_size,
true,
result.getTitle()+"-ICLT-RGB");
// convert to ImageStack of 3 slices
String [] sliceNames = {"red", "blue", "green"};
stack = sdfa_instance.makeStack(
iclt_data,
(tilesX + 1) * clt_parameters.transform_size,
(tilesY + 1) * clt_parameters.transform_size,
sliceNames); // or use null to get chn-nn slice names
} else { // if (this.correctionsParameters.deconvolve) - here use a simple debayer
System.out.println("Bypassing CLT-based aberration correction");
stack = sdfa_instance.makeStack(double_stack, imp_src.getWidth(), imp_src.getHeight(), rbg_titles);
debayer_rbg(stack, 0.25); // simple standard 3x3 kernel debayer
}
if (debugLevel > -1){
double [] chn_avg = {0.0,0.0,0.0};
float [] pixels;
int width = stack.getWidth();
int height = stack.getHeight();
for (int c = 0; c <3; c++){
pixels = (float[]) stack.getPixels(c+1);
for (int i = 0; i<pixels.length; i++){
chn_avg[c] += pixels[i];
}
}
chn_avg[0] /= width*height;
chn_avg[1] /= width*height;
chn_avg[2] /= width*height;
System.out.println("Processed channels averages: R="+chn_avg[0]+", G="+chn_avg[2]+", B="+chn_avg[1]);
}
if (!this.correctionsParameters.colorProc){
result= new ImagePlus(titleFull, stack);
eyesisCorrections.saveAndShow(
result,
this.correctionsParameters);
return result;
}
if (debugLevel > 1) System.out.println("before colors.1");
//Processing colors - changing stack sequence to r-g-b (was r-b-g)
if (!eyesisCorrections.fixSliceSequence(
stack,
debugLevel)){
if (debugLevel > -1) System.out.println("fixSliceSequence() returned false");
return null;
}
if (debugLevel > 1) System.out.println("before colors.2");
if (debugLevel > 1){
ImagePlus imp_dbg=new ImagePlus(imp_src.getTitle()+"-"+channel+"-preColors",stack);
eyesisCorrections.saveAndShow(
imp_dbg,
this.correctionsParameters);
}
if (debugLevel > 1) System.out.println("before colors.3, scaleExposure="+scaleExposure+" scale = "+(255.0/eyesisCorrections.psfSubpixelShouldBe4/eyesisCorrections.psfSubpixelShouldBe4/scaleExposure));
CorrectionColorProc correctionColorProc=new CorrectionColorProc(eyesisCorrections.stopRequested);
double [][] yPrPb=new double [3][];
// if (dct_parameters.color_DCT){
// need to get YPbPr - not RGB here
// } else {
correctionColorProc.processColorsWeights(stack, // just gamma convert? TODO: Cleanup? Convert directly form the linear YPrPb
// 255.0/this.psfSubpixelShouldBe4/this.psfSubpixelShouldBe4, // double scale, // initial maximal pixel value (16))
// 255.0/eyesisCorrections.psfSubpixelShouldBe4/eyesisCorrections.psfSubpixelShouldBe4/scaleExposure, // double scale, // initial maximal pixel value (16))
// 255.0/2/2/scaleExposure, // double scale, // initial maximal pixel value (16))
255.0/scaleExposure, // double scale, // initial maximal pixel value (16))
colorProcParameters,
channelGainParameters,
channel,
null, //correctionDenoise.getDenoiseMask(),
this.correctionsParameters.blueProc,
debugLevel);
if (debugLevel > 1) System.out.println("Processed colors to YPbPr, total number of slices="+stack.getSize());
if (debugLevel > 1) {
ImagePlus imp_dbg=new ImagePlus("procColors",stack);
eyesisCorrections.saveAndShow(
imp_dbg,
this.correctionsParameters);
}
float [] fpixels;
int [] slices_YPrPb = {8,6,7};
yPrPb=new double [3][];
for (int n = 0; n < slices_YPrPb.length; n++){
fpixels = (float[]) stack.getPixels(slices_YPrPb[n]);
yPrPb[n] = new double [fpixels.length];
for (int i = 0; i < fpixels.length; i++) yPrPb[n][i] = fpixels[i];
}
if (toRGB) {
System.out.println("correctionColorProc.YPrPbToRGB");
stack = YPrPbToRGB(yPrPb,
colorProcParameters.kr, // 0.299;
colorProcParameters.kb, // 0.114;
stack.getWidth());
title=titleFull; // including "-DECONV" or "-COMBO"
titleFull=title+"-RGB-float";
//Trim stack to just first 3 slices
if (debugLevel > 1){ // 2){
ImagePlus imp_dbg=new ImagePlus("YPrPbToRGB",stack);
eyesisCorrections.saveAndShow(
imp_dbg,
this.correctionsParameters);
}
while (stack.getSize() > 3) stack.deleteLastSlice();
if (debugLevel > 1) System.out.println("Trimming color stack");
} else {
title=titleFull; // including "-DECONV" or "-COMBO"
titleFull=title+"-YPrPb"; // including "-DECONV" or "-COMBO"
if (debugLevel > 1) System.out.println("Using full stack, including YPbPr");
}
result= new ImagePlus(titleFull, stack);
// Crop image to match original one (scaled to oversampling)
if (crop){ // always crop if equirectangular
if (debugLevel > 1) System.out.println("cropping");
stack = eyesisCorrections.cropStack32(stack,splitParameters);
if (debugLevel > 2) { // 2){
ImagePlus imp_dbg=new ImagePlus("cropped",stack);
eyesisCorrections.saveAndShow(
imp_dbg,
this.correctionsParameters);
}
}
// rotate the result
if (rotate){ // never rotate for equirectangular
stack=eyesisCorrections.rotateStack32CW(stack);
}
if (!toRGB && !this.correctionsParameters.jpeg){ // toRGB set for equirectangular
if (debugLevel > 1) System.out.println("!toRGB && !this.correctionsParameters.jpeg");
eyesisCorrections.saveAndShow(result, this.correctionsParameters);
return result;
} else { // that's not the end result, save if required
if (debugLevel > 1) System.out.println("!toRGB && !this.correctionsParameters.jpeg - else");
eyesisCorrections.saveAndShow(result,
eyesisCorrections.correctionsParameters,
eyesisCorrections.correctionsParameters.save32,
false,
eyesisCorrections.correctionsParameters.JPEG_quality); // save, no show
}
// convert to RGB48 (16 bits per color component)
ImagePlus imp_RGB;
stack=eyesisCorrections.convertRGB32toRGB16Stack(
stack,
rgbParameters);
titleFull=title+"-RGB48";
result= new ImagePlus(titleFull, stack);
// ImagePlus imp_RGB24;
result.updateAndDraw();
if (debugLevel > 1) System.out.println("result.updateAndDraw(), "+titleFull+"-RGB48");
CompositeImage compositeImage=eyesisCorrections.convertToComposite(result);
if (!this.correctionsParameters.jpeg && !advanced){ // RGB48 was the end result
if (debugLevel > 1) System.out.println("if (!this.correctionsParameters.jpeg && !advanced)");
eyesisCorrections.saveAndShow(compositeImage, this.correctionsParameters);
return result;
} else { // that's not the end result, save if required
if (debugLevel > 1) System.out.println("if (!this.correctionsParameters.jpeg && !advanced) - else");
eyesisCorrections.saveAndShow(compositeImage, this.correctionsParameters, this.correctionsParameters.save16, false); // save, no show
// eyesisCorrections.saveAndShow(compositeImage, this.correctionsParameters, this.correctionsParameters.save16, true); // save, no show
}
imp_RGB=eyesisCorrections.convertRGB48toRGB24(
stack,
title+"-RGB24",
0, 65536, // r range 0->0, 65536->256
0, 65536, // g range
0, 65536);// b range
if (JPEG_scale!=1.0){
ImageProcessor ip=imp_RGB.getProcessor();
ip.setInterpolationMethod(ImageProcessor.BICUBIC);
ip=ip.resize((int)(ip.getWidth()*JPEG_scale),(int) (ip.getHeight()*JPEG_scale));
imp_RGB= new ImagePlus(imp_RGB.getTitle(),ip);
imp_RGB.updateAndDraw();
}
eyesisCorrections.saveAndShow(imp_RGB, this.correctionsParameters);
return result;
}
}
...@@ -2890,16 +2890,7 @@ private Panel panel1,
int numChannels=EYESIS_CORRECTIONS.getNumChannels();
NONLIN_PARAMETERS.modifyNumChannels(numChannels);
CHANNEL_GAINS_PARAMETERS.modifyNumChannels(numChannels);
/*
if (CORRECTION_PARAMETERS.deconvolve && (NONLIN_PARAMETERS.noiseGainPower!=0)) {
EYESIS_CORRECTIONS.updateImageNoiseGains(
NONLIN_PARAMETERS, //EyesisCorrectionParameters.NonlinParameters nonlinParameters,
CONVOLVE_FFT_SIZE, //int fftSize, // 128 - fft size, kernel size should be size/2
THREADS_MAX, // int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, // boolean updateStatus,
DEBUG_LEVEL); //int globalDebugLevel){
}
*/
if (!EYESIS_DCT.DCTKernelsAvailable()){
if (DEBUG_LEVEL > 0){
System.out.println("Reading/converting DCT kernels");
...@@ -2914,11 +2905,7 @@ private Panel panel1,
EYESIS_DCT.showKernels(); // show restored kernels
}
}
// EYESIS_CORRECTIONS.processChannelImages(
EYESIS_DCT.processDCTChannelImages(
// SPLIT_PARAMETERS, // EyesisCorrectionParameters.SplitParameters splitParameters,
DCT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters, DCT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters, DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
NONLIN_PARAMETERS, //EyesisCorrectionParameters.NonlinParameters nonlinParameters, NONLIN_PARAMETERS, //EyesisCorrectionParameters.NonlinParameters nonlinParameters,
...@@ -4096,6 +4083,7 @@ private Panel panel1, ...@@ -4096,6 +4083,7 @@ private Panel panel1,
if (EYESIS_DCT != null){ if (EYESIS_DCT != null){
EYESIS_DCT.resetCLTKernels(); EYESIS_DCT.resetCLTKernels();
} }
} else if (label.equals("Read CLT kernels")) { } else if (label.equals("Read CLT kernels")) {
if (!CLT_PARAMETERS.showDialog()) return; if (!CLT_PARAMETERS.showDialog()) return;
if (EYESIS_DCT == null){ if (EYESIS_DCT == null){
...@@ -4145,6 +4133,92 @@ private Panel panel1, ...@@ -4145,6 +4133,92 @@ private Panel panel1,
DEBUG_LEVEL); DEBUG_LEVEL);
} }
return;
} else if (label.equals("CLT process files")) {
DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
EYESIS_CORRECTIONS.setDebug(DEBUG_LEVEL);
if (EYESIS_DCT == null){
EYESIS_DCT = new EyesisDCT (
EYESIS_CORRECTIONS,
CORRECTION_PARAMETERS,
DCT_PARAMETERS);
if (DEBUG_LEVEL > 0){
System.out.println("Created new EyesisDCT instance, will need to read CLT kernels");
}
}
String configPath=null;
if (EYESIS_CORRECTIONS.correctionsParameters.saveSettings) {
configPath=EYESIS_CORRECTIONS.correctionsParameters.selectResultsDirectory(
true,
true);
if (configPath==null){
String msg="No results directory selected, command aborted";
System.out.println("Warning: "+msg);
IJ.showMessage("Warning",msg);
return;
}
configPath+=Prefs.getFileSeparator()+"autoconfig";
try {
saveTimestampedProperties(
configPath, // full path or null
null, // use as default directory if path==null
true,
PROPERTIES);
} catch (Exception e){
String msg="Failed to save configuration to "+configPath+", command aborted";
System.out.println("Error: "+msg);
IJ.showMessage("Error",msg);
return;
}
}
EYESIS_CORRECTIONS.initSensorFiles(DEBUG_LEVEL);
int numChannels=EYESIS_CORRECTIONS.getNumChannels();
NONLIN_PARAMETERS.modifyNumChannels(numChannels);
CHANNEL_GAINS_PARAMETERS.modifyNumChannels(numChannels);
if (!EYESIS_DCT.CLTKernelsAvailable()){
if (DEBUG_LEVEL > 0){
System.out.println("Reading CLT kernels");
}
EYESIS_DCT.readCLTKernels(
CLT_PARAMETERS,
THREADS_MAX,
UPDATE_STATUS, // update status info
DEBUG_LEVEL);
if (DEBUG_LEVEL > 1){
EYESIS_DCT.showCLTKernels(
THREADS_MAX,
UPDATE_STATUS, // update status info
DEBUG_LEVEL);
}
}
///========================================
EYESIS_DCT.processCLTChannelImages(
CLT_PARAMETERS, // EyesisCorrectionParameters.DCTParameters dct_parameters,
DEBAYER_PARAMETERS, //EyesisCorrectionParameters.DebayerParameters debayerParameters,
NONLIN_PARAMETERS, //EyesisCorrectionParameters.NonlinParameters nonlinParameters,
COLOR_PROC_PARAMETERS, //EyesisCorrectionParameters.ColorProcParameters colorProcParameters,
CHANNEL_GAINS_PARAMETERS, //CorrectionColorProc.ColorGainsParameters channelGainParameters,
RGB_PARAMETERS, //EyesisCorrectionParameters.RGBParameters rgbParameters,
EQUIRECTANGULAR_PARAMETERS, // EyesisCorrectionParameters.EquirectangularParameters equirectangularParameters,
CONVOLVE_FFT_SIZE, //int convolveFFTSize, // 128 - fft size, kernel size should be size/2
THREADS_MAX, //final int threadsMax, // maximal number of threads to launch
UPDATE_STATUS, //final boolean updateStatus,
DEBUG_LEVEL); //final int debugLevel);
if (configPath!=null) {
saveTimestampedProperties( // save config again
configPath, // full path or null
null, // use as default directory if path==null
true,
PROPERTIES);
}
return;
// End of buttons code
...
...@@ -657,6 +657,98 @@ public class ImageDtt {
return dpixels;
}
// perform 2d clt and apply aberration corrections, all colors
public double [][][][][] clt_aberrations(
final double [][] imade_data,
final int width,
final double [][][][][] clt_kernels, // [color][tileY][tileX][band][pixel] , size should match image (have 1 tile around)
final int kernel_step,
final int transform_size,
final int window_type,
final double shiftX, // shift image horizontally (positive - right) - just for testing
final double shiftY, // shift image vertically (positive - down)
final int debug_tileX,
final int debug_tileY,
final boolean no_fract_shift,
final boolean no_deconvolution,
final boolean transpose,
final int threadsMax, // maximal number of threads to launch
final int globalDebugLevel)
{
final int nChn = imade_data.length;
final int height=imade_data[0].length/width;
final int tilesX=width/transform_size;
final int tilesY=height/transform_size;
final int nTilesInChn=tilesX*tilesY;
final int nTiles=tilesX*tilesY*nChn;
final double [][][][][] clt_data = new double[nChn][tilesY][tilesX][4][];
final Thread[] threads = newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
if (globalDebugLevel > 0) {
System.out.println("clt_aberrations(): width="+width+" height="+height+" transform_size="+transform_size+
" debug_tileX="+debug_tileX+" debug_tileY="+debug_tileY+" globalDebugLevel="+globalDebugLevel);
}
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
DttRad2 dtt = new DttRad2(transform_size);
dtt.set_window(window_type);
int tileY,tileX, chn;
// showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
double centerX; // center of aberration-corrected (common model) tile, X
double centerY; //
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
chn=nTile/nTilesInChn;
tileY =(nTile % nTilesInChn)/tilesX;
tileX = nTile % tilesX;
centerX = tileX * transform_size - transform_size/2 - shiftX;
centerY = tileY * transform_size - transform_size/2 - shiftY;
double [] fract_shiftXY = extract_correct_tile( // return a pair of residual offsets
imade_data,
width, // image width
clt_kernels, // [color][tileY][tileX][band][pixel]
clt_data[chn][tileY][tileX], //double [][] clt_tile, // should be double [4][];
kernel_step,
transform_size,
dtt,
chn,
centerX, // center of aberration-corrected (common model) tile, X
centerY, //
(globalDebugLevel > -1) && (tileX == debug_tileX) && (tileY == debug_tileY) && (chn == 2), // external tile compare
no_deconvolution,
transpose);
if ((globalDebugLevel > -1) && (debug_tileX == tileX) && (debug_tileY == tileY) && (chn == 2)) {
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
String [] titles = {"CC","SC","CS","SS"};
sdfa_instance.showArrays(clt_data[chn][tileY][tileX], transform_size, transform_size, true, "pre-shifted_x"+tileX+"_y"+tileY, titles);
}
if (!no_fract_shift) {
// apply residual shift
fract_shift( // fractional shift in transform domain. Currently uses sin/cos - change to tables with 2? rotations
clt_data[chn][tileY][tileX], // double [][] clt_tile,
transform_size,
fract_shiftXY[0], // double shiftX,
fract_shiftXY[1], // double shiftY,
(globalDebugLevel > 0) && (tileX == debug_tileX) && (tileY == debug_tileY)); // external tile compare
if ((globalDebugLevel > -1) && (debug_tileX == tileX) && (debug_tileY == tileY)) {
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
String [] titles = {"CC","SC","CS","SS"};
sdfa_instance.showArrays(clt_data[chn][tileY][tileX], transform_size, transform_size, true, "shifted_x"+tileX+"_y"+tileY, titles);
}
}
}
}
};
}
startAndJoin(threads);
return clt_data;
}
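For orientation, a minimal call sketch for clt_aberrations() as declared above; the instance and buffer names (image_dtt, image_data, clt_kernels) and all parameter values are illustrative assumptions, not part of this commit:

// Assumes an existing ImageDtt instance "image_dtt" and already-loaded image planes and CLT kernels (hypothetical names).
double [][][][][] clt_data = image_dtt.clt_aberrations(
    image_data,   // double [][]: one sparse Bayer plane per color channel
    width,        // image width in pixels (height is derived from plane length / width)
    clt_kernels,  // [color][tileY][tileX][band][pixel], e.g. produced by readCLTKernels()
    16,           // kernel_step: source kernel grid pitch in pixels
    8,            // transform_size
    1,            // window_type (illustrative value)
    0.0, 0.0,     // shiftX, shiftY: debug-only image shifts
    -1, -1,       // debug_tileX, debug_tileY: no tile matches, so per-tile debug output stays off
    false,        // no_fract_shift
    false,        // no_deconvolution
    false,        // transpose
    threadsMax,
    debugLevel);
// clt_data[chn][tileY][tileX] then holds the four transform_size x transform_size
// CC/SC/CS/SS bands of the aberration-corrected tile.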
// perform 2d clt, result is [tileY][tileX][cc_sc_cs_ss][index_in_tile]
public double [][][][] clt_2d(
final double [] dpixels,
...@@ -1050,7 +1142,7 @@ public class ImageDtt {
filter_direct[i] /= sum;
}
if (globalDebugLevel > 1) {
for (int i=0; i<filter_direct.length;i++){
System.out.println("dct_lpf_psf() "+i+": "+filter_direct[i]);
}
...@@ -1060,7 +1152,7 @@ public class ImageDtt {
final double [] dbg_filter= dtt.dttt_ii(filter);
for (int i=0; i < filter.length;i++) filter[i] *= 2*dct_size;
if (globalDebugLevel > 1) {
for (int i=0; i<filter.length;i++){
System.out.println("dct_lpf_psf() "+i+": "+filter[i]);
}
...@@ -1252,19 +1344,6 @@ public class ImageDtt {
double [][][][][] dct_data = new double [nChn][][][][];
float [] fpixels;
int i,chn; //tileX,tileY;
/* find number of the green channel - should be called "green", if none - use last */
// Extract float pixels from image stack, convert each to double
// EyesisDCT.DCTKernels dct_kernels = null;
// dct_kernels = eyesisDCT.kernels[subcamera];
// if (dct_kernels == null){
// System.out.println("No DCT kernels available for subcamera # "+subcamera);
// } else if (debugLevel>0){
// System.out.println("Using DCT kernels for subcamera # "+subcamera);
// }
// if (dctParameters.kernel_chn >=0 ){
// dct_kernels = eyesisDCT.kernels[dctParameters.kernel_chn];
// }
for (chn=0;chn<nChn;chn++) { for (chn=0;chn<nChn;chn++) {
fpixels= (float[]) imageStack.getPixels(chn+1); fpixels= (float[]) imageStack.getPixels(chn+1);
...@@ -1331,13 +1410,15 @@ public class ImageDtt { ...@@ -1331,13 +1410,15 @@ public class ImageDtt {
void clt_convert_double_kernel( // converts double resolution kernel void clt_convert_double_kernel( // converts double resolution kernel
double [] src_kernel, // double [] src_kernel, //
double [] dst_kernel, // should be (2*dtt_size-1) * (2*dtt_size-1) +2 size - kernel and dx, dy to the nearest 1/2 pixels double [] dst_kernel, // should be (2*dtt_size-1) * (2*dtt_size-1) + extra_items size - kernel and dx, dy to the nearest 1/2 pixels + actual full center shift)
int src_size, // 64 int src_size, // 64
int dtt_size) // 8 int dtt_size) // 8
{ {
int [] indices = {0,-src_size,-1,1,src_size,-src_size-1,-src_size+1,src_size-1,src_size+1}; int [] indices = {0,-src_size,-1,1,src_size,-src_size-1,-src_size+1,src_size-1,src_size+1};
double [] weights = {0.25,0.125,0.125,0.125,0.125,0.0625,0.0625,0.0625,0.0625}; // double [] weights = {0.25,0.125,0.125,0.125,0.125,0.0625,0.0625,0.0625,0.0625};
// sum = 4.0, so sum of all kernels is ~ the same
double [] weights = {1.0, 0.5, 0.5, 0.5, 0.5, 0.25, 0.25, 0.25, 0.25};
int src_center = src_size / 2; // 32
// Find center
double sx=0.0, sy = 0.0, s = 0.0;
...@@ -1372,12 +1453,15 @@ public class ImageDtt {
}
dst_kernel[indx++] = 0.5*(src_x - src_center);
dst_kernel[indx++] = 0.5*(src_y - src_center);
dst_kernel[indx++] = 0.5*(sx / s); // actual center shift in pixels (to interpolate difference to neighbour regions)
dst_kernel[indx++] = 0.5*(sy / s);
}
void clt_normalize_kernel( //
double [] kernel, // should be (2*dtt_size-1) * (2*dtt_size-1) + 4 size (last 4 are not modified)
double [] window, // normalizes result kernel * window to have sum of elements == 1.0
int dtt_size, // 8
boolean bdebug)
{
double s = 0.0;
int indx = 0;
...@@ -1389,9 +1473,11 @@ public class ImageDtt {
}
}
s = 1.0/s;
int klen = (2*dtt_size-1) * (2*dtt_size-1);
if (bdebug) System.out.println("clt_normalize_kernel(): s="+s);
for (int i = 0; i < klen; i++) {
//******************** Somewhere scale 16 ? ********************
kernel[i] *= 16*s;
}
}
...@@ -1413,7 +1499,7 @@ public class ImageDtt {
sym_kernels[0][i*dtt_size+j] = 0.25*( kernel[indx0] + kernel[indx1] + kernel[indx2] + kernel[indx3]);
if (j > 0) sym_kernels[1][i*dtt_size+j-1] = 0.25*(-kernel[indx0] + kernel[indx1] - kernel[indx2] + kernel[indx3]);
if (i > 0) sym_kernels[2][(i-1)*dtt_size+j] = 0.25*(-kernel[indx0] - kernel[indx1] + kernel[indx2] + kernel[indx3]);
if ((i > 0) && (j > 0)) sym_kernels[3][(i-1)*dtt_size+(j-1)] = 0.25*(-kernel[indx0] + kernel[indx1] - kernel[indx2] + kernel[indx3]);
}
sym_kernels[1][i*dtt_size + dtt_size_m1] = 0.0;
sym_kernels[2][dtt_size_m1*dtt_size + i] = 0.0;
...@@ -1432,13 +1518,333 @@ public class ImageDtt {
kernels[quad] = dtt.dttt_iiie(kernels[quad], quad, dtt_size);
}
}
/*
void clt_fill_coord_corr ( // add 6 more items to extra data: dxc/dx, dxc/dy, dyc/dx, dyc/dy - pixel shift when applied to a different center
// and x0, y0 (which sensor pixel this kernel applies to) ? - not needed
double [][] kernels, // set of 4 SS, AS, SA, AA kernels, each dtt_size * dtt_size (may have 5-th with center shift
)
*/
public class CltExtra{
public double data_x = 0.0; // kernel data is relative to this displacement X (0.5 pixel increments)
public double data_y = 0.0; // kernel data is relative to this displacement Y (0.5 pixel increments)
public double center_x = 0.0; // actual center X (use to find derivatives)
public double center_y = 0.0; // actual center Y (use to find derivatives)
public double dxc_dx = 0.0; // add this to data_x per each pixel X-shift relative to the kernel center location
public double dxc_dy = 0.0; // same per each Y-shift pixel
public double dyc_dx = 0.0;
public double dyc_dy = 0.0;
public CltExtra(){}
public CltExtra(double [] data)
{
data_x = data[0]; // kernel data is relative to this displacement X (0.5 pixel increments)
data_y = data[1]; // kernel data is relative to this displacement Y (0.5 pixel increments)
center_x = data[2]; // actual center X (use to find derivatives)
center_y = data[3]; // actual center Y (use to find derivatives)
dxc_dx = data[4]; // add this to data_x per each pixel X-shift relative to the kernel center location
dxc_dy = data[5]; // same per each Y-shift pixel
dyc_dx = data[6];
dyc_dy = data[7];
}
public double [] getArray()
{
double [] rslt = {
data_x,
data_y,
center_x,
center_y,
dxc_dx,
dxc_dy,
dyc_dx,
dyc_dy
};
return rslt;
}
}
public void clt_fill_coord_corr(
final int kern_step, // distance between kernel centers, in pixels.
final double [][][][][] clt_data,
final int threadsMax, // maximal number of threads to launch
final int globalDebugLevel)
{
final int nChn=clt_data.length;
final int tilesY=clt_data[0].length;
final int tilesX=clt_data[0][0].length;
final int nTilesInChn=tilesX*tilesY;
final int nTiles=nTilesInChn*nChn;
final Thread[] threads = newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
int tileY,tileX,chn;
for (int nTile = ai.getAndIncrement(); nTile < nTiles; nTile = ai.getAndIncrement()) {
chn=nTile/nTilesInChn;
tileY =(nTile % nTilesInChn)/tilesX;
tileX = nTile % tilesX;
double s0=0.0, sx=0.0, sx2= 0.0, sy=0.0, sy2= 0.0, sz=0.0, sxz=0.0,
syz=0.0, sw=0.0, sxw=0.0, syw=0.0;
for (int dty = -1; dty < 2; dty++){
int ty = tileY+dty;
if ((ty >= 0) && (ty < tilesY)){
for (int dtx = -1; dtx < 2; dtx++){
int tx = tileX + dtx;
if ((tx >= 0) && (tx < tilesX)){
CltExtra ce = new CltExtra (clt_data[chn][ty][tx][4]);
s0 += 1;
sx += dtx;
sx2 += dtx*dtx;
sy += dty;
sy2 += dty*dty;
sz += ce.center_x;
sxz += dtx * ce.center_x;
syz += dty * ce.center_x;
sw += ce.center_y;
sxw += dtx * ce.center_y;
syw += dty * ce.center_y;
}
}
}
}
CltExtra ce = new CltExtra (clt_data[chn][tileY][tileX][4]);
double denom_x = (sx2*s0-sx*sx)*kern_step;
double denom_y = (sy2*s0-sy*sy)*kern_step;
ce.dxc_dx= (sxz*s0 - sz*sx)/denom_x;
ce.dxc_dy= (syz*s0 - sz*sy)/denom_y;
ce.dyc_dx= (sxw*s0 - sw*sx)/denom_x;
ce.dyc_dy= (syw*s0 - sw*sy)/denom_y;
clt_data[chn][tileY][tileX][4] = ce.getArray();
}
}
};
}
startAndJoin(threads);
}
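The derivative fields filled here are ordinary least-squares slopes of the neighboring kernels' center_x / center_y values against the tile offsets dtx, dty, converted from per-kernel-step to per-pixel units by the extra kern_step factor in the denominator. In the accumulator notation of the loop above (a restatement of the code, not an addition to it):

$$\frac{\partial x_c}{\partial x} = \frac{s_0\,s_{xz}-s_x\,s_z}{(s_0\,s_{x2}-s_x^2)\cdot \text{kern\_step}},\qquad \frac{\partial x_c}{\partial y} = \frac{s_0\,s_{yz}-s_y\,s_z}{(s_0\,s_{y2}-s_y^2)\cdot \text{kern\_step}}$$

and analogously for dyc_dx, dyc_dy with the s_w accumulators (center_y sums) in place of s_z.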
public class CltTile{
public double [][] tile = new double[4][]; // 4 CLT tiles
public double fract_x; // remaining fractional offset X
public double fract_y; // remaining fractional offset Y
}
// Extract and correct one image tile using kernel data, required result tile and shifts - x and y
// option - align to Bayer (integer shift by even pixels) - not needed
// input - RBG stack of sparse data
// return
// kernel [0][0] is centered at (-kernel_step/2,-kernel_step/2)
public double [] extract_correct_tile( // return a pair of residual offsets
double [][] imade_data,
int width, // image width
double [][][][][] clt_kernels, // [color][tileY][tileX][band][pixel]
double [][] clt_tile, // should be double [4][];
int kernel_step,
int transform_size,
DttRad2 dtt,
int chn,
double centerX, // center of aberration-corrected (common model) tile, X
double centerY, //
boolean bdebug, // external tile compare
boolean dbg_no_deconvolution,
boolean dbg_transpose)
{
double [] residual_shift = new double[2];
int height = imade_data[0].length/width;
int transform_size2 = 2* transform_size;
// if (dtt == null) dtt = new DttRad2(transform_size); should have window set up
double [] tile_in = new double [4*transform_size*transform_size];
// 1. find closest kernel
int ktileX = (int) Math.round(centerX/kernel_step) + 1;
int ktileY = (int) Math.round(centerY/kernel_step) + 1;
if (ktileY < 0) ktileY = 0;
else if (ktileY >= clt_kernels[chn].length) ktileY = clt_kernels[chn].length-1;
if (ktileX < 0) ktileX = 0;
else if (ktileX >= clt_kernels[chn][ktileY].length) ktileX = clt_kernels[chn][ktileY].length-1;
CltExtra ce = new CltExtra (clt_kernels[chn][ktileY][ktileX][4]);
// 2. calculate correction for center of the kernel offset
double kdx = centerX - (ktileX -1 +0.5) * kernel_step; // difference in pixel
double kdy = centerY - (ktileY -1 +0.5) * kernel_step;
// 3. find the fractional top-left corner of the source tile
// check signs: ce.data_x is "-" as if the original kernel was shifted to "+", so the pixel data needs to be taken shifted "-"
// same with extra shift
double px = centerX - transform_size - (ce.data_x + ce.dxc_dx * kdx + ce.dxc_dy * kdy) ; // fractional left corner
double py = centerY - transform_size - (ce.data_y + ce.dyc_dx * kdx + ce.dyc_dy * kdy) ; // fractional top corner
int ctile_left = (int) Math.round(px);
int ctile_top = (int) Math.round(py);
residual_shift[0] = -(px - ctile_left);
residual_shift[1] = -(py - ctile_top);
// 4. Verify the tile fits in image and use System.arraycopy(sym_conv, 0, tile_in, 0, n2*n2) to copy data to tile_in
// if does not fit - extend by duplication? Or just use 0?
if ((ctile_left >= 0) && (ctile_left < (width - transform_size2)) &&
(ctile_top >= 0) && (ctile_top < (height - transform_size2))) {
for (int i = 0; i < transform_size2; i++){
System.arraycopy(imade_data[chn], (ctile_top + i) * width + ctile_left, tile_in, transform_size2 * i, transform_size2);
}
} else { // copy by 1
for (int i = 0; i < transform_size2; i++){
int pi = ctile_top + i;
if (pi < 0) pi = 0;
else if (pi >= height) pi = height - 1;
for (int j = 0; j < transform_size2; j++){
int pj = ctile_left + j;
if (pj < 0) pj = 0;
else if (pj >= width) pj = width - 1;
tile_in[transform_size2 * i + j] = imade_data[chn][pi * width + pj];
}
}
}
// Fold and transform
for (int dct_mode = 0; dct_mode <4; dct_mode++) {
clt_tile[dct_mode] = dtt.fold_tile (tile_in, transform_size, dct_mode); // DCCT, DSCT, DCST, DSST
clt_tile[dct_mode] = dtt.dttt_iv (clt_tile[dct_mode], dct_mode, transform_size);
}
if (bdebug) {
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
sdfa_instance.showArrays(tile_in, transform_size2, transform_size2, "tile_in_x"+ctile_left+"_y"+ctile_top);
String [] titles = {"CC","SC","CS","SS"};
sdfa_instance.showArrays(clt_tile, transform_size, transform_size, true, "clt_x"+ctile_left+"_y"+ctile_top, titles);
}
// deconvolve with kernel
if (!dbg_no_deconvolution) {
double [][] ktile = clt_kernels[chn][ktileY][ktileX];
convolve_tile(
clt_tile, // double [][] data, // array [transform_size*transform_size], will be updated DTT4 converted
ktile, // double [][] kernel, // array [4][transform_size*transform_size] DTT3 converted
transform_size,
bdebug);
// dbg_transpose);
}
if (bdebug) {
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
String [] titles = {"CC","SC","CS","SS"};
sdfa_instance.showArrays(clt_tile, transform_size, transform_size, true, "acorr_x"+ctile_left+"_y"+ctile_top, titles);
}
return residual_shift;
}
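To make the index arithmetic of steps 1-3 concrete, a worked example with assumed values (kernel_step = 16, transform_size = 8, centerX = 100.0, kernel extra data data_x = 0.7 and zero derivative terms; all numbers are illustrative only):

ktileX = round(100.0 / 16) + 1 = 6 + 1 = 7
kdx = 100.0 - (7 - 1 + 0.5) * 16 = 100.0 - 104.0 = -4.0 (pixels from that kernel's center)
px = 100.0 - 8 - 0.7 = 91.3, so ctile_left = 91 and residual_shift[0] = -0.3

The 16x16 window is therefore read starting at integer column 91, and the remaining -0.3 pixel offset is removed later in the transform domain by fract_shift().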
// public
public void convolve_tile(
double [][] data, // array [transform_size*transform_size], will be updated DTT4 converted
double [][] kernel, // array [4][transform_size*transform_size] DTT3 converted
int transform_size,
boolean bdebug) // externally decoded debug tile
// boolean dbg_transpose)
{
/* Direct matrix Z1: X2 ~= Z1 * Shift
* {{+cc -sc -cs +ss},
* {+sc +cc -ss -cs},
* {+cs -ss +cc -sc},
* {+ss +cs +sc +cc}}
*
* T= transp({cc, sc, cs, ss})
*/
/*
final int [][] zi =
{{ 0, -1, -2, 3},
{ 1, 0, -3, -2},
{ 2, -3, 0, -1},
{ 3, 2, 1, 0}};
final int [][] zi =
{{ 0, 1, 2, 3},
{-1, 0, -3, 2},
{-2, -3, 0, 1},
{ 3, -2, -1, 0}};
*/
// opposite sign from correlation
final int [][] zi = { //
{ 0, -1, -2, 3},
{ 1, 0, -3, -2},
{ 2, -3, 0, -1},
{ 3, 2, 1, 0}};
final int transform_len = transform_size * transform_size;
final double [][] rslt = new double[4][transform_len];
for (int i = 0; i < transform_len; i++) {
for (int n = 0; n<4; n++){
rslt[n][i] = 0;
for (int k=0; k<4; k++){
if (zi[n][k] < 0)
rslt[n][i] -= data[-zi[n][k]][i] * kernel[k][i];
else
rslt[n][i] += data[ zi[n][k]][i] * kernel[k][i];
}
}
}
if (bdebug) {
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
String [] titles = {"CC","SC","CS","SS"};
double [][] dbg_kern = {kernel[0],kernel[1],kernel[2],kernel[3]};
sdfa_instance.showArrays(data, transform_size, transform_size, true, "image_data", titles);
sdfa_instance.showArrays(dbg_kern, transform_size, transform_size, true, "kernel", titles);
sdfa_instance.showArrays(rslt, transform_size, transform_size, true, "aber_corr", titles);
}
for (int n = 0; n<4; n++){
data[n] = rslt[n];
}
}
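Expanding the zi sign table, the per-pixel product computed above is, with d = data and k = kernel in CC/SC/CS/SS order (a restatement of the loop, shown for readability):

rslt_CC = d_CC*k_CC - d_SC*k_SC - d_CS*k_CS + d_SS*k_SS
rslt_SC = d_SC*k_CC + d_CC*k_SC - d_SS*k_CS - d_CS*k_SS
rslt_CS = d_CS*k_CC - d_SS*k_SC + d_CC*k_CS - d_SC*k_SS
rslt_SS = d_SS*k_CC + d_CS*k_SC + d_SC*k_CS + d_CC*k_SS

i.e. the same structure as multiplying two separable 2-D phase factors, with the sign pattern opposite to the one used for correlation, as the comment above notes.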
public void fract_shift( // fractional shift in transform domain. Currently uses sin/cos - change to tables with 2? rotations
double [][] clt_tile,
int transform_size,
double shiftX,
double shiftY,
boolean bdebug)
{
int transform_len = transform_size*transform_size;
double [] cos_hor = new double [transform_len];
double [] sin_hor = new double [transform_len];
double [] cos_vert = new double [transform_len];
double [] sin_vert = new double [transform_len];
for (int i = 0; i < transform_size; i++){
double ch = Math.cos((i+0.5)*Math.PI*shiftX/transform_size);
double sh = Math.sin((i+0.5)*Math.PI*shiftX/transform_size);
double cv = Math.cos((i+0.5)*Math.PI*shiftY/transform_size);
double sv = Math.sin((i+0.5)*Math.PI*shiftY/transform_size);
for (int j = 0; j < transform_size; j++){
int iv = transform_size * j + i; // 2d DTT results are stored transposed!
int ih = transform_size * i + j;
cos_hor[ih] = ch;
sin_hor[ih] = sh;
cos_vert[iv] = cv;
sin_vert[iv] = sv;
}
}
if (bdebug){
showDoubleFloatArrays sdfa_instance = new showDoubleFloatArrays(); // just for debugging?
String [] titles = {"cos_hor","sin_hor","cos_vert","sin_vert"};
double [][] cs_dbg = {cos_hor, sin_hor, cos_vert, sin_vert};
sdfa_instance.showArrays(cs_dbg, transform_size, transform_size, true, "shift_cos_sin", titles);
}
double [][] tmp_tile = new double [4][transform_len];
// Horizontal shift; CLT tile data is stored transposed (horizontal -> Y, vertical -> X)
for (int i = 0; i < cos_hor.length; i++) {
tmp_tile[0][i] = clt_tile[0][i] * cos_hor[i] - clt_tile[1][i] * sin_hor[i];
tmp_tile[1][i] = clt_tile[1][i] * cos_hor[i] + clt_tile[0][i] * sin_hor[i] ;
tmp_tile[2][i] = clt_tile[2][i] * cos_hor[i] - clt_tile[3][i] * sin_hor[i];
tmp_tile[3][i] = clt_tile[3][i] * cos_hor[i] + clt_tile[2][i] * sin_hor[i] ;
}
// Vertical shift (back to original array)
for (int i = 0; i < cos_hor.length; i++) {
clt_tile[0][i] = tmp_tile[0][i] * cos_vert[i] - tmp_tile[2][i] * sin_vert[i];
clt_tile[2][i] = tmp_tile[2][i] * cos_vert[i] + tmp_tile[0][i] * sin_vert[i];
clt_tile[1][i] = tmp_tile[1][i] * cos_vert[i] - tmp_tile[3][i] * sin_vert[i];
clt_tile[3][i] = tmp_tile[3][i] * cos_vert[i] + tmp_tile[1][i] * sin_vert[i];
}
}
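In effect, fract_shift() applies the transform-domain shift theorem: each frequency bin is rotated by a phase that grows linearly with the bin index, horizontally first and then vertically. With the angles used above (a restatement of the code in equation form):

$$\theta_h(i) = \pi\,(i+0.5)\,\frac{\text{shiftX}}{\text{transform\_size}},\qquad \theta_v(j) = \pi\,(j+0.5)\,\frac{\text{shiftY}}{\text{transform\_size}}$$

the horizontal pass rotates the pairs (CC, SC) and (CS, SS) by theta_h, and the vertical pass rotates (CC, CS) and (SC, SS) by theta_v, e.g. CC' = CC*cos(theta_h) - SC*sin(theta_h), SC' = SC*cos(theta_h) + CC*sin(theta_h).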
public double [][][][] mdctScale(
final ImageStack imageStack,
final int subcamera, // not needed
final EyesisCorrectionParameters.DCTParameters dctParameters, //
final int threadsMax, // maximal step in pixels on the maxRadius for 1 angular step (i.e. 0.5)
final int debugLevel,
...