            for (int i = 0; i < channels.length; i++) if (eyesisCorrections.isChannelEnabled(channels[i])){
                if (!enabledFiles[nFile]) numFilesToProcess++;
                enabledFiles[nFile] = true;
                numImagesToProcess++;
            }
        }
    }
}
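// If nothing is enabled, report and bail out; otherwise log the counts: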
if (numFilesToProcess == 0){
    System.out.println("No files to process (of " + sourceFiles.length + ")");
    return;
} else {
    if (debugLevel > 0) System.out.println(numFilesToProcess + " files to process (of " + sourceFiles.length + "), " + numImagesToProcess + " images to process");
}
double [] referenceExposures=eyesisCorrections.calcReferenceExposures(debugLevel); // multiply each image by this and divide by individual (if not NaN)
int [][] fileIndices=new int [numImagesToProcess][2]; // file index, channel number
int index=0;
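// Second pass: record a (file index, channel number) pair for every enabled image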
for (int nFile = 0; nFile < enabledFiles.length; nFile++){
    if ((sourceFiles[nFile] != null) && (sourceFiles[nFile].length() > 1)) {
        int [] channels = {correctionsParameters.getChannelFromSourceTiff(sourceFiles[nFile])};
        if (correctionsParameters.isJP4()){
            int subCamera = channels[0] - correctionsParameters.firstSubCamera; // to match those in the sensor files
double [] scaleExposures = new double[channelFiles.length];
for (int srcChannel = 0; srcChannel < channelFiles.length; srcChannel++){
    int nFile = channelFiles[srcChannel];
    imp_srcs[srcChannel] = null;
    if (nFile >= 0){
        if (correctionsParameters.isJP4()){
            int subchannel = eyesisCorrections.pixelMapping.getSubChannel(srcChannel);
            if (this.correctionsParameters.swapSubchannels01) {
                switch (subchannel){
                case 0: subchannel = 1; break;
                case 1: subchannel = 0; break;
                }
            }
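            // subchannel now reflects the sensor order after the optional 0/1 swap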
            if (debugLevel > 0) System.out.println("Processing set " + setNames.get(nSet) + " channel " + srcChannel +
                " - subchannel " + subchannel + " of " + sourceFiles[nFile]);
            if (pixels.length != eyesisCorrections.channelVignettingCorrection[srcChannel].length){
                System.out.println("Vignetting data for channel " + srcChannel + " has " +
                    eyesisCorrections.channelVignettingCorrection[srcChannel].length +
                    " pixels, image " + sourceFiles[nFile] + " has " + pixels.length);
                return;
            }
            // TODO: Move to do it once:
            double min_non_zero = 0.0;
            for (int i = 0; i < pixels.length; i++){
                double d = eyesisCorrections.channelVignettingCorrection[srcChannel][i];
                if ((d > 0.0) && ((min_non_zero == 0) || (d < min_non_zero))){
                    min_non_zero = d; // track the smallest positive correction coefficient
                }
            }
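            // min_non_zero is presumably used to bound the vignetting gain downstream
            // (clamping near-zero coefficients) before the pixels are scaled.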
if (set_channels_aux.length <= 0) { // restored guard; condition assumed from context
    System.out.println("No files for the auxiliary camera match series " + quadCLT_main.image_name);
    return null;
}
false,// final boolean notch_mode, // use notch filter for inter-camera correlation to detect poles
0,// final int lt_rad, // low texture mode - inter-correlation is averaged between the neighbors before argmax-ing, using
// use default mode:
// first measurement - use default value:
clt_parameters.rig.no_int_x0,// boolean no_int_x0, // do not offset window to integer maximum - used when averaging low textures to avoid "jumps" for very wide
threadsMax,// final int threadsMax, // maximal number of threads to launch
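// Arguments for the rig/tile measurement, common to the main and aux cameras: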
quadCLT_main,// QuadCLT quadCLT_main, // tiles should be set
quadCLT_aux,// QuadCLT quadCLT_aux,
tile_op,// int [][] tile_op, // common for both main and aux
disparity_array,// double [][] disparity_array,
null,// double [][] ml_data, // data for ML - 10 layers - 4 center areas (3x3, 5x5,..) per camera-per direction, 1 - composite, and 1 with just 1 data (target disparity)
System.out.println("No files to process (of "+sourceFiles.length+")");
return;
}
double [] referenceExposures_main = quadCLT_main.eyesisCorrections.calcReferenceExposures(debugLevel); // multiply each image by this and divide by individual (if not NaN)
double [] referenceExposures_aux  = quadCLT_aux.eyesisCorrections.calcReferenceExposures(debugLevel);  // multiply each image by this and divide by individual (if not NaN)
throw new Exception("Set names for cameras do not match: main camera: '" + set_channels_main[nSet].name() + "', aux. camera: '" + set_channels_aux[nSet].name() + "'");
scaleExposures_main,// double [] scaleExposures_main, // probably not needed here - restores brightness of the final image
scaleExposures_aux,// double [] scaleExposures_aux, // probably not needed here - restores brightness of the final image
false,// final boolean notch_mode, // use notch filter for inter-camera correlation to detect poles
// averages measurements
clt_parameters.rig.lt_avg_radius,// final int lt_rad, // low texture mode - inter-correlation is averaged between the neighbors before argmax-ing, using
threadsMax,// final int threadsMax, // maximal number of threads to launch