Commit 7c62f986 authored by Andrey Filippov

Batch operation, video output

parent 29a3dec2
......@@ -94,7 +94,7 @@ public class CalibrationIllustrationParameters {
private double min_ts = 0.0;
private double max_ts = 2000000000.0;
int auto_range_wnd_type = 3; // 0 - piramid, 1 half-sin, 2-piramid squared, 3 - sin^2
int auto_range_wnd_type = 3; // 0 - pyramid, 1 half-sin, 2-pyramid squared, 3 - sin^2
boolean calib_offs_gain = true; // perform offset/gain calibration
double calib_offs_gain_ts = 0.0; // timestamp to start gain calibration
double calib_offs_gain_dur = 2000000000.0; // duration for gain calibration
......
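The window-type comment fixed above only names the four shapes. Below is a minimal sketch of what they could compute for a normalized position x in [0,1]; the class and method names and the exact formulas are assumptions inferred from the names "pyramid", "half-sin", "pyramid squared" and "sin^2", not code from this commit.

public class AutoRangeWindowSketch {
    // Assumed shapes: triangle ("pyramid"), half-sine, squared triangle, and sin^2 (raised cosine).
    static double window(int wnd_type, double x) {
        double tri = 1.0 - Math.abs(2.0 * x - 1.0); // triangle, peaks at x = 0.5
        switch (wnd_type) {
            case 0:  return tri;                                  // 0 - pyramid
            case 1:  return Math.sin(Math.PI * x);                // 1 - half-sin
            case 2:  return tri * tri;                            // 2 - pyramid squared
            case 3:  return Math.pow(Math.sin(Math.PI * x), 2.0); // 3 - sin^2
            default: throw new IllegalArgumentException("wnd_type=" + wnd_type);
        }
    }
    public static void main(String[] args) {
        for (int t = 0; t < 4; t++) {
            System.out.printf("type %d: w(0.25)=%.3f w(0.50)=%.3f%n", t, window(t, 0.25), window(t, 0.50));
        }
    }
}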
......@@ -178,7 +178,7 @@ public class EyesisCorrections {
for (int nChn=0;nChn< this.usedChannels.length; nChn++) if (this.usedChannels[nChn]) sChannels+=" "+nChn;
System.out.println ("Number of used channels: "+numUsedChannels+" ("+sChannels+" )");
}
createChannelVignetting(correct_vignetting);
createChannelVignetting(correct_vignetting); // *** THIS READS ALL SOURCE FILES ***
if ((this.debugLevel>101) && (correctionsParameters.sourcePaths!=null) && (correctionsParameters.sourcePaths.length>0)) {
testFF(correctionsParameters.sourcePaths[0]);
}
......
......@@ -5930,7 +5930,7 @@ public class Eyesis_Correction implements PlugIn, ActionListener {
if (DEBUG_LEVEL > -2) {
System.out.println("++++++++++++++ Running initSensorFiles for the auxiliary camera ++++++++++++++");
}
EYESIS_CORRECTIONS_AUX.initSensorFiles(DEBUG_LEVEL + 2, false, // true,
EYESIS_CORRECTIONS_AUX.initSensorFiles(DEBUG_LEVEL + 2, false, // true, // ***** here reads all files *****
true, // false,
COLOR_PROC_PARAMETERS_AUX.correct_vignetting); // boolean correct_vignetting
......
......@@ -1114,8 +1114,16 @@ public class GpuQuad{ // quad camera description
final int corr_mask, // <0 - use corr mask from the tile tile_op, >=0 - overwrite all with non-zero corr_mask_tp
final int threadsMax) // maximal number of threads to launch
{
final int tilesX = getTilesX();
final int tilesY = getTilesY();
final int tilesX = tile_op[0].length; //getTilesX();
final int tilesY = tile_op.length; // getTilesY();
if (tilesY > tile_op.length) { // sanity check: tilesY is derived from tile_op above, so this should never trigger
System.out.println("BUG!!!! tilesX="+tilesX+", tilesY="+tilesY+", tile_op.length="+
tile_op.length+", tile_op[0].length="+tile_op[0].length);
}
final AtomicInteger ai = new AtomicInteger(0);
final AtomicBoolean acorrs = new AtomicBoolean(false);
final List<TpTask> task_list = new CopyOnWriteArrayList<TpTask>();
......@@ -1630,6 +1638,27 @@ public class GpuQuad{ // quad camera description
null ); //int [] wh
}
public void deallocate4Images(int [] wh) {
if (gpu_4_images_wh != null) {
if (wh == null) {
wh = new int[] {img_width, img_height};
}
if ((gpu_4_images_wh[0] != wh[0]) || (gpu_4_images_wh[1] != wh[1])) {
deallocate4Images();
}
}
}
public void deallocate4Images() {
if (gpu_4_images_wh != null) {
for (int ncam = 0; ncam < num_cams; ncam++) {
cuMemFree (gpu_corr_images_h[ncam]);
}
cuMemFree (gpu_4_images);
gpu_4_images = null;
gpu_4_images_wh = null;
}
}
public void execImcltRbgAll( // Now allocates/re-allocates GPU memory
boolean is_mono,
boolean ref_scene,
......@@ -1648,6 +1677,8 @@ public class GpuQuad{ // quad camera description
int tilesX = wh[0] / GPUTileProcessor.DTT_SIZE;
int tilesY = wh[1] / GPUTileProcessor.DTT_SIZE;
// Free if allocated but size mismatch
deallocate4Images(wh);
/*
if ((gpu_4_images_wh != null) && ((gpu_4_images_wh[0] != wh[0]) || (gpu_4_images_wh[1] != wh[1]))) {
for (int ncam = 0; ncam < num_cams; ncam++) {
cuMemFree (gpu_corr_images_h[ncam]);
......@@ -1655,7 +1686,7 @@ public class GpuQuad{ // quad camera description
cuMemFree (gpu_4_images);
gpu_4_images = null;
gpu_4_images_wh = null;
}
}*/
// Allocate if was not allocated or was freed
if (gpu_4_images == null) { // Allocate memory, create pointers
long [] device_stride = new long [1];
......@@ -3474,6 +3505,14 @@ public class GpuQuad{ // quad camera description
int tileY = tile / full_width;
int wtileX = tileX - woi.x;
int wtileY = tileY - woi.y;
if ((tileY >= texture_tiles.length) || (tileX >= texture_tiles[0].length)) {
System.out.println("BUG!!!! tileX="+tileX+", tileY="+tileY+", texture_tiles.length="+
texture_tiles.length+", texture_tiles[0].length="+texture_tiles[0].length);
}
texture_tiles[tileY][tileX] = new double [num_slices][texture_slice_size];
if ((wtileX >=0 ) && (wtileX < woi.width) && (wtileY >= 0) && (wtileY < woi.height)) {
for (int slice = 0; slice < num_slices; slice++) {
......
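The deallocate4Images() overloads added above replace the inline free logic that is commented out further down in execImcltRbgAll(). The following is a simplified, CPU-only sketch of the same size-guarded re-allocation pattern; the ImageBuffers wrapper and its field names are illustrative, while the real code frees CUDA pointers with cuMemFree().

public class ImageBuffers {
    private float[][] images;    // stands in for gpu_corr_images_h / gpu_4_images
    private int[]     images_wh; // stands in for gpu_4_images_wh

    void deallocate(int[] wh) {  // free only when the remembered window size differs from the requested one
        if ((images_wh != null) && (wh != null)
                && ((images_wh[0] != wh[0]) || (images_wh[1] != wh[1]))) {
            deallocate();
        }
    }
    void deallocate() {          // unconditional free, as deallocate4Images() does for the GPU buffers
        images = null;
        images_wh = null;
    }
    void ensureAllocated(int[] wh, int num_cams) {
        deallocate(wh);          // free if allocated for a different window size
        if (images == null) {    // allocate if never allocated or just freed
            images = new float[num_cams][wh[0] * wh[1]];
            images_wh = wh.clone();
        }
    }
    public static void main(String[] args) {
        ImageBuffers buf = new ImageBuffers();
        buf.ensureAllocated(new int[]{640, 512}, 4); // first allocation
        buf.ensureAllocated(new int[]{640, 512}, 4); // same size: kept as-is
        buf.ensureAllocated(new int[]{320, 256}, 4); // size change: freed, then re-allocated
    }
}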
......@@ -142,10 +142,21 @@ public class ImageDtt extends ImageDttCPU {
final boolean macro_mode = macro_scale != 1; // correlate tile data instead of the pixel data
final int numcol = isMonochrome()?1:3;
// FIXME: maybe something else is needed.
// When switching from larger images to smaller ones, the requested texture was smaller than
// the GPU window size, which was still set up for the larger image.
if (texture_tiles != null) { // maybe something else is needed
// GPUTileProcessor.DTT_SIZE
if ((texture_tiles.length != gpuQuad.getTilesY()) ||
(texture_tiles[0].length != gpuQuad.getTilesX())) {
gpuQuad.deallocate4Images();
}
}
final int width = gpuQuad.getImageWidth();
final int height = gpuQuad.getImageHeight();
final int tilesX=gpuQuad.getTilesX(); // width/transform_size;
final int tilesX=gpuQuad.getTilesX(); // width/transform_size; // still old - set before the GPU window is updated
final int tilesY=gpuQuad.getTilesY(); // final int tilesY=height/transform_size;
final Thread[] threads = newThreadArray(threadsMax);
final AtomicInteger ai = new AtomicInteger(0);
......@@ -270,7 +281,7 @@ public class ImageDtt extends ImageDttCPU {
final boolean use_main = geometryCorrection_main != null;
boolean [] used_corrs = new boolean[1];
final int all_pairs = imgdtt_params.dbg_pair_mask; //TODO: use tile tasks
final TpTask[] tp_tasks = gpuQuad.setTpTask(
final TpTask[] tp_tasks = gpuQuad.setTpTask( // tile-op is 80x64
disparity_array, // final double [][] disparity_array, // [tilesY][tilesX] - individual per-tile expected disparity
disparity_corr, // final double disparity_corr,
used_corrs, // final boolean [] need_corrs, // should be initialized to boolean[1] or null
......
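The texture/GPU grid check added above compares tile counts rather than pixel sizes. The small sketch below shows the arithmetic behind it, assuming a transform (DTT) size of 8 pixels as used throughout this project; the class name and the example numbers are illustrative (the 80x64 grid matches the "tile-op is 80x64" note above).

public class TileGridSketch {
    static final int DTT_SIZE = 8; // assumed equal to GPUTileProcessor.DTT_SIZE
    // The GPU window is measured in DTT_SIZE tiles; a mismatch means the buffers were
    // allocated for a different (e.g. larger) image and should be freed before reuse.
    static boolean gridMatches(int img_width, int img_height, int gpu_tilesX, int gpu_tilesY) {
        return ((img_width  / DTT_SIZE) == gpu_tilesX) &&
               ((img_height / DTT_SIZE) == gpu_tilesY);
    }
    public static void main(String[] args) {
        System.out.println(gridMatches(640, 512,  80,  64)); // true:  640x512 pixels -> 80x64 tiles
        System.out.println(gridMatches(640, 512, 160, 120)); // false: buffers sized for a larger window
    }
}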
......@@ -175,6 +175,15 @@ public class IntersceneLma {
par_mask = param_select;
macrotile_centers = centers;
num_samples = 2 * centers.length;
/*
for (int i = 0; i < vector_XYS.length; i++){
if (((vector_XYS[i] == null) && (centers[i]!=null)) ||
((vector_XYS[i] != null) && (centers[i]==null))) {
vector_XYS[i] = null;
centers[i]= null;
}
}
*/
ErsCorrection ers_ref = reference_QuadClt.getErsCorrection();
ErsCorrection ers_scene = scene_QuadClt.getErsCorrection();
final double [] scene_xyz = (scene_xyz0 != null) ? scene_xyz0 : ers_scene.camera_xyz;
......@@ -198,13 +207,13 @@ public class IntersceneLma {
int num_pars = 0;
for (int i = 0; i < par_mask.length; i++) if (par_mask[i]) num_pars++;
par_indices = new int [num_pars];
num_pars = 0;
num_pars = 0;
for (int i = 0; i < par_mask.length; i++) if (par_mask[i]) par_indices[num_pars++] = i;
parameters_vector = new double [par_indices.length];
for (int i = 0; i < par_indices.length; i++) parameters_vector[i] = full_parameters_vector[par_indices[i]];
// parameters_initial = parameters_vector.clone();
setSamplesWeights(vector_XYS); // not regularization yet !
setSamplesWeights(vector_XYS); // not regularized yet !
last_jt = new double [parameters_vector.length][];
if (debug_level > 1) {
......@@ -218,7 +227,7 @@ public class IntersceneLma {
debug_level); // final int debug_level)
double [][] wjtj = getWJtJlambda( // USED in lwir all NAN
0.0, // final double lambda,
last_jt); // final double [][] jt)
last_jt); // final double [][] jt) all 0???
for (int i = 0; i < parameters_vector.length; i++) {
int indx = num_samples + i;
weights[indx] = param_regweights[par_indices[i]]/Math.sqrt(wjtj[i][i]);
......@@ -533,7 +542,7 @@ public class IntersceneLma {
private void setSamplesWeights(
final double [][] vector_XYS) // not regularization yet
final double [][] vector_XYS) // not regularized yet
{
this.weights = new double [num_samples + parameters_vector.length];
......@@ -575,7 +584,9 @@ public class IntersceneLma {
ImageDtt.startAndJoin(threads);
sum_weights = asum_weight.sum();
}
if (sum_weights <= 1E-8) {
System.out.println("!!!!!! setSamplesWeights(): sum_weights=="+sum_weights+" <= 1E-8");
}
ai.set(0);
// final double s = 0.5/asum_weight.sum();
final double s = 0.5/sum_weights;
......@@ -593,7 +604,7 @@ public class IntersceneLma {
pure_weight = 1.0;
}
/*
@Deprecated
private void normalizeWeights_old()
{
......@@ -633,8 +644,8 @@ public class IntersceneLma {
};
}
ImageDtt.startAndJoin(threads);
}
*/
private void normalizeWeights()
{
......@@ -642,7 +653,7 @@ public class IntersceneLma {
final AtomicInteger ai = new AtomicInteger(0);
double full_weight, sum_weight_pure;
if (thread_invariant) {
sum_weight_pure = 0;
sum_weight_pure = 0;
for (int i = 0; i < num_samples; i++) {
sum_weight_pure += weights[i];
}
......@@ -754,7 +765,7 @@ public class IntersceneLma {
threads[ithread] = new Thread() {
public void run() {
for (int iMTile = ai.getAndIncrement(); iMTile < macrotile_centers.length; iMTile = ai.getAndIncrement()) {
if ((macrotile_centers[iMTile]!=null) &&(weights[iMTile] > 0.0)){
if ((macrotile_centers[iMTile]!=null) &&(weights[2*iMTile] > 0.0)){ // was: weights[iMTile]?
//infinity_disparity
boolean is_infinity = macrotile_centers[iMTile][2] < infinity_disparity;
double [][] deriv_params = ers_ref.getDPxSceneDParameters(
......@@ -826,6 +837,9 @@ public class IntersceneLma {
if (j >= i) {
double d = 0.0;
for (int k = 0; k < nup_points; k++) {
if (jt[i][k] != 0) { // no-op branch, apparently left as a debugging breakpoint anchor (see the "all 0???" note above)
d+=0;
}
d += weights[k]*jt[i][k]*jt[j][k];
}
wjtjl[i][j] = d;
......
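The inner loop above accumulates a weighted J^T*J. The following is a generic, single-file sketch of that accumulation (upper triangle computed, mirrored, and a Levenberg-Marquardt lambda applied to the diagonal); it illustrates the pattern rather than reproducing the class's exact getWJtJlambda(), and the multiplicative (1 + lambda) damping form is an assumption.

public class WeightedJtJSketch {
    static double[][] weightedJtJLambda(double lambda, double[][] jt, double[] weights) {
        int num_pars   = jt.length;     // jt is [parameter][sample]
        int num_points = jt[0].length;
        double[][] wjtj = new double[num_pars][num_pars];
        for (int i = 0; i < num_pars; i++) {
            for (int j = i; j < num_pars; j++) {               // upper triangle only
                double d = 0.0;
                for (int k = 0; k < num_points; k++) {
                    d += weights[k] * jt[i][k] * jt[j][k];     // per-sample weight
                }
                wjtj[i][j] = d;
                wjtj[j][i] = d;                                // mirror to the lower triangle
            }
            wjtj[i][i] *= (1.0 + lambda);                      // LM damping of the diagonal
        }
        return wjtj;
    }
    public static void main(String[] args) {
        double[][] jt      = {{1, 0, 2}, {0, 1, 1}};           // 2 parameters, 3 samples
        double[]   weights = {0.5, 0.25, 0.25};
        double[][] wjtj    = weightedJtJLambda(0.0, jt, weights);
        System.out.println(wjtj[0][0] + " " + wjtj[0][1]);     // 1.5 0.5
    }
}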
......@@ -2039,6 +2039,7 @@ public class QuadCLT extends QuadCLTCPU {
public static ImagePlus renderGPUFromDSI(
final int sensor_mask,
final boolean merge_channels,
final Rectangle full_woi_in, // show larger than sensor WOI in tiles (or null)
CLTParameters clt_parameters,
double [] disparity_ref,
......@@ -2105,6 +2106,7 @@ public class QuadCLT extends QuadCLTCPU {
debugLevel); // final int globalDebugLevel);
ImagePlus imp_render = scene.renderFromTD (
sensor_mask, // final int sensor_mask,
merge_channels, // boolean merge_channels,
clt_parameters, // CLTParameters clt_parameters,
clt_parameters.getColorProcParameters(scene.isAux()), //ColorProcParameters colorProcParameters,
clt_parameters.getRGBParameters(), //EyesisCorrectionParameters.RGBParameters rgbParameters,\
......@@ -2118,6 +2120,7 @@ public class QuadCLT extends QuadCLTCPU {
public ImagePlus renderFromTD (
int sensor_mask,
boolean merge_channels,
CLTParameters clt_parameters,
ColorProcParameters colorProcParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
......@@ -2132,10 +2135,43 @@ public class QuadCLT extends QuadCLTCPU {
use_reference,
wh); //int [] wh
// get data back from GPU
float [][][] iclt_fimg = new float [getNumSensors()][][];
final float [][][] iclt_fimg = new float [getNumSensors()][][];
int nchn = 0;
int ncol = 0;
int nTiles = 0;
for (int ncam = 0; ncam < iclt_fimg.length; ncam++) if (((1 << ncam) & sensor_mask) != 0){
iclt_fimg[ncam] = gpuQuad.getRBG(ncam); // updated window
ncol = iclt_fimg[ncam].length;
nTiles = iclt_fimg[ncam][0].length;
nchn++;
}
if (merge_channels) {
final double scale = 1.0 / nchn;
final float [][] iclt_fimg_combo = new float [ncol][nTiles];
final Thread[] threads = ImageDtt.newThreadArray(THREADS_MAX);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < iclt_fimg_combo[0].length; nTile = ai.getAndIncrement()) {
for (int ncol = 0; ncol < iclt_fimg_combo.length; ncol++) {
double d = 0;
for (int i = 0; i < iclt_fimg.length; i++) if (iclt_fimg[i] != null) {
d+=iclt_fimg[i][ncol][nTile];
}
iclt_fimg_combo[ncol][nTile] = (float) (d * scale);
}
}
}
};
}
ImageDtt.startAndJoin(threads);
iclt_fimg[0] = iclt_fimg_combo;
for (int i = 1; i < iclt_fimg.length; i++) {
iclt_fimg[i] = null;
}
}
// 2022/06/15 - handles variable window size
int out_width = gpuQuad.getImageWidth();// + gpuQuad.getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
int out_height = gpuQuad.getImageHeight(); // + gpuQuad.getDttSize(); // 2022/05/12 removed margins from gpuQuad.getRBG(ncam);
......@@ -5380,8 +5416,8 @@ public class QuadCLT extends QuadCLTCPU {
final int debugLevel)
{
final int tilesX = tp.getTilesX();
final int tilesY = tp.getTilesY();
final int tilesX = tp.getTilesX(); // may be different from last GPU run !
final int tilesY = tp.getTilesY(); // may be different from last GPU run !
/*
int d = ImageDtt.setImgMask(0, 0xf);
d = ImageDtt.setForcedDisparity(d,true);
......
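The merge_channels branch above averages the per-color tile data over all sensors present in the mask (multi-threaded in the diff). Below is a compact, single-threaded sketch of the same averaging; the standalone method is illustrative, and the [sensor][color][pixel] array layout follows the diff.

public class MergeChannelsSketch {
    static float[][] mergeChannels(float[][][] iclt_fimg) { // [sensor][color][pixel], null = sensor not used
        int ncol = 0, nchn = 0, npix = 0;
        for (float[][] img : iclt_fimg) if (img != null) {
            ncol = img.length;
            npix = img[0].length;
            nchn++;
        }
        double scale = 1.0 / nchn;
        float[][] combo = new float[ncol][npix];
        for (int col = 0; col < ncol; col++) {
            for (int pix = 0; pix < npix; pix++) {
                double d = 0;
                for (float[][] img : iclt_fimg) if (img != null) {
                    d += img[col][pix];
                }
                combo[col][pix] = (float) (d * scale);
            }
        }
        return combo;
    }
    public static void main(String[] args) {
        float[][][] imgs = {{{1, 2}, {3, 4}}, null, {{3, 6}, {5, 8}}, null}; // 2 of 4 sensors used
        float[][] combo = mergeChannels(imgs);
        System.out.println(combo[0][0] + " " + combo[1][1]); // 2.0 6.0
    }
}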
......@@ -81,6 +81,7 @@ import ij.Prefs;
import ij.WindowManager;
//import ij.gui.Overlay;
import ij.io.FileSaver;
import ij.plugin.filter.AVI_Writer;
import ij.process.ColorProcessor;
import ij.process.FloatProcessor;
import ij.process.ImageProcessor;
......@@ -116,7 +117,7 @@ public class QuadCLTCPU {
public static final int DSRBG_MONO = 3;
public static final boolean USE_PRE_2021 = false; // temporary
public static final int THREADS_MAX = 100;
// public GPUTileProcessor.GpuQuad gpuQuad = null;
......@@ -760,6 +761,9 @@ public class QuadCLTCPU {
int debugLevel)
{
if (getGPUQuad() != null) {
getGPUQuad().deallocate4Images();
}
final int debugLevelInner=clt_parameters.batch_run? -2: debugLevel;
String jp4_copy_path= correctionsParameters.selectX3dDirectory(
this.image_name, // quad timestamp. Will be ignored if correctionsParameters.use_x3d_subdirs is false
......@@ -1174,7 +1178,7 @@ public class QuadCLTCPU {
return imp;
}
public void saveImagePlusInModelDirectory(
public String saveImagePlusInModelDirectory(
String suffix, // null - use title from the imp
ImagePlus imp)
{
......@@ -1187,8 +1191,37 @@ public class QuadCLTCPU {
FileSaver fs=new FileSaver(imp);
fs.saveAsTiff(file_path);
System.out.println("saveDoubleArrayInModelDirectory(): saved "+file_path);
return file_path;
}
public String saveAVIInModelDirectory(
String suffix, // null - use title from the imp
int mode_avi,
int avi_JPEG_quality,
double fps,
ImagePlus imp) throws IOException
{
String [] remove_ext = {".tiff", ".tif", ".avi"};
String x3d_path = getX3dDirectory();
String file_name = (suffix==null) ? imp.getTitle():(image_name + suffix);
String file_path = x3d_path + Prefs.getFileSeparator() + file_name; // + ".tiff";
for (String ext:remove_ext) {
if (file_path.endsWith(ext)) {
file_path = file_path.substring(0,file_path.length()-ext.length());
}
}
file_path += ".avi";
imp.getCalibration().fps = fps;
(new AVI_Writer()).writeImage (
imp, // ImagePlus imp,
file_path, // String path,
mode_avi, // int compression,
avi_JPEG_quality); //int jpegQuality)
System.out.println("saveAVIInModelDirectory(): saved "+file_path);
return file_path;
}
public double [][] readDoubleArrayFromModelDirectory(
......
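A self-contained usage sketch of what the new saveAVIInModelDirectory() wraps: set the frame rate on the ImagePlus calibration, then write the stack with ij.plugin.filter.AVI_Writer. This assumes ImageJ on the classpath; the output path, dummy stack contents and compression/quality values are placeholders.

import ij.ImagePlus;
import ij.ImageStack;
import ij.plugin.filter.AVI_Writer;
import ij.process.ColorProcessor;
import java.io.IOException;

public class AviWriterSketch {
    public static void main(String[] args) throws IOException {
        ImageStack stack = new ImageStack(640, 480);
        for (int frame = 0; frame < 30; frame++) {       // dummy frames; real code uses rendered scenes
            stack.addSlice("frame-" + frame, new ColorProcessor(640, 480));
        }
        ImagePlus imp = new ImagePlus("demo-video", stack);
        imp.getCalibration().fps = 15.0;                 // frame rate, stored exactly as in the diff
        (new AVI_Writer()).writeImage(
                imp,                                     // ImagePlus imp
                "/tmp/demo-video.avi",                   // String path (placeholder)
                AVI_Writer.JPEG_COMPRESSION,             // int compression (MJPG frames)
                90);                                     // int jpegQuality
        System.out.println("Wrote /tmp/demo-video.avi");
    }
}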
......@@ -8574,12 +8574,35 @@ if (debugLevel > -100) return true; // temporarily !
final boolean updateStatus,
final int debugLevel) throws Exception
{
long start_time_all = System.nanoTime();
OpticalFlow opticalFlow = new OpticalFlow(
quadCLT_main.getNumSensors(),
clt_parameters.ofp.scale_no_lma_disparity, // double scale_no_lma_disparity,
threadsMax, // int threadsMax, // maximal number of threads to launch
updateStatus); // boolean updateStatus);
EyesisCorrectionParameters.CorrectionParameters.PathFirstLast[] pathFirstLast = null;
int num_seq = 1;
if (quadCLT_main.correctionsParameters.useSourceList) {
pathFirstLast = quadCLT_main.correctionsParameters.getSourceSets(
quadCLT_main.correctionsParameters.sourceSequencesList);
if (pathFirstLast != null) {
num_seq = pathFirstLast.length;
}
}
for (int nseq = 0; nseq < num_seq; nseq++) {
long start_time_seq = System.nanoTime();
System.out.println("\n\n\nPROCESSING SCENE SEQUENCE "+nseq+" (last is "+(num_seq-1)+")\n\n");
if (pathFirstLast != null) {
File [] scene_dirs = (new File(pathFirstLast[nseq].path)).listFiles(); // may contain non-directories, will be filtered by filterScenes
quadCLT_main.correctionsParameters.filterScenes(
scene_dirs, // File [] scene_dirs,
pathFirstLast[nseq].first, // int scene_first, // first scene to process
pathFirstLast[nseq].last); // int scene_last); // last scene to process (negative - add length
}
opticalFlow.buildSeries(
(pathFirstLast != null), //boolean batch_mode,
quadCLT_main, // QuadCLT quadCLT_main, // tiles should be set
ref_index, // int ref_index, // -1 - last
ref_step, // int ref_step,
......@@ -8594,8 +8617,14 @@ if (debugLevel > -100) return true; // temporarily !
threadsMax, // final int threadsMax, // maximal number of threads to launch
updateStatus, // final boolean updateStatus,
debugLevel+2); // final int debugLevel)
System.out.println("\n\n\nPROCESSING SCENE SEQUENCE "+nseq+" (last is "+(num_seq-1)+") is FINISHED in "+
IJ.d2s(0.000000001*(System.nanoTime()-start_time_seq),3)+" sec ("+
IJ.d2s(0.000000001*(System.nanoTime()-start_time_all),3)+" sec from the overall start");
}
System.out.println("\n\n\nPROCESSING OF "+num_seq+" SCENE SEQUENCES is FINISHED in "+
IJ.d2s(0.000000001*(System.nanoTime()-start_time_all),3)+" sec.");
}
......
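The batch loop above relies on getSourceSets() / filterScenes() to turn a sequence list into per-sequence scene directories. Below is an illustrative, standalone sketch of that selection step; it is not the project's filterScenes(), and the placeholder path and the "negative last counts from the end" rule follow the comments in the diff.

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class SceneRangeSketch {
    static File[] filterSceneRange(File[] scene_dirs, int scene_first, int scene_last) {
        List<File> dirs = new ArrayList<>();
        for (File f : scene_dirs) if ((f != null) && f.isDirectory()) { // drop non-directories
            dirs.add(f);
        }
        dirs.sort((a, b) -> a.getName().compareTo(b.getName()));        // timestamp (name) order
        if (scene_last < 0) {                                           // negative - add length
            scene_last += dirs.size();
        }
        scene_first = Math.max(scene_first, 0);
        scene_last  = Math.min(scene_last, dirs.size() - 1);
        if (scene_last < scene_first) {
            return new File[0];
        }
        return dirs.subList(scene_first, scene_last + 1).toArray(new File[0]);
    }
    public static void main(String[] args) {
        File[] all = (new File("/data/scene-sequences/seq-000")).listFiles(); // placeholder path
        if (all == null) all = new File[0];
        File[] selected = filterSceneRange(all, 0, -1);                       // -1 keeps through the last scene
        System.out.println("Selected " + selected.length + " of " + all.length + " entries");
    }
}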