Commit f0ca9c38 authored by Andrey Filippov's avatar Andrey Filippov

Started ranging

parent 6f8ae877
......@@ -866,7 +866,7 @@ public class CorrectionFPN {
ImageDtt.THREADS_MAX); // int threadsMax)
if (window != null) {
center_CLT.windowPsPyD(
center_CLT.windowPxPyD(
scene_pXpYD, // final double [][] pXpYD,
window, // final Rectangle window) // window in pixels!
max_fold, // final double max_fold)
......
......@@ -135,7 +135,7 @@ public class Cuas {
imp); // ImagePlus imp)
}
}
ref_CLT.windowPsPyD(
ref_CLT.windowPxPyD(
ref_pXpYD, // final double [][] pXpYD,
window, // final Rectangle window) // window in pixels!
max_fold, // final double max_fold)
......
......@@ -114,18 +114,26 @@ public class CuasMotion {
private String [] scene_titles = null;
private String [] slice_titles = null;
private double [][][] targets = null;
private int start_frame = 0;
private boolean slow_targets = false;
/** Total length (in frames) of one correlation sequence: pair offset plus number of correlated pairs. */
public int getSeqLength() {
	final int offset = clt_parameters.imp.cuas_corr_offset;
	final int pairs  = clt_parameters.imp.cuas_corr_pairs;
	return offset + pairs;
}
public int getCorrInc() {
/**
 * Step (in frames) between consecutive correlation sequences (== frame_step).
 * Half of the pair offset when half-stepping is enabled, full offset otherwise.
 */
public int getCorrInc() { // == frame_step;
	final int offset = clt_parameters.imp.cuas_corr_offset;
	if (clt_parameters.imp.cuas_half_step) {
		return offset / 2;
	}
	return offset;
}
/** Number of correlation samples — one per generated slice title. */
public int getNumCorrSamples() {
	final String [] slices = slice_titles; // NPE here if titles were never initialized (same as before)
	return slices.length;
}
/**
 * Absolute index of the center frame of correlation sequence {@code nseq}.
 * @param nseq 0-based sequence number
 * @return start_frame + half sequence length + nseq * frame step
 */
public int getFrameCenter(int nseq) {
	return start_frame + getSeqLength() / 2 + nseq * getCorrInc();
}
/** @return per-scene titles array (the internal reference, not a copy) */
public String [] getSceneTitles() {
return scene_titles;
}
......@@ -139,6 +147,32 @@ public class CuasMotion {
this.targets = targets;
}
/**
 * Calculate a symmetric window for accumulating consecutive scenes around the key frame.
 * @param smooth true - cosine taper, false - rectangular (uniform) window
 * @return window[2*half_range + 1], symmetric around the center, sum of values = 1.0
 */
public double [] getSegmentWindow(
		boolean smooth) {
	final int half_range = getSeqLength()/2;
	final double [] window_full = new double [2*half_range + 1];
	double s0 = 1.0;
	window_full[half_range] = 1.0;
	// half-period of the cosine spans (half_range + 0.5) samples, so the taper
	// reaches near zero just past the window edge
	double k = Math.PI/2/(half_range +0.5);
	for (int i = 1; i <= half_range; i ++) {
		double w = smooth ? (Math.cos(i*k)):1.0;
		window_full[half_range+i] = w;
		// FIX: mirror to the left half. Previously only the right half was
		// filled while s0 still counted both sides (s0 += 2*w), so the left
		// half stayed 0 and the values did not sum to 1.0 as documented.
		window_full[half_range-i] = w;
		s0+= 2 * w;
	}
	// normalize so the window integrates to exactly 1.0
	for (int i = 0; i < window_full.length; i ++) {
		window_full[i] /= s0;
	}
	return window_full;
}
/** @return number of tile columns in the processed frame */
public int getTilesX() {
return tilesX;
}
public CuasMotion (
......@@ -152,7 +186,7 @@ public class CuasMotion {
this.parentCLT = parentCLT;
this.scene_titles = scene_titles;
final double max_01_diff = 1E-3;
System.out.println("Setting up GPU");
System.out.println("Setting up GPU for CuasMotion class");
gpuTileProcessor = parentCLT.getGPUQuad().getGpuTileProcessor();
tilesX = parentCLT.getTilesX();
tilesY = parentCLT.getTilesY();
......@@ -161,7 +195,7 @@ public class CuasMotion {
corr_offset = clt_parameters.imp.cuas_corr_offset;
corr_pairs = clt_parameters.imp.cuas_corr_pairs;
int start_frame = 0;
// int start_frame = 0;
int num_corr_samples = (scene_titles.length - getSeqLength() - start_frame) / getCorrInc();
slice_titles = new String [num_corr_samples];
for (int nscan = 0; nscan < num_corr_samples; nscan++) {
......@@ -1861,6 +1895,40 @@ public class CuasMotion {
return vf;
}
/**
 * Read target data from a hyperstack TIFF and pad each target vector with NaN
 * fields up to CuasMotionLMA.RSLT_LEN when the file was written with fewer
 * (older) result fields. Files with more fields are kept as-is (never truncated).
 * @param path path to the hyperstack file
 * @return targets as [nseq][ntile][field], entries null for tiles without a
 *         target, or null if the file could not be read
 */
public static double [][][] getTargetsFromHyperAugment(String path){
	final int [] wh = new int [2];
	final String [][] top_titles = new String[1][];
	final String [][] file_slice_titles = new String[1][];
	final double [][][] file_data = ShowDoubleFloatArrays.readDoubleHyperstack(
			path,              // String path,
			wh,                // int [] wh, // should be null or int[2]
			top_titles,        // String [][] ptop_titles, // should be null or String [1][]
			file_slice_titles);// String [][] pslice_titles){// should be null or String [1][]
	if (file_data == null) {
		return null;
	}
	final int fields_in_file = file_data.length;                                // file layout is [field][nseq][ntile]
	final int fields_out     = Math.max(fields_in_file, CuasMotionLMA.RSLT_LEN); // pad short vectors, keep long ones
	final int num_seq        = file_data[0].length;
	final int num_tiles      = file_data[0][0].length;
	final double [][][] target_sequence = new double [num_seq][num_tiles][];
	for (int nseq = 0; nseq < num_seq; nseq++) {
		for (int ntile = 0; ntile < num_tiles; ntile++) {
			// a tile has a target only when field 0 is defined
			if (Double.isNaN(file_data[0][nseq][ntile])) {
				continue;
			}
			final double [] v = new double [fields_out];
			int i = 0;
			for (; i < fields_in_file; i++) {
				v[i] = file_data[i][nseq][ntile];
			}
			for (; i < fields_out; i++) {
				v[i] = Double.NaN; // augmented (missing-in-file) fields
			}
			target_sequence[nseq][ntile] = v;
		}
	}
	return target_sequence;
}
public static String trimSuffix(
......@@ -2571,6 +2639,51 @@ public class CuasMotion {
}
return;
}
/**
 * Calculate sparse ref_pXpYD for tiles with targets, for the consecutive scenes
 * corresponding to the same keyframe (nseq). Target positions are linearly
 * extrapolated from the keyframe position using the per-target velocity.
 * @param targets targets for a single keyframe: per-tile result vectors indexed
 *        by CuasMotionLMA.RSLT_* constants, null for tiles without a target
 * @return [2*half_accum_range+1][num_tiles][3] of {pX, pY, disparity};
 *         per-tile entries are null where targets[nTile] was null
 */
public double [][][] targetPxPyD(
final double [][] targets) {
final int half_accum_range = getSeqLength()/2; // scenes on each side of the key frame
final int num_tiles = tilesX * tilesY;
final int tileSize = GPUTileProcessor.DTT_SIZE;
final double [][][] pXpYDs = new double [2* half_accum_range +1][num_tiles][];
final Thread[] threads = ImageDtt.newThreadArray();
final AtomicInteger ai = new AtomicInteger(0); // work-stealing tile counter shared by the threads
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
// per-thread scratch buffer; cloned before each store into the shared pXpYDs
double [] pXpYD = new double [3];
for (int nTile = ai.getAndIncrement(); nTile < num_tiles; nTile = ai.getAndIncrement()) if (targets[nTile] != null) {
double [] target = targets[nTile];
int tileY = nTile / tilesX;
int tileX = nTile % tilesX;
// tile center in pixels
double xc = tileSize * tileX + tileSize/2;
double yc = tileSize * tileY + tileSize/2;
// target pixel position at the key frame
double xtk = xc + target[CuasMotionLMA.RSLT_X];
double ytk = yc + target[CuasMotionLMA.RSLT_Y];
// per-frame velocity: RSLT_VX/VY are measured over corr_offset frames
double vx = target[CuasMotionLMA.RSLT_VX]/corr_offset;
double vy = target[CuasMotionLMA.RSLT_VY]/corr_offset;
double disp = target[CuasMotionLMA.RSLT_DISPARITY];
if (Double.isNaN(disp)) {
disp = 0.0; // undefined disparity is replaced with 0.0
}
pXpYD[2] = disp; // disparity is the same for all scenes of the segment
// extrapolate the target position to each scene of the segment
for (int dseq = -half_accum_range; dseq <= half_accum_range; dseq++) {
pXpYD[0] = xtk + vx * dseq;
pXpYD[1] = ytk + vy * dseq;
pXpYDs[dseq + half_accum_range][nTile] = pXpYD.clone();
}
}
}
};
}
ImageDtt.startAndJoin(threads);
return pXpYDs;
}
/**
......@@ -9339,9 +9452,9 @@ public class CuasMotion {
String x3d_path = parentCLT.getX3dDirectory();
reuse_path = x3d_path + Prefs.getFileSeparator() + model_prefix + reuse_path+".tiff";
}
targets_nonconflict = getTargetsFromHyper(reuse_path);
targets_nonconflict = getTargetsFromHyperAugment(reuse_path);
if (targets_nonconflict == null) {
System.out.println("processMovingTargetsMulti(): failed to read target data from "+reuse_path);
System.out.println("processMovingTargetsMulti(): FAILED TO READ TARGET DATA FROM "+reuse_path);
} else {
System.out.println("processMovingTargetsMulti(): re-using target data from "+reuse_path);
}
......@@ -9921,6 +10034,7 @@ public class CuasMotion {
double slow_ra = clt_parameters.imp.cuas_slow_ra;
double slow_score = clt_parameters.imp.cuas_slow_score;
boolean generate_output =clt_parameters.imp.cuas_generate; // generate and save targets Tiff and/or video files
boolean clean_video = clt_parameters.imp.cuas_clean_video; //true;// save video without any debug information for targets, output in TIFF files. False - same output for video and TIFFs
boolean reuse_targets = clt_parameters.imp.cuas_reuse_targets; // true; // read previously calculated non-conflict (one per tile) targets
String reuse_path = clt_parameters.imp.cuas_reuse_path; // ""; // either suffix (all parameters the same) or the full path (contains "/")
......@@ -9942,9 +10056,9 @@ public class CuasMotion {
String x3d_path = parentCLT.getX3dDirectory();
reuse_path = x3d_path + Prefs.getFileSeparator() + model_prefix + reuse_path+".tiff";
}
targets_nonconflict = getTargetsFromHyper(reuse_path);
targets_nonconflict = getTargetsFromHyperAugment(reuse_path);
if (targets_nonconflict == null) {
System.out.println("processMovingTargetsMulti(): failed to read target data from "+reuse_path);
System.out.println("processMovingTargetsMulti(): FAILED TO READ TARGET DATA FROM "+reuse_path);
} else {
System.out.println("processMovingTargetsMulti(): re-using target data from "+reuse_path);
}
......@@ -9996,7 +10110,7 @@ public class CuasMotion {
}
setTargets(targets_nonconflict);
if (generate_output) {
generateExtractFilterMovingTargets( // move parameters to clt_parameters
false, // final boolean video_pass, // if clt_parameters.cuas_clean_video=true, video_pass=0 - output TIFFS, but no video. If video_pass==1 - only video with no debug
batch_mode, // final boolean batch_mode,
......@@ -10011,6 +10125,10 @@ public class CuasMotion {
targets_nonconflict, // final double [][][] vf_sequence, // center tiles (not extended), null /non-null only
debugLevel); // final int debugLevel)
}
} else {
System.out.println("Output images/videos with detected targets is disabled by \"Generate and save detected targets\" parameter, skipping it.");
}
return;
}
......
......@@ -86,8 +86,10 @@ public class CuasMotionLMA {
public static final int RSLT_SLOW = 41; // 1 - slow, 0 - fast
public static final int RSLT_WHEN = 42;
public static final int RSLT_FAIL = 43;
public static final int RSLT_DISPARITY = 44;
public static final int RSLT_LEN = RSLT_FAIL+1;
public static final int RSLT_LEN = RSLT_DISPARITY+1;
public static final String [] LMA_TITLES =
{"X-OFFS","Y-OFFS", "AMPLITUDE", "RADIUS","RAD_POS", "OVERSHOOT","OFFSET","RMSE","RMSE/A","MAX2A","ITERATIONS",
......@@ -102,7 +104,8 @@ public class CuasMotionLMA {
"*MOTION-SCORE",
"*Q-AMPL","*Q-RMSE","*Q-RMSE/A","*Q-CENTER","*Q-MATCH","*Q-LENGTH","*QTRAVEL","*Q-SCORE",
"Stronger","Slow",
"WHEN", "FAILURE"};
"WHEN", "FAILURE",
"Disparity"};
public static final int FAIL_NONE = 0;
public static final int FAIL_MOTION = 1; // motion strength/fraction too low
......
......@@ -1399,27 +1399,6 @@ public class ImageDtt extends ImageDttCPU {
globalDebugLevel); // final int globalDebugLevel)
/*
final float [][] lpf_rgb = new float[][] {
floatGetCltLpfFd(gpu_sigma_r),
floatGetCltLpfFd(gpu_sigma_b),
floatGetCltLpfFd(gpu_sigma_g),
floatGetCltLpfFd(gpu_sigma_m)
};
gpuQuad.setLpfRbg( // constants memory - same for all cameras
lpf_rgb,
globalDebugLevel > 2);
gpuQuad.setTasks( // copy tp_tasks to the GPU memory
tp_tasks, // TpTask [] tile_tasks,
false, // use_aux); // boolean use_aux)
imgdtt_params.gpu_verify); // boolean verify
// Why always NON-UNIFORM grid? Already set in tp_tasks
gpuQuad.execSetTilesOffsets(false); // false); // prepare tiles offsets in GPU memory, using NON-UNIFORM grid (pre-calculated)
// update tp_tasks
gpuQuad.updateTasks(
tp_tasks,
false); // boolean use_aux // while is it in class member? - just to be able to free
*/
if (fclt != null) {
gpuQuad.handleWH( // allocate/reallocate GPU memory, that was normally done by gpuQuad.execConvertDirect()
wh, // int [] wh,
......@@ -1491,6 +1470,26 @@ public class ImageDtt extends ImageDttCPU {
return;
}
/**
 * Upload the task array to the GPU and update per-tile offsets without running
 * execConvertDirect — exits right after updating the tasks. The three GPU calls
 * below are order-dependent: tasks must be uploaded before offsets are
 * calculated, and tp_tasks is refreshed from GPU memory last.
 * @param imgdtt_params correlation parameters (only gpu_verify is used here)
 * @param tp_tasks tile processing tasks to copy to GPU memory (updated in place)
 * @param globalDebugLevel debug level (currently unused in this method)
 */
public void preSetReferenceTD( // do not run execConvertDirect, exit after updating tasks
final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
final TpTask[] tp_tasks,
final int globalDebugLevel)
{
gpuQuad.setTasks( // copy tp_tasks to the GPU memory
tp_tasks, // TpTask [] tile_tasks,
false, // use_aux); // boolean use_aux)
imgdtt_params.gpu_verify); // boolean verify
// Why always NON-UNIFORM grid? Already set in tp_tasks
gpuQuad.execSetTilesOffsets(false); // false); // prepare tiles offsets in GPU memory, using NON-UNIFORM grid (pre-calculated)
// update tp_tasks - execute even w/o execSetTilesOffsets
gpuQuad.updateTasks(
tp_tasks,
false); // boolean use_aux // while is it in class member? - just to be able to free
return;
}
public void setRectilinearReferenceTD(
......
......@@ -6410,7 +6410,7 @@ public class OpticalFlow {
if (generate_mapped) {
double [][] ds_vantage = new double[][] {selected_disparity,selected_strength};
if ((views[ibase][0] != 0) || (views[ibase][1] != 0) || (views[ibase][2] != 0) || (master_CLT.hasCenterClt()) && (mode3d > 0)) {
boolean debug_vantage = true;
boolean debug_vantage = false; // true;
//dls
double [][] dbg_vantage = debug_vantage ? (new double[7][]): null;
if (dbg_vantage != null) {
......@@ -8280,9 +8280,6 @@ public class OpticalFlow {
double [][] dxyzatr_dt = null;
// should get velocities from HashMap at reference scene from timestamp , not re-calculate.
if (mb_en) {
// dxyzatr_dt = getVelocities(
// quadCLTs, // QuadCLT [] quadCLTs,
// nscene); // int nscene)
dxyzatr_dt = new double[][] { // for all, including ref
quadCLTs[nscene].getErsCorrection().getErsXYZ_dt(),
quadCLTs[nscene].getErsCorrection().getErsATR_dt()};
......
......@@ -59,6 +59,8 @@ import com.elphel.imagej.ims.UasLogReader;
import ij.IJ;
import ij.ImagePlus;
import ij.ImageStack;
//import java.io.FilterOutputStream;
//import ij.Prefs;
//import ij.io.FileSaver;
public class QuadCLT extends QuadCLTCPU {
......@@ -1680,9 +1682,7 @@ public class QuadCLT extends QuadCLTCPU {
scene_atr, // double [] scene_atr, // camera orientation relative to world frame
scene, //final QuadCLT scene,
ref_scene, // final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
// toRGB, // final boolean toRGB,
show_nan, // final boolean show_nan,
// suffix, // String suffix,
threadsMax, // int threadsMax,
debugLevel); // final int debugLevel)
int [] wh = (full_woi_in == null)? null: new int[]{
......@@ -1701,6 +1701,64 @@ public class QuadCLT extends QuadCLTCPU {
return imp_render;
}
/**
 * Render a scene on the GPU from DSI data and return the result as double
 * arrays per sensor channel: runs preRenderGPUFromDSI() with no border
 * discarding, then converts from the transform domain with
 * renderDoubleFromTDMono().
 * NOTE(review): the toRGB parameter is not referenced in this body — kept for
 * signature symmetry with the ImagePlus-producing variant; confirm before use.
 * @return [sensor (or merged channel)][pixel] rendered double data
 */
public static double[][] renderDoubleGPUFromDSI(
final int sensor_mask,
final boolean merge_channels,
final Rectangle full_woi_in, // show larger than sensor WOI in tiles (or null)
CLTParameters clt_parameters,
double [] disparity_ref,
double [][] ref_pXpYD, // alternative to disparity_ref when reference is not uniform
// motion blur compensation
double mb_tau, // 0.008; // time constant, sec
double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
double [][] mb_vectors, // now [2][ntiles];
double [] scene_xyz, // camera center in world coordinates. If null - no shift, no ers
double [] scene_atr, // camera orientation relative to world frame
final QuadCLT scene,
final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
final boolean toRGB,
final boolean show_nan,
final int debugLevel){
preRenderGPUFromDSI(
sensor_mask, // final int sensor_mask,
merge_channels, // final boolean merge_channels,
0, // final int discard_border,
0, // final double max_fold,
0, // final int min_in_row_col, // Minimal number of defined tiles in a row/column
full_woi_in, // final Rectangle full_woi_in, // show larger than sensor WOI in tiles (or null)
clt_parameters, // CLTParameters clt_parameters,
disparity_ref, // double [] disparity_ref,
ref_pXpYD, // double [][] ref_pXpYD, // alternative to disparity_ref when reference is not uniform
// motion blur compensation
mb_tau, // double mb_tau, // 0.008; // time constant, sec
mb_max_gain, // double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
mb_vectors, // double [][] mb_vectors, // now [2][ntiles];
scene_xyz, // double [] scene_xyz, // camera center in world coordinates. If null - no shift, no ers
scene_atr, // double [] scene_atr, // camera orientation relative to world frame
scene, //final QuadCLT scene,
ref_scene, // final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
show_nan, // final boolean show_nan,
ImageDtt.THREADS_MAX, // threadsMax, // int threadsMax,
debugLevel); // final int debugLevel)
// output pixel dimensions when a wider-than-sensor WOI was requested
int [] wh = (full_woi_in == null)? null: new int[]{
full_woi_in.width * GPUTileProcessor.DTT_SIZE,
full_woi_in.height * GPUTileProcessor.DTT_SIZE};
double [][] drender = scene.renderDoubleFromTDMono ( // [scene][color][pixel]
sensor_mask, // int sensor_mask,
wh, // int [] wh, // may be null, or {width, height}
false); // boolean use_reference)
return drender;
}
public static float [][] getTDCombo(
final int sensor_mask,
final boolean merge_channels,
......@@ -1720,7 +1778,6 @@ public class QuadCLT extends QuadCLTCPU {
double [] scene_atr, // camera orientation relative to world frame
final QuadCLT scene,
final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
// final boolean toRGB,
final boolean show_nan,
final double [][][] dbg_PxPyD_slice,
int threadsMax,
......@@ -1826,7 +1883,7 @@ public class QuadCLT extends QuadCLTCPU {
}
}
if (window != null) {
ref_scene.windowPsPyD(
ref_scene.windowPxPyD(
pXpYD, // final double [][] pXpYD,
window, // final Rectangle window) // window in pixels!
max_fold, // final double max_fold)
......@@ -1947,7 +2004,7 @@ public class QuadCLT extends QuadCLTCPU {
return pXpYD;
}
public void windowPsPyD(
public void windowPxPyD(
final double [][] pXpYD,
final Rectangle window, // window in pixels!
final double max_fold,
......@@ -6012,4 +6069,105 @@ if (debugLevel < -100) {
debugLevel); // final int debugLevel)
}
/**
 * Monochrome convenience wrapper around unsharpMaskSource(): always generates
 * output (never updates the source in place) and keeps only color plane 0 for
 * each scene/sensor.
 * @return [scene][sensor][pixel] unsharp-masked (or copied) mono data
 */
public static double [][][] unsharpMaskSourceMono(
		final QuadCLT[] scenes,
		final int start_scene,
		final int num_scenes,
		final boolean um_en,
		final boolean unsharped,
		final double um_sigma,
		final double um_weight,
		final int debugLevel){
	final double [][][][] per_color = unsharpMaskSource(
			scenes,
			start_scene,
			num_scenes,
			um_en,
			unsharped,
			false,      // update_source: always produce output here
			um_sigma,
			um_weight,
			debugLevel);
	final int num_sens = per_color[0].length; // sensor count taken from the first scene (as before)
	final double [][][] mono = new double [per_color.length][num_sens][];
	for (int nscene = 0; nscene < mono.length; nscene++) {
		for (int nsens = 0; nsens < num_sens; nsens++) {
			mono[nscene][nsens] = per_color[nscene][nsens][0]; // plane 0 == mono
		}
	}
	return mono;
}
/**
 * Apply unsharp masking (source minus Gaussian blur, blur optionally scaled by
 * um_weight) to the image data of a range of scenes, processing all
 * scene/sensor/color planes in parallel.
 * @param scenes        scene array; image data is read via getImageData()
 * @param start_scene   first scene index to process
 * @param num_scenes    number of consecutive scenes to process
 * @param um_en         enable unsharp masking; when false planes are just copied
 * @param unsharped     source is already unsharped; when true planes are just copied
 * @param update_source modify the scenes' image data in place and return null
 *                      instead of building an output array
 * @param um_sigma      Gaussian blur sigma (pixels)
 * @param um_weight     weight applied to the subtracted blurred image
 * @param debugLevel    debug level (currently unused)
 * @return [scene][sensor][color][pixel] results, or null when update_source
 */
public static double [][][][] unsharpMaskSource(
		final QuadCLT[] scenes,
		final int start_scene,
		final int num_scenes,
		final boolean um_en,
		final boolean unsharped,
		final boolean update_source, // if update_source will not generate output, return null
		final double um_sigma,
		final double um_weight,
		final int debugLevel){
	final int num_sens = scenes[start_scene].getNumSensors();
	int [] whc = scenes[start_scene].getWHC(false);
	final int width = whc[0];
	final int height = whc[1];
	final int num_col = whc[2];
	final double [][][][] images_um = update_source? null : new double [num_scenes][num_sens][num_col][];
	final int num_sens_col = num_sens*num_col;
	final int num_img = num_scenes*num_sens_col; // total planes to process, one per work item
	final Thread[] threads = ImageDtt.newThreadArray();
	final AtomicInteger ai = new AtomicInteger(0);
	for (int ithread = 0; ithread < threads.length; ithread++) {
		threads[ithread] = new Thread() {
			public void run() {
				// per-thread scratch buffer and blur instance
				double [] img_buf = new double [width * height];
				DoubleGaussianBlur gb = new DoubleGaussianBlur();
				// may be faster if process only where vector_field[nseq][ntile] is not null
				for (int nImg = ai.getAndIncrement(); nImg < num_img; nImg = ai.getAndIncrement()) {
					// decode flat work-item index into scene/sensor/color
					int dscene = nImg / num_sens_col;
					int nscene = dscene + start_scene;
					int nsens_col = nImg % num_sens_col;
					int nsens = nsens_col / num_col;
					int ncol = nsens_col % num_col;
					double [] img_src = scenes[nscene].getImageData()[nsens][ncol];
					if (!unsharped && um_en) {
						System.arraycopy(img_src, 0, img_buf, 0, img_buf.length);
						gb.blurDouble(
								img_buf, //
								width,
								height,
								um_sigma, // double sigmaX,
								um_sigma, // double sigmaY,
								0.01); // double accuracy)
						if (um_weight != 1.0) {
							for (int i = 0; i < img_buf.length; i++) {
								img_buf[i] *= um_weight;
							}
						}
						if (update_source) {
							// subtract the (weighted) blur from the source in place
							for (int i = 0; i < img_buf.length; i++) {
								img_src[i] -= img_buf[i];
							}
						} else {
							for (int i = 0; i < img_buf.length; i++) {
								img_buf[i] = img_src[i] - img_buf[i];
							}
							images_um[dscene][nsens][ncol] = img_buf.clone();
						}
					} else { // no UM requested or source already unsharped - just copy
						// FIX: guard the copy - in update_source mode images_um is null
						// and the previous unconditional write threw an NPE; with
						// update_source the source is already in the desired state.
						if (!update_source) {
							images_um[dscene][nsens][ncol] = img_src.clone();
						}
					}
				}
			}
		};
	}
	ImageDtt.startAndJoin(threads);
	return images_um;
}
}
......@@ -172,6 +172,7 @@ public class QuadCLTCPU {
public String image_path = null;
double [] gps_lla = null;
public double [][][] image_data = null; // [channel][color][pixel]
public double [][][] image_data_alt = null; // [channel][color][pixel] If not null - will use instead of image_data
public double [][] image_center = null; // reference to a common sensor-combined center view image (set for center instance)
public boolean no_kernels = false; // debug feature
......@@ -6317,11 +6318,20 @@ public class QuadCLTCPU {
/**
 * Return the current image data and clear the new-data flag. The alternative
 * image data set takes precedence when present.
 */
public double [][][] getResetImageData(){
	new_image_data = false;
	return (image_data_alt != null) ? image_data_alt : image_data;
}
/**
 * Return the current image data without touching the new-data flag. The
 * alternative image data set takes precedence when present.
 */
public double [][][] getImageData(){ // does not reset new data
	return (image_data_alt != null) ? image_data_alt : image_data;
}
public void setImageData(double [][][] data) {
image_data = data;
new_image_data = true;
......@@ -6329,9 +6339,17 @@ public class QuadCLTCPU {
image_fpn = null;
}
/**
 * Set (or clear, with null) the alternative image data that overrides
 * image_data in getImageData()/getResetImageData(). Unlike setImageData(),
 * the FPN-related caches are intentionally left untouched here (see the
 * commented-out lines below).
 * @param data alternative [channel][color][pixel] data, or null to fall back to image_data
 */
public void setImageDataAlt(double [][][] data) {
image_data_alt = data;
new_image_data = true; // even if null set new data as it will use image_data after image_data_alt
// image_fpn_applied = null;
// image_fpn = null;
}
public double [][] getResetImageCenter(){
new_image_data = false;
boolean debug = false;
boolean debug = true; // false;
if (debug) {
if (image_center == null) {
System.out.println("--- getResetImageCenter(): image_center== null");
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment