Commit 7ccf095c authored by Andrey Filippov

working on textures to look behind on BG using only subset of cameras

parent f6006466
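The commit message above names the approach; below is a minimal, hypothetical Java sketch of the idea (illustrative only - the method name and signature are not part of the repository API). For a background tile partly hidden by a foreground object the per-sensor texture values disagree strongly, so instead of averaging all cameras only the arc of consecutive sensors with the lowest variance is averaged - effectively looking behind the occluder from one side. The actual implementation (see getInterCombinedTexturesNew further down) also grows the arc while the variance stays acceptable.

// Hypothetical sketch: pick the lowest-variance arc of consecutive sensors and average it.
// pix[nsens] is one pixel's texture value from sensor nsens; sensors are ordered around the ring.
static double averageBestArc(double [] pix, int arcLen) {
	int n = pix.length;
	double bestVar = Double.MAX_VALUE;
	double bestAvg = Double.NaN;
	for (int start = 0; start < n; start++) { // try every arc of arcLen consecutive sensors
		double s = 0.0, s2 = 0.0;
		for (int i = 0; i < arcLen; i++) {
			double d = pix[(start + i) % n];
			s  += d;
			s2 += d * d;
		}
		double avg = s / arcLen;
		double var = s2 / arcLen - avg * avg; // variance of this arc
		if (var < bestVar) {
			bestVar = var;
			bestAvg = avg; // remember the average over the least-conflicting arc
		}
	}
	return bestAvg;
}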
......@@ -2290,10 +2290,10 @@ public class GpuQuad{ // quad camera description
int [] cpu_num_texture_tiles = new int[8];
// cuMemcpyDtoH(Pointer.to(cpu_woi), gpu_woi, cpu_woi.length * Sizeof.INT); // hope that Float.floatToIntBits(fcorr_indices[i]) is not needed
cpu_woi[0] = width;
cpu_woi[1] = height;
cpu_woi[2] = 0;
cpu_woi[3] = 0;
cpu_woi[0] = width; // larger than any x
cpu_woi[1] = height; // larger than any y
cpu_woi[2] = 0; // smaller than or equal to any x
cpu_woi[3] = 0; // smaller than or equal to any y
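// seed for the GPU-side bounding-box reduction: min-tracking entries start at the largest possible
// value and max-tracking entries at 0, so the WOI read back later (cpu_woi[2..3] size the texture
// below) presumably ends up as the bounding box of the tiles actually generated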
cuMemcpyHtoD(gpu_woi, Pointer.to(cpu_woi), cpu_woi.length * Sizeof.INT);
// cuMemcpyDtoH(Pointer.to(cpu_woi), gpu_woi, cpu_woi.length * Sizeof.INT); //just for testing
......@@ -2395,6 +2395,9 @@ public class GpuQuad{ // quad camera description
int texture_width = (cpu_woi[2] + 1) * GPUTileProcessor.DTT_SIZE;
int texture_tiles_height = (cpu_woi[3] + 1) * GPUTileProcessor.DTT_SIZE;
int texture_slices = num_colors + 1;
if ((keep_weights & 2) != 0) {
texture_slices += num_colors * num_cams;
}
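// e.g. a monochrome (num_colors = 1) 16-sensor rig: 2 slices (Y + alpha) without per-camera data,
// or 1 + 1 + 1 * 16 = 18 slices when (keep_weights & 2) != 0 requests the per-camera channels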
int blocks_x2 = ((texture_width +
((1 << (GPUTileProcessor.THREADS_DYNAMIC_BITS + GPUTileProcessor.DTT_SIZE_LOG2 )) - 1)) >>
......@@ -3382,6 +3385,7 @@ public class GpuQuad{ // quad camera description
return textures;
}
/*
public static double [][][][] doubleTextures( // not used
Rectangle woi,
int [] indices,
......@@ -3407,7 +3411,7 @@ public class GpuQuad{ // quad camera description
}
return textures;
}
*/
public static double [][][][] doubleTextures( // may be accelerated with multithreading if needed.
Rectangle woi, // null or width and height match texture_tiles
double [][][][] texture_tiles, // null or [tilesY][tilesX]
......
......@@ -692,12 +692,14 @@ public class ImageDtt extends ImageDttCPU {
true, // boolean calc_textures,
false, // boolean calc_extra
false); // boolean linescan_order)
int num_src_slices = numcol + 1; // + (clt_parameters.keep_weights?(ports + numcol + 1):0); // 12 ; // calculate
// int num_src_slices = numcol + 1; // + (clt_parameters.keep_weights?(ports + numcol + 1):0); // 12 ; // calculate
int num_src_slices = numcol + 1 + ((keep_weights!=0)?(getNumSensors() + numcol + 1):0);
int num_out_slices = numcol + 1 + ((keep_weights!=0)?(getNumSensors()):0); // 18
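// e.g. monochrome (numcol = 1), 16 sensors, keep_weights != 0:
// num_src_slices = 1 + 1 + (16 + 1 + 1) = 20, num_out_slices = 1 + 1 + 16 = 18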
int [] texture_indices = gpuQuad.getTextureIndices();
float [] flat_textures = gpuQuad.getFlatTextures(
texture_indices.length,
numcol, // int num_colors,
false); // clt_parameters.keep_weights); // boolean keep_weights);
(keep_weights != 0)); // clt_parameters.keep_weights); // boolean keep_weights);
int tilesX = gpuQuad.img_width / GPUTileProcessor.DTT_SIZE;
int tilesY = gpuQuad.img_height / GPUTileProcessor.DTT_SIZE;
double [][][][] texture_tiles = new double [tilesY][tilesX][][];
......@@ -707,7 +709,7 @@ public class ImageDtt extends ImageDttCPU {
texture_indices, // int [] indices,
flat_textures, // float [][][] ftextures,
tilesX, // int full_width,
isMonochrome()? 2: 4, // rbga only /int num_slices Same number
num_out_slices, // isMonochrome()? 2: 4, // rbga only /int num_slices Same number
num_src_slices // int num_src_slices
);
return texture_tiles;
......
......@@ -1479,6 +1479,7 @@ public class QuadCLT extends QuadCLTCPU {
* @param max_distortion maximal neighbor tiles offset as a fraction of tile size (8)
* @param cluster_index which cluster the tile belongs - to verify tile distortions
* @param border border tiles, may have neighbors (other border tiles) over discontinuity
* @param keep_channels if true, also output per-sensor Y (G) channels after the combined YA (RBGA) slices
* @param debugLevel
* @return
*/
......@@ -1497,6 +1498,7 @@ public class QuadCLT extends QuadCLTCPU {
final double max_distortion, // maximal neighbor tiles offset as a fraction of tile size (8)
final int [] cluster_index, //
final boolean [] border, // border tiles
final boolean keep_channels,
final int debugLevel){
// FIXME: Move to clt_parameters;
final double max_overlap = 0.6;
......@@ -1647,6 +1649,7 @@ public class QuadCLT extends QuadCLTCPU {
OpticalFlow.THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debugLevel); // final int globalDebugLevel);
}
// now obeys keep_weights
final double [][][][] texture_tiles = image_dtt.process_texture_tiles( // from transform domain all sensors to combined texture
clt_parameters.corr_red, // double corr_red,
clt_parameters.corr_blue, // double corr_blue,
......@@ -1766,6 +1769,348 @@ public class QuadCLT extends QuadCLTCPU {
return texture_tiles;
}
public static double [][][][] texturesNoOverlapGPUFromDSI(
CLTParameters clt_parameters,
double [] disparity_ref,
// motion blur compensation
double mb_tau, // 0.008; // time constant, sec
double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel)
double [][] mb_vectors, // now [2][ntiles];
final double [] scene_xyz, // camera center in world coordinates
final double [] scene_atr, // camera orientation relative to world frame
final QuadCLT scene,
final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
final boolean filter_bg, // remove bg tiles (possibly occluded)
final double max_distortion, // maximal neighbor tiles offset as a fraction of tile size (8)
final int [] cluster_index, //
final boolean [] border, // border tiles
final boolean keep_channels,
final int debugLevel){
// FIXME: Move to clt_parameters;
final double max_overlap = 0.6;
final double min_adisp_cam = 0.2;
final double min_rdisp_cam = 0.03;
int keep_weights = 2; // (clt_parameters.replace_weights? 2 : 0); // just 0/2
double [][] pXpYD_prefilter =OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene
null, // full_woi_in, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scene, // final QuadCLT scene_QuadClt,
ref_scene, // final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
OpticalFlow.THREADS_MAX); // int threadsMax)
double [][] pXpYD;
if (filter_bg) {
double [][] scene_ds = OpticalFlow.conditionInitialDS(
clt_parameters, // CLTParameters clt_parameters,
scene, // QuadCLT scene,
-1); // int debug_level);
if (scene_ds != null) {
double [] disparity_cam = scene_ds[0]; // null; // for now
pXpYD = OpticalFlow.filterBG (
ref_scene.getTileProcessor(), // final TileProcessor tp,
pXpYD_prefilter, // final double [][] pXpYD,
max_overlap, // final double max_overlap,
null, // disparity_cam, // final double [] disparity_cam,
min_adisp_cam, // final double min_adisp_cam,
min_rdisp_cam, // final double min_rdisp_cam,
clt_parameters.tileX, // final int dbg_tileX,
clt_parameters.tileY, // final int dbg_tileY,
0); // 1); //debug_level); // final int debug_level);
} else {
pXpYD = pXpYD_prefilter;
}
} else {
pXpYD = pXpYD_prefilter;
}
int rendered_width = scene.getErsCorrection().getSensorWH()[0];
boolean showPxPyD = false;
if (showPxPyD) {
int dbg_width = rendered_width/GPUTileProcessor.DTT_SIZE;
int dbg_height = pXpYD.length/dbg_width;
double [][] dbg_img = new double [3 + ((mb_vectors!=null)? 2:0)][pXpYD.length];
String [] dbg_titles = (mb_vectors!=null)?
(new String[] {"pX","pY","Disparity","mb_X","mb_Y"}):
(new String[] {"pX","pY","Disparity"});
for (int i = 0; i < dbg_img.length; i++) {
Arrays.fill(dbg_img[i], Double.NaN);
}
for (int nTile = 0; nTile < pXpYD.length; nTile++){
if (pXpYD[nTile] != null) {
for (int i = 0; i < pXpYD[nTile].length; i++) {
dbg_img[i][nTile] = pXpYD[nTile][i];
}
}
if (mb_vectors!=null) {
for (int i = 0; i <2; i++) {
dbg_img[3 + i][nTile] = mb_tau * mb_vectors[i][nTile];
}
}
}
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_img,
dbg_width,
dbg_height,
true,
scene.getImageName()+"-pXpYD",
dbg_titles);
}
TpTask[][] tp_tasks;
if (mb_vectors!=null) {
tp_tasks = GpuQuad.setInterTasksMotionBlur( // "true" reference, with stereo actual reference will be offset
scene.getNumSensors(),
rendered_width, // should match output size, pXpYD.length
!scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
// motion blur compensation
mb_tau, // final double mb_tau, // 0.008; // time constant, sec
mb_max_gain, // final double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel)
mb_vectors, //final double [][] mb_vectors, //
scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
0.0, // final double disparity_corr,
-1, // 0, // margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
OpticalFlow.THREADS_MAX); // final int threadsMax) // maximal number of threads to launch
} else {
tp_tasks = new TpTask[1][];
tp_tasks[0] = GpuQuad.setInterTasks( // "true" reference, with stereo actual reference will be offset
scene.getNumSensors(),
rendered_width, // should match output size, pXpYD.length
!scene.hasGPU(), // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
pXpYD, // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
0.0, // final double disparity_corr,
-1, // 0, // margin, // final int margin, // do not use tiles if their centers are closer to the edges
null, // final boolean [] valid_tiles,
OpticalFlow.THREADS_MAX); // final int threadsMax) // maximal number of threads to launch
}
if (tp_tasks[0].length == 0) {
if (debugLevel > -1) {
System.out.println("texturesNoOverlapGPUFromDSI(): no tiles to process");
}
return null;
}
/// scene.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
ImageDtt image_dtt = new ImageDtt(
scene.getNumSensors(),
clt_parameters.transform_size,
clt_parameters.img_dtt,
scene.isAux(),
scene.isMonochrome(),
scene.isLwir(),
clt_parameters.getScaleStrength(scene.isAux()),
scene.getGPU());
boolean use_reference = false;
int erase_clt = 0; // show_nan ? 1:0;
if (mb_vectors!=null) {// && test1) {
image_dtt.setReferenceTDMotionBlur( // change to main?
erase_clt, //final int erase_clt,
null, // wh, // null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
use_reference, // true, // final boolean use_reference_buffer,
tp_tasks, // final TpTask[] tp_tasks,
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
OpticalFlow.THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debugLevel); // final int globalDebugLevel);
} else {
image_dtt.setReferenceTD( // change to main?
erase_clt, //final int erase_clt,
null, // wh, // null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
clt_parameters.img_dtt, // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
use_reference, // true, // final boolean use_reference_buffer,
tp_tasks[0], // final TpTask[] tp_tasks,
clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
OpticalFlow.THREADS_MAX, // final int threadsMax, // maximal number of threads to launch
debugLevel); // final int globalDebugLevel);
}
// now obeys keep_weights
Rectangle woi = new Rectangle(); // will be filled out to match actual available image
// int keep_weights = (clt_parameters.keep_weights? 1 : 0) + (clt_parameters.replace_weights? 2 : 0);
// int keep_weights = (clt_parameters.replace_weights? 2 : 0); // just 0/2
int text_colors = (scene.isMonochrome() ? 1 : 3);
int texture_layers = (text_colors + 1)+((keep_weights != 0)?(text_colors * scene.getNumSensors()):0);
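// e.g. monochrome, 16 sensors, keep_weights != 0: texture_layers = (1 + 1) + 1 * 16 = 18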
double [] col_weights = new double[text_colors];
if (text_colors < 3) {
col_weights[0] = 1.0;
} else {
col_weights[2] = 1.0/(1.0 + clt_parameters.corr_red + clt_parameters.corr_blue); // green color
col_weights[0] = clt_parameters.corr_red * col_weights[2];
col_weights[1] = clt_parameters.corr_blue * col_weights[2];
}
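// e.g. with example values corr_red = 0.5, corr_blue = 0.2:
// col_weights = {0.294 (R), 0.118 (B), 0.588 (G)}, summing to 1.0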
scene.gpuQuad.execRBGA(
col_weights, // double [] color_weights,
scene.isLwir(), // boolean is_lwir,
clt_parameters.min_shot, // double min_shot, // 10.0
clt_parameters.scale_shot, // double scale_shot, // 3.0
clt_parameters.diff_sigma, // double diff_sigma, // pixel value/pixel change Used much larger sigma = 10.0 instead of 1.5
clt_parameters.diff_threshold, // double diff_threshold, // pixel value/pixel change
clt_parameters.min_agree, // double min_agree, // minimal number of channels to agree on a point (real number to work with fuzzy averages)
clt_parameters.dust_remove, // boolean dust_remove,
keep_weights); // int keep_weights)
float [][] rbga = scene.gpuQuad.getRBGA(
texture_layers - 1, // (isMonochrome() ? 1 : 3), // int num_colors,
woi); // woi in pixels
boolean show_rbga= false;
if (show_rbga) {
ShowDoubleFloatArrays.showArrays( // show slices RBGA (colors - 256, A - 1.0)
rbga,
woi.width,
woi.height,
true,
scene.getImageName()+"-RGBA-STACK-D"+clt_parameters.disparity+
":"+clt_parameters.gpu_woi_tx+":"+clt_parameters.gpu_woi_ty+
":"+clt_parameters.gpu_woi_twidth+":"+clt_parameters.gpu_woi_theight+
":"+(clt_parameters.gpu_woi_round?"C":"R")
//,new String[] {"R","B","G","A"}
);
show_rbga = false;
}
// if (debugLevel > -1000) {
// return null;
// }
final int tilesX = scene.getTileProcessor().getTilesX();
final int tilesY = scene.getTileProcessor().getTilesY();
final int tiles = tilesX*tilesY;
int tile_size = scene.getTileProcessor().getTileSize();
int tile_len = tile_size*tile_size;
final double [][][][] texture_tiles88 = new double [tilesY][tilesX][][]; // here - non-overlapped!
// copy from the windowed (WOI) RBGA output to per-tile non-overlapped texture tiles
final TpTask[] ftp_tasks = tp_tasks[0];
final Rectangle fwoi = woi;
final Thread[] threads = ImageDtt.newThreadArray(OpticalFlow.THREADS_MAX);
final AtomicInteger ai = new AtomicInteger(0);
final double [][][] tileXY = new double [tilesY][tilesX][];
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int itile = ai.getAndIncrement(); itile < ftp_tasks.length; itile = ai.getAndIncrement()) {
TpTask task = ftp_tasks[itile];
if (task != null) {
int x0 = tile_size * task.tx - fwoi.x;
int y0 = tile_size * task.ty - fwoi.y;
if ((x0 >=0) && (y0>=0) && (x0 < fwoi.width) && (y0 < fwoi.height)) {
int sindx0 = x0 + y0 * fwoi.width;
texture_tiles88[task.ty][task.tx] = new double [rbga.length][tile_len];
for (int nchn = 0; nchn < rbga.length; nchn++) {
int indx=0;
double [] tt = texture_tiles88[task.ty][task.tx][nchn];
for (int row = 0; row <tile_size; row++) {
int sindx = sindx0 + fwoi.width * row;
for (int col = 0; col <tile_size; col++) {
tt[indx++] = rbga[nchn][sindx++];
}
}
}
} else {
System.out.println("texturesNoOverlapGPUFromDSI() task tile outside of texture:\n"+
"task.tx="+task.tx+", task.ty="+task.ty+", woi.x="+fwoi.x+", woi.y="+fwoi.y+
", woi.width="+fwoi.width+", woi.height="+fwoi.height);
}
tileXY[task.ty][task.tx] = task.getDoubleCenterXY();
}
}
}
};
}
ImageDtt.startAndJoin(threads);
if (max_distortion > 0) {// remove distorted tiles
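// For each tile that belongs to a cluster, compare every same-cluster neighbor's projected center
// (tileXY) with this tile's center shifted by exactly one tile pitch in that direction; if the
// squared mismatch exceeds max_distortion^2, mark the tile as distorted so its texture is dropped
// in the pass below. Border-to-border neighbor pairs are skipped (may legitimately be discontinuous).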
double max_distortion2 = max_distortion * max_distortion;
final TileNeibs tn = new TileNeibs(tilesX, tilesY);
final boolean [] distorted = new boolean [tiles];
ai.set(0);
final double [] dbg_distort = (debugLevel>2)? (new double [tiles]):null;
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) {
int tileX = nTile % tilesX;
int tileY = nTile / tilesX;
if (tileXY[tileY][tileX] != null) {
double [] centerXY =tileXY[tileY][tileX];
if ((centerXY != null) && (cluster_index[nTile] >= 0)){
// see if any same-cluster neighbor tile is offset by more than the allowed distortion
for (int dir = 0; dir < TileNeibs.DIRS; dir++) {
int tile1 = tn.getNeibIndex(nTile, dir);
if (tile1 >= 0) {
if (cluster_index[tile1] == cluster_index[nTile]) {
int tileX1 = tn.getX(tile1);
int tileY1 = tn.getY(tile1);
double [] thisXY = tileXY[tileY1][tileX1];
// border-border neighbor may have discontinuity even belonging to the same cluster
// (coming close AGAIN)
if ((thisXY != null) && !(border[nTile] && border[tile1])) {
double dx = thisXY[0] - centerXY[0] - tile_size * TileNeibs.getDX(dir);
double dy = thisXY[1] - centerXY[1] - tile_size * TileNeibs.getDY(dir);
double e2 = dx*dx+dy*dy;
if (dbg_distort != null) {
if (e2 > dbg_distort[nTile]) {
dbg_distort[nTile] = Math.sqrt(e2);
}
} else {
if (e2 > max_distortion2) {
distorted[nTile] = true;
break;
}
}
} else {
continue; // check why task is null here for >=0 cluster index
}
}
}
}
} else {
if (debugLevel > -3) {
System.out.println("Non-null texture for no-cluster, nTile="+nTile+
", tileX="+tileX+", tileY="+tileY+", cluster_index["+nTile+"]="+cluster_index[nTile]);
}
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
ai.set(0);
if (dbg_distort != null) {
ShowDoubleFloatArrays.showArrays( // out of boundary 15
dbg_distort,
tilesX,
tilesY,
"test-distort");
}
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < distorted.length; nTile = ai.getAndIncrement()) if (distorted[nTile]){
int tileX = nTile % tilesX;
int tileY = nTile / tilesX;
if (texture_tiles88[tileY] != null) {
texture_tiles88[tileY][tileX] = null;
}
}
}
};
}
ImageDtt.startAndJoin(threads);
}
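// texture_tiles88[tilesY][tilesX] stays null for tiles without a task or removed above as distorted;
// each non-null entry holds [rbga.length][tile_size*tile_size] slices copied from the RBGA output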
return texture_tiles88;
}
......
......@@ -628,6 +628,25 @@ public class TexturedModel {
scenes_sel[i] = true;
}
boolean renormalize = true;// false - use normalizations from previous scenes to keep consistent colors
getInterCombinedTexturesNew( // return ImagePlus[] matching tileClusters[], with alpha
clt_parameters, // final CLTParameters clt_parameters,
colorProcParameters, // ColorProcParameters colorProcParameters,
rgbParameters, // EyesisCorrectionParameters.RGBParameters rgbParameters,
parameter_scene, // final QuadCLT parameter_scene, // to use for rendering parameters in multi-series sequences
// if null - use reference scene
scenes, // final QuadCLT [] scenes,
scenes_sel, // final boolean [] scenes_sel, // null or which scenes to process
null, // final boolean [] selection, // may be null, if not null do not process unselected tiles
tileClusters, // final TileCluster [] tileClusters, // disparities, borders, selections for texture passes
// final int margin,
renormalize, // final boolean renormalize, // false - use normalizations from previous scenes to keep consistent colors
debugLevel); // final int debug_level)
if (debugLevel > -1000) {
return false;
}
ImagePlus[] combined_textures = getInterCombinedTextures( // return ImagePlus[] matching tileClusters[], with alpha
clt_parameters, // final CLTParameters clt_parameters,
colorProcParameters, // ColorProcParameters colorProcParameters,
......@@ -656,6 +675,12 @@ public class TexturedModel {
1); //
}
}
// ********************* just for testing ************************************
if (debugLevel > -1000) {
return false;
}
// Maybe will switch to combined textures (less files)
ImagePlus [] imp_textures = splitCombinedTextures(
tileClusters, // TileCluster [] tileClusters, //should have name <timestamp>-*
......@@ -931,6 +956,7 @@ public class TexturedModel {
scenes[nscene].getErsCorrection().getErsATR_dt()};
}
scenes[nscene].saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU)
boolean keep_channels = false;
for (int nslice = 0; nslice < num_slices; nslice++) { // prepare and measure textures for each combo textures
final double [] disparity_ref = tileClusters[nslice].getDisparity(); // disparity in the reference view tiles (Double.NaN - invalid)
// Motion blur vectors are individual per-slice
......@@ -964,6 +990,7 @@ public class TexturedModel {
max_distortion, // final double max_distortion, // maximal neighbor tiles offset as a fraction of tile size (8)
cluster_indices[nslice], // final int [] cluster_index, //
borders[nslice], // final boolean [] border, // border tiles
keep_channels, // final boolean keep_channels,
debug_level); // final int debugLevel);
if (slice_texture != null) {
// Use MB vectors for texture weights
......@@ -1014,7 +1041,7 @@ public class TexturedModel {
}
} // for (int nslice = 0; nslice < num_slices; nslice++) {
} // for (int nscene = 0; nscene < num_scenes; nscene++) {
// Divide accumulated data by weights
double [][][] faded_textures = new double [num_slices][][];
final double [][] dbg_weights = (debug_level > 0 )?(new double [num_slices][tiles]) : null;
final double [][] dbg_overlap = (debug_level > 0 )?(new double [num_slices*num_channels][]) : null;
......@@ -1072,6 +1099,29 @@ public class TexturedModel {
}
}
}
if (debug_level > -1) {
double [][] dbg_textures = new double [faded_textures.length * faded_textures[0].length][faded_textures[0][0].length];
String [] dbg_titles = new String[dbg_textures.length];
String [] dbg_subtitles = new String [faded_textures[0].length];
for (int i = 0; i < dbg_subtitles.length; i++) {
dbg_subtitles[i] = (i < (dbg_subtitles.length -1)) ? ("Y"+i):"alpha";
}
for (int i = 0; i < dbg_textures.length; i++) {
dbg_textures[i] = faded_textures[i / faded_textures[0].length][i % faded_textures[0].length];
dbg_titles[i] = dbg_subtitles[i % dbg_subtitles.length] + "-" + (i / dbg_subtitles.length);
}
ShowDoubleFloatArrays.showArrays(
dbg_textures,
tilesX * transform_size,
tilesY * transform_size,
true,
ref_scene.getImageName()+"-combined_textures-prenorm-pre_UM",
dbg_titles);
}
// Optionally apply UM (before auto/manual range)
if (tex_um) {
QuadCLTCPU.umTextures(
......@@ -1248,6 +1298,839 @@ public class TexturedModel {
return imp_tex; // ImagePlus[] ? with alpha, to be split into png and saved with alpha.
}
public static ImagePlus[] getInterCombinedTexturesNew( // return ImagePlus[] matching tileClusters[], with alpha
final CLTParameters clt_parameters,
ColorProcParameters colorProcParameters,
EyesisCorrectionParameters.RGBParameters rgbParameters,
QuadCLT parameter_scene, // to use for rendering parameters in multi-series sequences
// if null - use reference scene
final QuadCLT [] scenes,
final boolean [] scenes_sel, // null or which scenes to process
final boolean [] selection, // may be null, if not null do not process unselected tiles
final TileCluster [] tileClusters, // disparities, borders, selections for texture passes
final boolean renormalize, // false - use normalizations from previous scenes to keep consistent colors
final int debug_level)
{
// TODO: ***** scenes with high motion blur also have high ERS to be corrected ! *****
final int ref_index = scenes.length -1;
final QuadCLT ref_scene = scenes[ref_index];
if (parameter_scene == null) {
parameter_scene = ref_scene;
}
final int earliestScene = ref_scene.getEarliestScene(scenes);
final ErsCorrection ers_reference = ref_scene.getErsCorrection();
final int tilesX = ref_scene.getTileProcessor().getTilesX();
final int tilesY = ref_scene.getTileProcessor().getTilesY();
final int tiles = tilesX * tilesY;
final int transform_size= ref_scene.getTileProcessor().getTileSize();
final int tile_len = transform_size * transform_size;
// final int num_channels = ref_scene.isMonochrome()?2:4; //
final boolean filter_bg = true; // make a clt parameter?
final boolean mb_en = clt_parameters.imp.mb_en;
final double mb_tau = clt_parameters.imp.mb_tau; // 0.008;// time constant, sec
final double mb_max_gain = clt_parameters.imp.mb_max_gain; // 5.0; // motion blur maximal gain (if more - move second point more than a pixel)
final double max_distortion = clt_parameters.tex_distort; // 0.5; // Maximal texture distortion to accumulate multiple scenes (0 - any)
final double tex_mb = clt_parameters.tex_mb; // 1.0; // Reduce texture weight if motion blur exceeds this (as square of MB length)
final boolean sharp_alpha = clt_parameters.sharp_alpha;
final boolean is_lwir = ref_scene.isLwir();
final boolean tex_um = clt_parameters.tex_um; // imp.um_mono; // TODO: add own parameter
final double tex_um_sigma = clt_parameters.tex_um_sigma; // imp.um_sigma;
final double tex_um_weight = clt_parameters.tex_um_weight; // imp.um_weight;
// TODO: - make texture variants, tex_um_fixed/tex_um_range apply only to unsharp mask, regardless of colors
final boolean lwir_autorange = is_lwir && clt_parameters.tex_lwir_autorange; // colorProcParameters.lwir_autorange;
final boolean tex_um_fixed = clt_parameters.tex_um_fixed; // imp.mono_fixed; // true; // normalize to fixed range when converting to 8 bits
final double tex_um_range = clt_parameters.tex_um_range; // imp.mono_range; // 500.0; // monochrome full-scale range (+/- half)
final boolean tex_hist_norm = clt_parameters.tex_hist_norm; // true;
final double tex_hist_amount = clt_parameters.tex_hist_amount; // clt_parameters. 0.7;
final int tex_hist_bins = clt_parameters.tex_hist_bins; // 1024 ;
final int tex_hist_segments =clt_parameters.tex_hist_segments; // 32 ;
final boolean tex_color = clt_parameters.tex_color; // true;
final int tex_palette = clt_parameters.tex_palette; // 2 ;
ImageDtt image_dtt;
image_dtt = new ImageDtt(
ref_scene.getNumSensors(), // numSens,
transform_size,
clt_parameters.img_dtt,
ref_scene.isAux(),
ref_scene.isMonochrome(),
ref_scene.isLwir(),
clt_parameters.getScaleStrength(ref_scene.isAux()),
ref_scene.getGPU());
if (ref_scene.getGPU() != null) {
ref_scene.getGPU().setGpu_debug_level(debug_level);
}
image_dtt.getCorrelation2d(); // initiate image_dtt.correlation2d, needed if disparity_map != null
final int num_slices = tileClusters.length;
double [][][] inter_weights = new double [num_slices][tilesY][tilesX]; // per-tile texture weights for inter-scene accumulation;
// double [][][][][] inter_textures= new double [num_slices][tilesY][tilesX][][]; // [channel][256] - non-overlapping textures
// weighted sum
double [][][][][] inter_textures_wd= new double [num_slices][tilesY][tilesX][][]; // [channel][64] - overlapping textures
// weighted sum of squares
double [][][][][] inter_textures_wd2= new double [num_slices][tilesY][tilesX][][]; // [channel][64] - overlapping textures
double [][][] ref_pXpYDs = new double [num_slices][][]; // individual for each slice
int [][] cluster_indices = (max_distortion > 0.0) ? (new int [num_slices][]): null;
boolean [][] borders = new boolean [num_slices][];
for (int nslice = 0; nslice < num_slices; nslice++) { // prepare and measure textures for each combo textures
ref_pXpYDs[nslice] = OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene
null, // fov_tiles, // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
tileClusters[nslice].getDisparity(), // final double [] disparity_ref, // invalid tiles - NaN in disparity
OpticalFlow.ZERO3, // final double [] scene_xyz, // camera center in world coordinates
OpticalFlow.ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
scenes[ref_index], // final QuadCLT scene_QuadClt,
scenes[ref_index], // final QuadCLT reference_QuadClt, // now - may be null - for testing if scene is rotated ref
THREADS_MAX); // int threadsMax)
borders[nslice] = tileClusters[nslice].getBorder();
if (max_distortion > 0.0) {
cluster_indices[nslice] = tileClusters[nslice].getClusterIndex();
}
}
final int num_sensors = parameter_scene.getNumSensors();
final int num_colors = parameter_scene.isMonochrome()?1:3;
for (int nscene = earliestScene; nscene < scenes.length; nscene++) if ((scenes_sel == null) || scenes_sel[nscene]){
String ts = scenes[nscene].getImageName();
double [] scene_xyz = OpticalFlow.ZERO3;
double [] scene_atr = OpticalFlow.ZERO3;
if (nscene != ref_index) {
scene_xyz = ers_reference.getSceneXYZ(ts);
scene_atr = ers_reference.getSceneATR(ts);
if ((scene_xyz == null) || (scene_atr == null)){
continue; // scene is not matched
}
double [] scene_ers_xyz_dt = ers_reference.getSceneErsXYZ_dt(ts);
double [] scene_ers_atr_dt = ers_reference.getSceneErsATR_dt(ts);
scenes[nscene].getErsCorrection().setErsDt(
scene_ers_xyz_dt, // double [] ers_xyz_dt,
scene_ers_atr_dt); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
}
double [][] dxyzatr_dt = null;
// should get velocities from the HashMap at the reference scene by timestamp, not re-calculate.
if (mb_en) { // all scenes have the same name/path
// dxyzatr_dt = OpticalFlow.getVelocities( // looks at previous/next scene poses
// scenes, // QuadCLT [] quadCLTs,
// nscene); // int nscene)
dxyzatr_dt = new double[][] { // for all, including ref
scenes[nscene].getErsCorrection().getErsXYZ_dt(),
scenes[nscene].getErsCorrection().getErsATR_dt()};
}
scenes[nscene].saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU)
//parameter_scene
// boolean keep_channels = false;
for (int nslice = 0; nslice < num_slices; nslice++) { // prepare and measure textures for each combo textures
final double [] disparity_ref = tileClusters[nslice].getDisparity(); // disparity in the reference view tiles (Double.NaN - invalid)
// Motion blur vectors are individual per-slice
// Calculate motion blur vectors - may be used to modify weights of averaged textures
final double [][] motion_blur = (mb_en && (dxyzatr_dt != null))? OpticalFlow.getMotionBlur(
scenes[ref_index], // QuadCLT ref_scene,
scenes[nscene], // QuadCLT scene, // can be the same as ref_scene
ref_pXpYDs[nslice], // double [][] ref_pXpYD, // here it is scene, not reference!
scene_xyz, // double [] camera_xyz,
scene_atr, // double [] camera_atr,
dxyzatr_dt[0], // double [] camera_xyz_dt,
dxyzatr_dt[1], // double [] camera_atr_dt,
0, // int shrink_gaps, // will gaps, but not more that grow by this
debug_level) : null; // int debug_level)
if (debug_level > 0) {
System.out.println("nscene="+nscene+", nslice="+nslice+" will run texturesGPUFromDSI() that needs debug >2");
System.out.print("");
}
double [][][][] slice_texture88 = QuadCLT.texturesNoOverlapGPUFromDSI(
clt_parameters, // CLTParameters clt_parameters,
disparity_ref, // double [] disparity_ref,
// motion blur compensation
mb_tau, // double mb_tau, // 0.008; // time constant, sec
mb_max_gain, // double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel)
motion_blur, // double [][] mb_vectors, // now [2][ntiles];
scene_xyz, // final double [] scene_xyz, // camera center in world coordinates
scene_atr, // final double [] scene_atr, // camera orientation relative to world frame
scenes[nscene], // final QuadCLT scene,
scenes[ref_index], // final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
filter_bg && (nscene != ref_index), // final boolean filter_bg, // remove bg tiles (possibly occluded)
max_distortion, // final double max_distortion, // maximal neighbor tiles offset as a fraction of tile size (8)
cluster_indices[nslice], // final int [] cluster_index, //
borders[nslice], // final boolean [] border, // border tiles
true, //keep_channels, // final boolean keep_channels,
debug_level); // final int debugLevel);
if (slice_texture88 != null) {
// Use MB vectors for texture weights
final Thread[] threads = ImageDtt.newThreadArray(THREADS_MAX);
final AtomicInteger ai = new AtomicInteger(0);
final int fnslice = nslice;
final double mb_tau2 = mb_tau * mb_tau / tex_mb / tex_mb;
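// mb_tau2 scales the squared motion-blur vector so that below mb_l2 = (|mb_vector| * mb_tau / tex_mb)^2,
// i.e. the squared blur length expressed in units of tex_mb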
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) {
int tileX = nTile % tilesX;
int tileY = nTile / tilesX;
if (slice_texture88[tileY][tileX] != null) {
double w = 1.0;
if (tex_mb > 0.0) {
double mb_l2 = mb_tau2 * ( motion_blur[0][nTile]*motion_blur[0][nTile] + // motion_blur == null;
motion_blur[1][nTile]*motion_blur[1][nTile]);
if (mb_l2 > 1.0) {
w /= mb_l2; // 1/(squared mb)
}
}
if (w > 0) {
inter_weights[fnslice][tileY][tileX] +=w;
if (inter_textures_wd[fnslice][tileY][tileX] == null) { // create if it did not exist
inter_textures_wd[fnslice][tileY][tileX] = new double [slice_texture88[tileY][tileX].length + num_colors][slice_texture88[tileY][tileX][0].length];
inter_textures_wd2[fnslice][tileY][tileX] = new double [slice_texture88[tileY][tileX].length + num_colors][slice_texture88[tileY][tileX][0].length];
}
for (int nchn = 0; nchn < slice_texture88[tileY][tileX].length; nchn++) {
for (int i = 0; i < slice_texture88[tileY][tileX][nchn].length; i++) {
double d = slice_texture88[tileY][tileX][nchn][i];
inter_textures_wd [fnslice][tileY][tileX][nchn][i] += w * d;
inter_textures_wd2[fnslice][tileY][tileX][nchn][i] += w * d *d;
}
}
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
}
if (debug_level > -2) { // -1
if (nscene == ref_index) {
System.out.println("Textures from the reference scene, nslice = " + nslice +((slice_texture88 == null)? " - EMPTY":""));
} else {
System.out.println("Textures from scene "+nscene+", slice="+nslice +((slice_texture88 == null)? " - EMPTY":""));
}
}
} // for (int nslice = 0; nslice < num_slices; nslice++) {
} // for (int nscene = 0; nscene < num_scenes; nscene++) {
// Divide accumulated data by weights
double [][][] faded_textures = new double [num_slices][][];
final double [][] dbg_weights = (debug_level > 0 )?(new double [num_slices][tiles]) : null;
final double [][] dbg_overlap = (debug_level > 0 )?(new double [num_slices*(num_colors+1)][]) : null;
for (int nslice = 0; nslice < num_slices; nslice++) {
final int fnslice = nslice;
if (dbg_weights != null) {
Arrays.fill(dbg_weights[nslice], Double.NaN);
}
final Thread[] threads = ImageDtt.newThreadArray(THREADS_MAX);
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) {
int tileX = nTile % tilesX;
int tileY = nTile / tilesX;
if (inter_weights[fnslice][tileY][tileX] > 0.0) {
if (dbg_weights != null) {
dbg_weights[fnslice][nTile] = inter_weights[fnslice][tileY][tileX];
}
double w = 1.0/ inter_weights[fnslice][tileY][tileX];
for (int nchn = 0; nchn < inter_textures_wd[fnslice][tileY][tileX].length - num_colors; nchn++) {
for (int i = 0; i < inter_textures_wd[fnslice][tileY][tileX][nchn].length; i++) {
double d = inter_textures_wd[fnslice][tileY][tileX][nchn][i] * w; // average
double d2= inter_textures_wd2[fnslice][tileY][tileX][nchn][i] * w; // average of squared
inter_textures_wd[fnslice][tileY][tileX][nchn][i] = d;
inter_textures_wd2[fnslice][tileY][tileX][nchn][i] = Math.sqrt(d2- d * d);
}
}
for (int ncol = 0; ncol < num_colors; ncol++) {
int navg = inter_textures_wd[fnslice][tileY][tileX].length - num_colors + ncol;
for (int i = 0; i < tile_len; i++) {
inter_textures_wd[fnslice][tileY][tileX][navg][i] = 0;
inter_textures_wd2[fnslice][tileY][tileX][navg][i] = 0;
for (int nsens = 0; nsens < num_sensors; nsens++) {
inter_textures_wd[fnslice][tileY][tileX][navg][i] +=
inter_textures_wd[fnslice][tileY][tileX][1 + (nsens + 1) * num_colors][i]/num_sensors;
inter_textures_wd2[fnslice][tileY][tileX][navg][i] +=
inter_textures_wd2[fnslice][tileY][tileX][1 + (nsens + 1) * num_colors][i]/num_sensors;
}
}
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
// debug-display this slice here
if (debug_level > -10) {
int var_radius = 3;
double try_dir_var = 30.0; // try directional if the intersensor variance exceeds this value
int dir_num_start = 5; // start with this number of consecutive sensors
double dir_worsen_rel = 0.15; // add more sensors until the variance grows by this relative amount
double dir_var_max = 20.0; // do not add more sensors if the variance would exceed this
int dbg_slices = num_colors + 1 +num_colors*num_sensors + num_colors;
int dbg_width = tilesX * transform_size;
int dbg_height = tilesY * transform_size;
double [][] dbg_textures = new double [dbg_slices][dbg_width*dbg_height];
double [][] dbg_textures2 = new double [dbg_slices][dbg_width*dbg_height];
String [] dbg_titles = new String[dbg_textures.length];
int dbg_slices_v = dbg_slices + 8 * num_colors;
double [][] dbg_textures_v = new double [dbg_slices_v][];
String [] dbg_titles_v = new String[dbg_textures_v.length];
int tindx = 0;
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles[tindx++] = "C"+ncol;
}
dbg_titles[tindx++] = "ALPHA";
for (int nsens = 0; nsens < num_sensors; nsens++) {
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles[tindx++] = "T"+nsens+((num_colors>1)?(":"+ncol):"");
}
}
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles[tindx++] = "AVG"+((num_colors>1)?(ncol):"");
}
for (int n = 0; n < dbg_slices; n++) {
Arrays.fill(dbg_textures[n], Double.NaN);
Arrays.fill(dbg_textures2[n], Double.NaN);
}
// inter_textures_wd[fnslice][tileY][tileX][navg][i] +=
for (int tileY = 0; tileY < tilesY; tileY++) {
for (int tileX = 0; tileX < tilesX; tileX++) if (inter_textures_wd[fnslice][tileY][tileX] != null){
for (int row = 0; row < transform_size; row++) {
for (int n = 0; n < dbg_slices; n++) {
System.arraycopy(
inter_textures_wd[fnslice][tileY][tileX][n],
row*transform_size,
dbg_textures[n],
(tileY * transform_size + row) * dbg_width + (tileX * transform_size),
transform_size);
System.arraycopy(
inter_textures_wd2[fnslice][tileY][tileX][n],
row*transform_size,
dbg_textures2[n],
(tileY * transform_size + row) * dbg_width + (tileX * transform_size),
transform_size);
}
}
}
}
ShowDoubleFloatArrays.showArrays(
dbg_textures,
dbg_width,
dbg_height,
true,
ref_scene.getImageName()+"-combined_textures88-"+String.format("%02d", nslice),
dbg_titles);
ShowDoubleFloatArrays.showArrays(
dbg_textures2,
dbg_width,
dbg_height,
true,
ref_scene.getImageName()+"-combined_textures88-rmse-"+String.format("%02d", nslice),
dbg_titles);
// get variance-same, variance-inter, variance-same/variance-inter
tindx = 0;
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "C"+ncol;
}
dbg_titles_v[tindx++] = "ALPHA";
for (int nsens = 0; nsens < num_sensors; nsens++) {
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "T"+nsens+((num_colors>1)?(":"+ncol):"");
}
}
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "AVG"+((num_colors>1)?(ncol):"");
}
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "VAR_SAME"+((num_colors>1)?(ncol):"");
}
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "VAR_INTER"+((num_colors>1)?(ncol):"");
}
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "VAR_RATIO"+((num_colors>1)?(ncol):"");
}
// debugging BG pixels obscured by the FG ones
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "DIR"+((num_colors>1)?(ncol):""); // direction (center, step 0.5)
}
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "LEN"+((num_colors>1)?(ncol):""); // number of consecutive sensors
}
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "DIRTEX"+((num_colors>1)?(ncol):""); // average texture value with subset of sensors
}
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "DIRVAR"+((num_colors>1)?(ncol):""); // variance for the subset of sensors
}
for (int ncol = 0; ncol < num_colors; ncol++) {
dbg_titles_v[tindx++] = "DIRRATIO"+((num_colors>1)?(ncol):""); // ratio of space variance to directional variance
}
for (int n = 0; n < dbg_slices; n++) {
dbg_textures_v[n] = dbg_textures[n];
}
for (int n = dbg_slices; n < dbg_slices_v; n++) {
dbg_textures_v[n] = new double[dbg_width*dbg_height];
Arrays.fill(dbg_textures_v[n], Double.NaN);
}
final TileNeibs pn = new TileNeibs(tilesX*transform_size, tilesY*transform_size);
for (int ncol = 0; ncol < num_colors; ncol++) {
final int indx_mean = num_colors + 1 + num_sensors*num_colors; // average of channels // ncol; //
final int indx_chn0 = num_colors + 1 + ncol; // + nsens*num_colors
final int indx_var_same = num_colors + 1 + num_sensors*num_colors + 1 + ncol;
final int indx_var_inter = indx_var_same + num_colors;
final int indx_var_ratio = indx_var_inter + num_colors;
final int indx_dir = indx_var_ratio + 1 * num_colors;
final int indx_len = indx_var_ratio + 2 * num_colors;
final int indx_dir_tex = indx_var_ratio + 3 * num_colors;
final int indx_dir_var = indx_var_ratio + 4 * num_colors;
final int indx_dir_ratio = indx_var_ratio + 5 * num_colors;
ai.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement()) {
int tileX = nTile % tilesX;
int tileY = nTile / tilesX;
for (int dy = 0; dy < transform_size; dy++) {
int y0 = tileY * transform_size + dy;
for (int dx = 0; dx < transform_size; dx++) {
int x0 = tileX * transform_size + dx;
int indx0 = pn.getIndex(x0, y0);
double var_same = Double.NaN;
double var_inter = Double.NaN;
if ((indx0 >= 0) && !Double.isNaN(dbg_textures_v[indx_mean][indx0])) {
// calculate unweighted variance
double sw = 0.0, swd=0.0, swd2 = 0.0;
for (int dvy = -var_radius; dvy <= var_radius; dvy++) {
for (int dvx = -var_radius; dvx <= var_radius; dvx++) {
int indx = pn.getIndex(x0+dvx, y0+dvy);
if ((indx >= 0) && !Double.isNaN(dbg_textures_v[indx_mean][indx])) {
double w = 1.0;
double d = dbg_textures_v[indx_mean][indx];
sw += w;
swd += w * d;
swd2 += w * d*d;
}
}
}
if (sw > 0.0) { // always
double avg = swd/sw;
double avg2 = swd2/sw;
var_same = Math.sqrt(avg2-avg*avg);
dbg_textures_v[indx_var_same][indx0] = var_same;
}
// calculate inter-sensor variance (add local normalization?)
sw = 0.0; swd=0.0; swd2 = 0.0;
for (int nsens = 0; nsens< num_sensors; nsens++) {
double w = 1.0;
double d = dbg_textures_v[indx_chn0 + nsens*num_colors][indx0];
sw += w;
swd += w * d;
swd2 += w * d*d;
}
if (sw > 0.0) { // always
double avg = swd/sw;
double avg2 = swd2/sw;
var_inter = Math.sqrt(avg2-avg*avg);
dbg_textures_v[indx_var_inter][indx0] = var_inter;
}
dbg_textures_v[indx_var_ratio][indx0] = var_same/var_inter;
// try to improve var_inter for BG tiles by selecting a subset of sensors that looks behind the FG from one side
dbg_textures_v[indx_dir_tex][indx0] = dbg_textures_v[indx_mean][indx0];
dbg_textures_v[indx_dir_var][indx0] = var_inter;
dbg_textures_v[indx_dir_ratio][indx0] = var_same/var_inter;
double [] dirvar = new double [num_sensors];
// extract to a separate method
// move color to outer cycle
// pass color-dependent array of relevant slices
// calculate array of variances dependent on dir_start and
// dir_len, and mix them according to neighbor weights, then find
// the best dir and length. Then use only for the center pixel.
// For BG tiles - only change Y, keep alpha = 1.0; for FG - cut transparent and maybe remove texture tiles altogether?
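// What the code below does: 1) for every starting sensor compute the variance of this pixel over
// dir_num_start consecutive sensors (sliding arc around the ring); 2) pick the start with the lowest
// variance; 3) grow the arc one sensor at a time, backward or forward - whichever keeps the variance
// lower - while it stays under max_var; 4) store the arc center (DIR), length (LEN), its mean (DIRTEX),
// its variance (DIRVAR) and var_same/dir_var (DIRRATIO) in the debug slices.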
if (var_inter > try_dir_var) {
sw = 0.0; swd=0.0; swd2 = 0.0;
for (int i = 0; i < dir_num_start; i++) {
double w = 1.0;
double d = dbg_textures_v[indx_chn0 + i*num_colors][indx0];
sw += w;
swd += w * d;
swd2 += w * d*d;
}
double [] sw_dir = new double[num_sensors];
double [] swd_dir = new double[num_sensors];
double [] swd2_dir = new double[num_sensors];
for (int nsens = 0; nsens < num_sensors; nsens++) {
double avg = swd/sw;
double avg2 = swd2/sw;
dirvar[nsens] = Math.sqrt(avg2-avg*avg);
sw_dir[nsens] = sw;
swd_dir[nsens] = swd;
swd2_dir[nsens] = swd2;
double w0 = 1.0;
double d0 = dbg_textures_v[indx_chn0 + nsens*num_colors][indx0];
sw -= w0;
swd -= w0 * d0;
swd2 -= w0 * d0 * d0;
int nsens1 = (nsens + dir_num_start) % num_sensors;
double w1 = 1.0;
double d1 = dbg_textures_v[indx_chn0 + nsens1 * num_colors][indx0];
sw += w1;
swd += w1 * d1;
swd2 += w1 * d1 * d1;
}
int dir_best = 0; // start of consecutive sensor numbers
for (int i = 1; i < num_sensors; i++) {
if (dirvar[i] < dirvar[dir_best]) {
dir_best = i;
}
}
// now try to increase number of averaged sensors
// try forward and backward, select best, verify it fits.
double dir_var = dirvar[dir_best];
double max_var = Math.min(dir_var_max, dir_var*(1.0+dir_worsen_rel));
sw = sw_dir [dir_best];
swd = swd_dir [dir_best];
swd2 = swd2_dir[dir_best];
int dir_start = dir_best;
int dir_len ;
for (dir_len = dir_num_start; dir_len < num_sensors; dir_len++) {
int nsens0 = dir_start - 1; //) % num_sensors;
if (nsens0 < 0) {
nsens0 += num_sensors;
}
double w = 1.0;
double d = dbg_textures_v[indx_chn0 + nsens0 * num_colors][indx0];
double sw_0 = sw + w;
double swd_0 = swd + w*d;
double swd2_0 = swd2 + w*d*d;
double avg = swd_0 / sw_0;
double avg2 = swd2_0 / sw_0;
double var_0 = Math.sqrt(avg2 - avg * avg);
int nsens1 = dir_start + dir_len;
if (nsens1 >=num_sensors) {
nsens1 -= num_sensors ;
}
w = 1.0;
d = dbg_textures_v[indx_chn0 + nsens1 * num_colors][indx0];
double sw_1 = sw + w;
double swd_1 = swd + w*d;
double swd2_1 = swd2 + w*d*d;
avg = swd_1 / sw_1;
avg2 = swd2_1 / sw_1;
double var_1 = Math.sqrt(avg2 - avg * avg);
if (Math.min(var_0, var_1) > max_var) {
break;
}
if (var_0 < var_1) {
dir_start = nsens0;
sw = sw_0;
swd = swd_0;
swd2 = swd2_0;
dir_var = var_0;
} else {
// dir_start stays the same
sw = sw_1;
swd = swd_1;
swd2 = swd2_1;
dir_var = var_1;
}
}
double dir_avg = swd/sw;
double ddir = dir_start + 0.5 * (dir_len - 1);
if (ddir >= num_sensors) {
ddir -= num_sensors;
}
// fill arrays
dbg_textures_v[indx_dir][indx0] = ddir;
dbg_textures_v[indx_len][indx0] = dir_len;
dbg_textures_v[indx_dir_tex][indx0] = dir_avg;
dbg_textures_v[indx_dir_var][indx0] = dir_var;
dbg_textures_v[indx_dir_ratio][indx0] = var_same/dir_var;
}
}
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
ShowDoubleFloatArrays.showArrays(
dbg_textures_v,
dbg_width,
dbg_height,
true,
ref_scene.getImageName()+"-textures88-variances-"+String.format("%02d", nslice),
dbg_titles_v);
}
}
// Process slice of textures: apply borders, convert to color or apply UM, add synthetic mesh, ...
// 2 layers for mono, 4 layers - for color
// First - merge overlapped and apply borders, alpha is the last slice
/*
faded_textures[nslice] = getFadedTextures( // get image from a single pass, return relative path for x3d // USED in lwir
clt_parameters, // CLTParameters clt_parameters,
inter_textures[fnslice], // final double [][][][] texture_tiles, // array [tilesY][tilesX][4][4*transform_size] or [tilesY][tilesX]{null}
tileClusters[fnslice], // final TileCluster tileCluster, // disparities, borders, selections for texture passes
sharp_alpha, // final boolean sharp_alpha, // combining mode for alpha channel: false - treat as RGB, true - apply center 8x8 only
transform_size, // final int transform_size, //
(num_colors+1), // final int num_channels, // 4 for RGBA, 2 for Y (should match textures)
debug_level); // final int debugLevel)
if (dbg_overlap != null) {
double [][] non_overlap = combineYRBGATiles(
inter_textures[fnslice], // final double [][][][] texture_tiles, // array [tilesY][tilesX][4][4*transform_size] or [tilesY][tilesX]{null}
false, // final boolean overlap, // when false - output each tile as 16x16, true - overlap to make 8x8
sharp_alpha, // final boolean sharp_alpha, // combining mode for alpha channel: false - treat as RGB, true - apply center 8x8 only
transform_size, // final int transform_size, //
(num_colors+1), //final int num_channels, // 4 for RGBA, 2 for Y (should match textures)
debug_level); // final int debugLevel)
for (int i = 0; i < num_channels; i++) {
dbg_overlap[fnslice*(num_colors+1)+i] = non_overlap[i];
}
}
*/
}
if (debug_level > -1000) {
return null;
}
if (debug_level > -1) {
double [][] dbg_textures = new double [faded_textures.length * faded_textures[0].length][faded_textures[0][0].length];
String [] dbg_titles = new String[dbg_textures.length];
String [] dbg_subtitles = new String [faded_textures[0].length];
for (int i = 0; i < dbg_subtitles.length; i++) {
dbg_subtitles[i] = (i < (dbg_subtitles.length -1)) ? ("Y"+i):"alpha";
}
for (int i = 0; i < dbg_textures.length; i++) {
dbg_textures[i] = faded_textures[i / faded_textures[0].length][i % faded_textures[0].length];
dbg_titles[i] = dbg_subtitles[i % dbg_subtitles.length] + "-" + (i / dbg_subtitles.length);
}
ShowDoubleFloatArrays.showArrays(
dbg_textures,
tilesX * transform_size,
tilesY * transform_size,
true,
ref_scene.getImageName()+"-combined_textures-prenorm-pre_UM",
dbg_titles);
}
if (debug_level > -100) {
return null;
}
// Optionally apply UM (before auto/manual range)
if (tex_um) {
QuadCLTCPU.umTextures(
faded_textures, // final double [][][] textures, // [nslices][nchn][i]
tilesX * transform_size, // final int width,
tex_um_sigma, // final double um_sigma,
tex_um_weight); // final double um_weight)
}
if (debug_level > -1) {
double [][] dbg_textures = new double [faded_textures.length * faded_textures[0].length][faded_textures[0][0].length];
String [] dbg_titles = new String[dbg_textures.length];
String [] dbg_subtitles = new String [faded_textures[0].length];
for (int i = 0; i < dbg_subtitles.length; i++) {
dbg_subtitles[i] = (i < (dbg_subtitles.length -1)) ? ("Y"+i):"alpha";
}
for (int i = 0; i < dbg_textures.length; i++) {
dbg_textures[i] = faded_textures[i / faded_textures[0].length][i % faded_textures[0].length];
dbg_titles[i] = dbg_subtitles[i % dbg_subtitles.length] + "-" + (i / dbg_subtitles.length);
}
ShowDoubleFloatArrays.showArrays(
dbg_textures,
tilesX * transform_size,
tilesY * transform_size,
true,
ref_scene.getImageName()+"-combined_textures-prenorm",
dbg_titles);
if (dbg_overlap != null) {
ShowDoubleFloatArrays.showArrays(
dbg_overlap,
2 * tilesX * transform_size,
2 * tilesY * transform_size,
true,
ref_scene.getImageName()+"-non-overlap_textures-prenorm",
dbg_titles);
}
if (dbg_weights != null) {
ShowDoubleFloatArrays.showArrays(
dbg_weights,
tilesX,
tilesY,
true,
ref_scene.getImageName()+"-texture_weights-prenorm");
}
}
//renormalize
// normalize all slices together if LWIR
// FIXME: Should it be here? Will setColdHot() change photometric calibration ? Or should it be disabled?
double [] norm_table = null; // first try, then save to properties together with cold/hot
if (renormalize) {
if (lwir_autorange) {
double rel_low;
double rel_high;
boolean force_min_max = true;
if (!tex_um && !force_min_max) { // for UM will use min/max
rel_low = colorProcParameters.lwir_low;
rel_high = colorProcParameters.lwir_high;
if (!Double.isNaN(parameter_scene.getLwirOffset())) { // ref_scene or parameter_scene? Or both?
rel_low -= parameter_scene.getLwirOffset();
rel_high -= parameter_scene.getLwirOffset();
}
} else { // for UM need to calculate min and max (probably OK for non-UM too !)
double [] minmax = QuadCLTCPU.getMinMaxTextures(
faded_textures ); //double [][][] textures // [slices][nchn][i]
rel_low = minmax[0]; // absolute min
rel_high = minmax[1]; // absolute max
}
double [] cold_hot = QuadCLTCPU.autorangeTextures(
faded_textures, // double [][][] textures, // [nslices][nchn][i]
rel_low, // double hard_cold,// matches data, DC (this.lwir_offset) subtracted
rel_high, // double hard_hot, // matches data, DC (this.lwir_offset) subtracted
colorProcParameters.lwir_too_cold, // double too_cold, // pixels per image
colorProcParameters.lwir_too_hot, // double too_hot, // pixels per image
tex_hist_bins); // int num_bins)
if ((cold_hot != null) && !tex_um && !force_min_max) {
if (!Double.isNaN(parameter_scene.getLwirOffset())) {
cold_hot[0] += parameter_scene.getLwirOffset();
cold_hot[1] += parameter_scene.getLwirOffset();
}
}
parameter_scene.setColdHot(cold_hot); // will be used for shifted images and for texture tiles
} else if (tex_um && tex_um_fixed) { // apply fixed range, but for UM only (what about RGB?)
parameter_scene.setColdHot(-0.5*tex_um_range, 0.5*tex_um_range);
}
if (tex_hist_norm) { // will normalize (0..1) keeping cold_hot to apply during rendering
// last norm_table element is <=1.0, first >=0;
norm_table = QuadCLTCPU.getHistogramNormalization(
faded_textures, // double [][][] textures, // [nslices][nchn][i]
parameter_scene.getColdHot(), // double [] minmax,
tex_hist_bins, // int num_bins,
tex_hist_segments, //int num_nodes
tex_hist_amount); //double hist_normalize_amount // 1.0 - full
}
}
if (tex_hist_norm && (norm_table != null)) {
// apply histogram normalization
double [] cold_hot = parameter_scene.getColdHot(); // used in linearStackToColor
double [] inverted_table = QuadCLTCPU.invertHistogramNormalization(
norm_table, // double [] direct_table, // last is <1.0, first > 0
tex_hist_bins); // int num_bins)
QuadCLTCPU.applyTexturesNormHist(
faded_textures, // final double [][][] textures, // [nslices][nchn][i]
cold_hot, // final double [] min_max,
inverted_table); // final double [] inv_table)
}
if (debug_level > -1) {
double [][] dbg_textures = new double [faded_textures.length * faded_textures[0].length][faded_textures[0][0].length];
String [] dbg_titles = new String[dbg_textures.length];
String [] dbg_subtitles = new String [faded_textures[0].length];
for (int i = 0; i < dbg_subtitles.length; i++) {
dbg_subtitles[i] = (i < (dbg_subtitles.length -1)) ? ("Y"+i):"alpha";
}
for (int i = 0; i < dbg_textures.length; i++) {
dbg_textures[i] = faded_textures[i / faded_textures[0].length][i % faded_textures[0].length];
dbg_titles[i] = dbg_subtitles[i % dbg_subtitles.length] + "-" + (i / dbg_subtitles.length);
}
ShowDoubleFloatArrays.showArrays(
dbg_textures,
tilesX * transform_size,
tilesY * transform_size,
true,
ref_scene.getImageName()+"-combined_textures",
dbg_titles);
if (dbg_overlap != null) {
ShowDoubleFloatArrays.showArrays(
dbg_overlap,
2 * tilesX * transform_size,
2 * tilesY * transform_size,
true,
ref_scene.getImageName()+"-non-overlap_textures",
dbg_titles);
}
if (dbg_weights != null) {
ShowDoubleFloatArrays.showArrays(
dbg_weights,
tilesX,
tilesY,
true,
ref_scene.getImageName()+"-texture_weights");
}
}
double [] minmax = parameter_scene.getColdHot(); // used in linearStackToColor
ImagePlus [] imp_tex = new ImagePlus[num_slices];
for (int nslice = 0; nslice < num_slices; nslice++) {
String title=String.format("%s-combo%03d-texture",ref_scene.getImageName(), nslice);
imp_tex[nslice] = QuadCLTCPU.linearStackToColorLWIR(
clt_parameters, // CLTParameters clt_parameters,
tex_palette, // int lwir_palette, // <0 - do not convert
minmax, // double [] minmax,
title, // String name,
"", // String suffix, // such as disparity=...
tex_color, // boolean toRGB,
faded_textures[nslice], // double [][] texture_data,
tilesX * transform_size, // int width, // int tilesX,
tilesY * transform_size, // int height, // int tilesY,
debug_level); // int debugLevel )
// Add synthetic mesh only with higher resolution? Or just any by a specified period? What kind of mesh - vertical random, ...
// Split and save as png
}
// Process accumulated textures: average, apply borders, convert to color or apply UM, add synthetic mesh, ...
return imp_tex; // ImagePlus[] ? with alpha, to be split into png and saved with alpha.
}
public static ImagePlus [] splitCombinedTextures(
TileCluster [] tileClusters, //should have name <timestamp>-*
int transform_size,
......