Commit 7d6fb681 (Elphel / imagej-elphel), authored Aug 09, 2022 by Andrey Filippov
First version of MB correction, changing format
Parent: e4c6d901
Showing 7 changed files with 609 additions and 178 deletions (+609, -178)
GpuQuad.java (src/main/java/com/elphel/imagej/gpu/GpuQuad.java): +159 -5
TpTask.java (src/main/java/com/elphel/imagej/gpu/TpTask.java): +19 -3
ImageDtt.java (src/main/java/com/elphel/imagej/tileprocessor/ImageDtt.java): +47 -3
IntersceneLma.java (src/main/java/com/elphel/imagej/tileprocessor/IntersceneLma.java): +35 -21
IntersceneMatchParameters.java (src/main/java/com/elphel/imagej/tileprocessor/IntersceneMatchParameters.java): +38 -12
OpticalFlow.java (src/main/java/com/elphel/imagej/tileprocessor/OpticalFlow.java): +198 -101
QuadCLT.java (src/main/java/com/elphel/imagej/tileprocessor/QuadCLT.java): +113 -33
src/main/java/com/elphel/imagej/gpu/GpuQuad.java
@@ -3719,7 +3719,6 @@ public class GpuQuad{ // quad camera description
        //change to fixed 511?
        final int task_code = ((1 << num_pairs)-1) << GPUTileProcessor.TASK_CORR_BITS; // correlation only
        final double min_px = margin;
        // final double max_px = img_width - 1 - margin;
        final double max_px = geometryCorrection.getSensorWH()[0] - 1 - margin; // sensor width here, not window width
        final double [] min_py = new double [num_cams];
        final double [] max_py = new double [num_cams];
@@ -3727,7 +3726,6 @@ public class GpuQuad{ // quad camera description
            min_py[i] = margin + (calcPortsCoordinatesAndDerivatives ? geometryCorrection.getWOITops()[i] : 0);
            // camera_heights array is only set during conditionImageSet(), not called by the intersceneAccumulate()
            // That was correct, as all scenes should be conditioned
            // max_py [i] = geometryCorrection.getWOITops()[i] + geometryCorrection.getCameraHeights()[i] - 1 - margin;
            max_py[i] = geometryCorrection.getSensorWH()[1] - 1 - margin; // same for all channels? //.getSensorWH()[0]
        }
@@ -3807,11 +3805,167 @@ public class GpuQuad{ // quad camera description
    public static TpTask[][] setInterTasksMotionBlur(
            final int                num_cams,
            final int                img_width,  // should match pXpYD
            final boolean            calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
            final double [][]        pXpYD,      // per-tile array of pX,pY,disparity triplets (or nulls)
            final boolean []         selection,  // may be null, if not null do not process unselected tiles
            // motion blur compensation
            final double             mb_tau,      // 0.008; // time constant, sec
            final double             mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
            final double [][]        mb_vectors,  //
            final GeometryCorrection geometryCorrection,
            final double             disparity_corr,
            final int                margin,      // do not use tiles if their centers are closer to the edges
            final boolean []         valid_tiles,
            final int                threadsMax)  // maximal number of threads to launch
    {
        int num_pairs = Correlation2d.getNumPairs(num_cams);
        //change to fixed 511?
        final int task_code = ((1 << num_pairs)-1) << GPUTileProcessor.TASK_CORR_BITS; // correlation only
        final double min_px = margin;
        final double max_px = geometryCorrection.getSensorWH()[0] - 1 - margin; // sensor width here, not window width
        final double [] min_py = new double [num_cams];
        final double [] max_py = new double [num_cams];
        for (int i = 0; i < num_cams; i++) {
            min_py[i] = margin + (calcPortsCoordinatesAndDerivatives ? geometryCorrection.getWOITops()[i] : 0);
            // camera_heights array is only set during conditionImageSet(), not called by the intersceneAccumulate()
            // That was correct, as all scenes should be conditioned
            max_py[i] = geometryCorrection.getSensorWH()[1] - 1 - margin; // same for all channels? //.getSensorWH()[0]
        }
        if (valid_tiles != null) {
            Arrays.fill(valid_tiles, false);
        }
        final int tilesX = img_width / GPUTileProcessor.DTT_SIZE;
        final int tiles = pXpYD.length;
        final Matrix [] corr_rots = geometryCorrection.getCorrVector().getRotMatrices(); // get array of per-sensor rotation matrices
        final int quad_main = (geometryCorrection != null)? num_cams : 0;
        final Thread [] threads = ImageDtt.newThreadArray(threadsMax);
        final AtomicInteger ai = new AtomicInteger(0);
        final AtomicInteger aTiles = new AtomicInteger(0);
        final TpTask [][] tp_tasks = new TpTask [2][tiles]; // aTiles.get()]; // [0] - main, [1] - shifted
        final double mb_len_scale = -Math.log(1.0 - 1.0/mb_max_gain);
        for (int ithread = 0; ithread < threads.length; ithread++) {
            threads[ithread] = new Thread() {
                @Override
                public void run() {
                    for (int nTile = ai.getAndIncrement(); nTile < tiles; nTile = ai.getAndIncrement())
                        if ((pXpYD[nTile] != null) && (mb_vectors[nTile] != null) && ((selection == null) || selection[nTile])) {
                            int tileY = nTile / tilesX;
                            int tileX = nTile % tilesX;
                            TpTask tp_task =     new TpTask(num_cams, tileX, tileY);
                            TpTask tp_task_sub = new TpTask(num_cams, tileX, tileY);
                            tp_task.task =     task_code;
                            tp_task_sub.task = task_code;
                            double disparity = pXpYD[nTile][2] + disparity_corr;
                            tp_task.target_disparity = (float) disparity;             // will it be used?
                            tp_task_sub.target_disparity = tp_task.target_disparity;  // will it be used?
                            double [] centerXY = pXpYD[nTile];
                            tp_task.setCenterXY(centerXY); // this pair of coordinates will be used by GPU to set tp_task.xy and task.disp_dist!
                            // calculate offset for the secondary tile and weigh
                            double dx = mb_vectors[nTile][0];
                            double dy = mb_vectors[nTile][1];
                            double mb_len = Math.sqrt(dx*dx + dy*dy); // in pixels/s
                            dx /= mb_len; // unit vector
                            dy /= mb_len;
                            mb_len *= mb_tau; // now in pixels
                            double mb_offs = 1.0; // try 1 pixel. Maybe adjust for non-ortho, e.g. sqrt(2) for diagonal?
                            double min_offs = mb_len_scale * mb_len;
                            if (mb_offs < min_offs) {
                                mb_offs = min_offs;
                            }
                            dx *= mb_offs;
                            dy *= mb_offs;
                            double [] centerXY_sub = {centerXY[0] + dx, centerXY[1] + dy};
                            tp_task_sub.setCenterXY(centerXY_sub);
                            double exp_offs = Math.exp(-mb_offs / mb_len);
                            double gain =     1.0 / (1.0 - exp_offs);
                            double gain_sub = -gain * exp_offs;
                            tp_task.setScale(gain);
                            tp_task_sub.setScale(gain_sub);
                            boolean bad_margins = false;
                            if (calcPortsCoordinatesAndDerivatives) { // for non-GPU?
                                double [][] disp_dist = new double[quad_main][]; // used to correct 3D correlations (not yet used here)
                                double [][] centersXY_main = geometryCorrection.getPortsCoordinatesAndDerivatives(
                                        geometryCorrection, // GeometryCorrection gc_main,
                                        false,              // boolean use_rig_offsets,
                                        corr_rots,          // Matrix [] rots,
                                        null,               // Matrix [][] deriv_rots,
                                        null,               // double [][] pXYderiv, // if not null, should be double[8][]
                                        disp_dist,          // used to correct 3D correlations
                                        centerXY[0],
                                        centerXY[1],
                                        disparity);         // + disparity_corr);
                                tp_task.setDispDist(disp_dist);
                                tp_task.xy = new float [centersXY_main.length][2];
                                for (int i = 0; i < centersXY_main.length; i++) {
                                    if ((centersXY_main[i][0] < min_px) || (centersXY_main[i][0] > max_px) ||
                                            (centersXY_main[i][1] < min_py[i]) || (centersXY_main[i][1] > max_py[i])) {
                                        bad_margins = true;
                                        break;
                                    }
                                    tp_task.xy[i][0] = (float) centersXY_main[i][0];
                                    tp_task.xy[i][1] = (float) centersXY_main[i][1];
                                }
                                // same for the second entry
                                double [][] disp_dist_sub = new double[quad_main][]; // used to correct 3D correlations (not yet used here)
                                double [][] centersXY_main_sub = geometryCorrection.getPortsCoordinatesAndDerivatives(
                                        geometryCorrection, // GeometryCorrection gc_main,
                                        false,              // boolean use_rig_offsets,
                                        corr_rots,          // Matrix [] rots,
                                        null,               // Matrix [][] deriv_rots,
                                        null,               // double [][] pXYderiv, // if not null, should be double[8][]
                                        disp_dist_sub,      // used to correct 3D correlations
                                        centerXY_sub[0],
                                        centerXY_sub[1],
                                        disparity);         // + disparity_corr);
                                tp_task_sub.setDispDist(disp_dist);
                                tp_task_sub.xy = new float [centersXY_main.length][2];
                                for (int i = 0; i < centersXY_main.length; i++) {
                                    if ((centersXY_main[i][0] < min_px) || (centersXY_main[i][0] > max_px) ||
                                            (centersXY_main[i][1] < min_py[i]) || (centersXY_main[i][1] > max_py[i])) {
                                        bad_margins = true;
                                        break;
                                    }
                                    tp_task_sub.xy[i][0] = (float) centersXY_main_sub[i][0];
                                    tp_task_sub.xy[i][1] = (float) centersXY_main_sub[i][1];
                                }
                            } else { // only check center for margins
                                if ((centerXY[0] < min_px) || (centerXY[0] > max_px) ||
                                        (centerXY[1] < min_py[0]) || (centerXY[1] > max_py[0]) ||
                                        (centerXY_sub[0] < min_px) || (centerXY_sub[0] > max_px) ||
                                        (centerXY_sub[1] < min_py[0]) || (centerXY_sub[1] > max_py[0])) {
                                    bad_margins = true;
                                    // break;
                                }
                            }
                            if (bad_margins) {
                                continue;
                            }
                            int tp_task_index = aTiles.getAndIncrement();
                            tp_tasks[0][tp_task_index] = tp_task;
                            tp_tasks[1][tp_task_index] = tp_task_sub;
                            if (valid_tiles != null) {
                                valid_tiles[nTile] = true;
                            }
                        }
                }
            };
        }
        ImageDtt.startAndJoin(threads);
        final TpTask [][] tp_tasks_out = new TpTask [2][aTiles.get()];
        System.arraycopy(tp_tasks[0], 0, tp_tasks_out[0], 0, tp_tasks_out[0].length);
        System.arraycopy(tp_tasks[1], 0, tp_tasks_out[1], 0, tp_tasks_out[1].length);
        return tp_tasks_out;
    }

    public void setLpfRbg(
            float [][] lpf_rbg, // 4 64-el. arrays: r,b,g,m
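The weighting used by setInterTasksMotionBlur() above follows from the sensor's exponential decay: for a tile moving at |v| pixels/s the blur length is mb_len = |v| * mb_tau pixels, the secondary tile is shifted along the motion by mb_offs = max(1, -ln(1 - 1/mb_max_gain) * mb_len) so the gain never exceeds mb_max_gain, and the two tiles get gain = 1/(1 - exp(-mb_offs/mb_len)) and gain_sub = -gain * exp(-mb_offs/mb_len). The sketch below is not part of the commit; it only evaluates those expressions for one made-up tile to show typical magnitudes:

    // Standalone sketch (hypothetical values, not from the commit)
    double mb_tau = 0.008;                    // sensor time constant, s (default in IntersceneMatchParameters)
    double mb_max_gain = 5.0;                 // maximal allowed gain
    double [] mb_vector = {120.0, -50.0};     // per-tile image motion, pixels/s (made up)
    double mb_len = Math.hypot(mb_vector[0], mb_vector[1]) * mb_tau; // blur length, pixels
    double mb_len_scale = -Math.log(1.0 - 1.0 / mb_max_gain);
    double mb_offs = Math.max(1.0, mb_len_scale * mb_len);           // secondary tile offset, pixels
    double exp_offs = Math.exp(-mb_offs / mb_len);
    double gain     =  1.0 / (1.0 - exp_offs); // weight of the tile at the target position
    double gain_sub = -gain * exp_offs;        // weight of the tile shifted by mb_offs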
src/main/java/com/elphel/imagej/gpu/TpTask.java
@@ -11,13 +11,28 @@ public class TpTask {
    public float [][] xy_aux =    null;
    public float [][] disp_dist = null;
    // public float weight;
    public float scale = 0.0f; // for motion blur correction // 0.0 - set (as it was). >0 multiply and set. <0 multiply and accumulate

    public static int getSize(int num_sensors) {
        return 5 + 2 * num_sensors + 4 * num_sensors;
        // return 5 + 2* num_sensors + 4 * num_sensors;
        return 6 + 2 * num_sensors + 4 * num_sensors; // added scale
    }
    public int getSize() {
        return 5 + 2 * num_sensors + 4 * num_sensors;
        // return 5 + 2* num_sensors + 4 * num_sensors;
        return getSize(num_sensors);
    }
    public void setScale(float scale) {
        this.scale = scale;
    }
    public void setScale(double scale) {
        this.scale = (float) scale;
    }
    public float getScale() {
        return scale;
    }
    public TpTask(
            int num_sensors,
@@ -54,6 +69,7 @@ public class TpTask {
        target_disparity = flt[indx++]; // 2
        centerXY[0] =      flt[indx++]; // 3
        centerXY[1] =      flt[indx++]; // 4
        scale =            flt[indx++]; // 5
        if (use_aux) {
            xy_aux = new float[num_sensors][2];
            for (int i = 0; i < num_sensors; i++) {
@@ -165,7 +181,7 @@ public class TpTask {
        flt[indx++] = this.target_disparity; // 2
        flt[indx++] = centerXY[0];           // 3
        flt[indx++] = centerXY[1];           // 4
        flt[indx++] = scale;                 // 5
        float [][] offsets = use_aux ? this.xy_aux : this.xy;
        for (int i = 0; i < num_sensors; i++) {
            if (offsets != null) {
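Net effect on the task format: one float (scale) is appended to the per-task header, so the serialized size grows from 5 + 2*num_sensors + 4*num_sensors to 6 + 2*num_sensors + 4*num_sensors floats, and scale is read/written at index 5, right after target_disparity (2) and centerXY (3, 4). A quick size check (illustration only, not code from the commit):

    int num_sensors = 16;                             // e.g. a 16-sensor rig (hypothetical)
    int size = 6 + 2 * num_sensors + 4 * num_sensors; // new TpTask.getSize()
    System.out.println(size);                         // 102 floats per task (was 101 before scale)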
src/main/java/com/elphel/imagej/tileprocessor/ImageDtt.java
@@ -1199,12 +1199,56 @@ public class ImageDtt extends ImageDttCPU {
        gpuQuad.updateTasks(
                tp_tasks,
                false); // boolean use_aux // while is it in class member? - just to be able to free
        // Skipping if ((fdisp_dist != null) || (fpxpy != null)) {...
        // int [] wh = null;
        // int erase_clt = 1; // NaN;
        gpuQuad.execConvertDirect(use_reference_buffer, wh, erase_clt); // put results into a "reference" buffer
    }

    public void setReferenceTDMotionBlur(
            final int                erase_clt,
            final int []             wh,            // null (use sensor dimensions) or pair {width, height} in pixels
            final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
            final boolean            use_reference_buffer,
            final TpTask [][]        tp_tasks,
            final double             gpu_sigma_r,   // 0.9, 1.1
            final double             gpu_sigma_b,   // 0.9, 1.1
            final double             gpu_sigma_g,   // 0.6, 0.7
            final double             gpu_sigma_m,   // = 0.4; // 0.7;
            final int                threadsMax,    // maximal number of threads to launch
            final int                globalDebugLevel)
    {
        final float [][] lpf_rgb = new float[][] {
            floatGetCltLpfFd(gpu_sigma_r),
            floatGetCltLpfFd(gpu_sigma_b),
            floatGetCltLpfFd(gpu_sigma_g),
            floatGetCltLpfFd(gpu_sigma_m)
        };
        gpuQuad.setLpfRbg( // constants memory - same for all cameras
                lpf_rgb,
                globalDebugLevel > 2);
        gpuQuad.setTasks( // copy tp_tasks to the GPU memory
                tp_tasks[0],               // TpTask [] tile_tasks,
                false,                     // use_aux); // boolean use_aux)
                imgdtt_params.gpu_verify); // boolean verify
        // Why always NON-UNIFORM grid? Already set in tp_tasks
        gpuQuad.execSetTilesOffsets(false); // false); // prepare tiles offsets in GPU memory, using NON-UNIFORM grid (pre-calculated)
        // update tp_tasks
        gpuQuad.updateTasks(
                tp_tasks[0],
                false); // boolean use_aux // while is it in class member? - just to be able to free
        gpuQuad.execConvertDirect(use_reference_buffer, wh, erase_clt); // put results into a "reference" buffer
        // second tasks (subtracting MB)
        gpuQuad.setTasks( // copy tp_tasks to the GPU memory
                tp_tasks[1],               // TpTask [] tile_tasks,
                false,                     // use_aux); // boolean use_aux)
                imgdtt_params.gpu_verify); // boolean verify
        // Why always NON-UNIFORM grid? Already set in tp_tasks
        gpuQuad.execSetTilesOffsets(false); // false); // prepare tiles offsets in GPU memory, using NON-UNIFORM grid (pre-calculated)
        // update tp_tasks
        gpuQuad.updateTasks(
                tp_tasks[1],
                false); // boolean use_aux // while is it in class member? - just to be able to free
        gpuQuad.execConvertDirect(use_reference_buffer, wh, -1); // erase_clt); // put results into a "reference" buffer
    }
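setReferenceTDMotionBlur() runs the convert step twice: tp_tasks[0] (positive gains, tiles at the target positions) with the caller's erase_clt, then tp_tasks[1] (negative gains, tiles shifted along the motion) with erase_clt = -1, presumably so the second pass adds into the already-written reference buffer instead of erasing it, matching the TpTask.scale convention noted above (">0 multiply and set, <0 multiply and accumulate"). A hedged call sketch, mirroring the invocation added in QuadCLT.renderGPUFromDSI() further down (argument values are placeholders):

    image_dtt.setReferenceTDMotionBlur(
            show_nan ? 1 : 0,           // erase_clt used for the first pass
            wh,                         // null or {width, height} in pixels
            clt_parameters.img_dtt,
            use_reference,              // write into the "reference" TD buffer
            tp_tasks,                   // TpTask[2][] from GpuQuad.setInterTasksMotionBlur()
            clt_parameters.gpu_sigma_r,
            clt_parameters.gpu_sigma_b,
            clt_parameters.gpu_sigma_g,
            clt_parameters.gpu_sigma_m,
            threadsMax,
            debugLevel);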
src/main/java/com/elphel/imagej/tileprocessor/IntersceneLma.java
@@ -43,6 +43,12 @@ public class IntersceneLma {
        this.thread_invariant = thread_invariant;
        this.opticalFlow = opticalFlow;
    }

    public double [][] getLastJT() {
        return last_jt;
    }

    public double [] getLastRms() {
        return last_rms;
    }
@@ -171,19 +177,9 @@ public class IntersceneLma {
            final int debug_level)
    {
        scenesCLT = new QuadCLT [] {reference_QuadClt, scene_QuadClt};
        // this.vector_XYS = vector_XYS;
        par_mask = param_select;
        macrotile_centers = centers;
        num_samples = 2 * centers.length;
        /*
        for (int i = 0; i < vector_XYS.length; i++){
            if (((vector_XYS[i] == null) && (centers[i]!=null)) ||
                    ((vector_XYS[i] != null) && (centers[i]==null))) {
                vector_XYS[i] = null;
                centers[i]= null;
            }
        }
        */
        ErsCorrection ers_ref =   reference_QuadClt.getErsCorrection();
        ErsCorrection ers_scene = scene_QuadClt.getErsCorrection();
        final double [] scene_xyz = (scene_xyz0 != null) ? scene_xyz0 : ers_scene.camera_xyz;
@@ -201,19 +197,22 @@ public class IntersceneLma {
                scene_atr[0], scene_atr[1], scene_atr[2],
                scene_xyz[0], scene_xyz[1], scene_xyz[2]};
        parameters_full = full_parameters_vector.clone();
        if (first_run || (backup_parameters_full == null)) {
        if ((vector_XYS != null) && (first_run || (backup_parameters_full == null))) {
            backup_parameters_full = full_parameters_vector.clone();
        }
        int num_pars = 0;
        for (int i = 0; i < par_mask.length; i++) if (par_mask[i]) num_pars++;
        par_indices = new int [num_pars];
        num_pars = 0;
        for (int i = 0; i < par_mask.length; i++) if (par_mask[i]) par_indices[num_pars++] = i;
        parameters_vector = new double [par_indices.length];
        for (int i = 0; i < par_indices.length; i++) parameters_vector[i] = full_parameters_vector[par_indices[i]];
        // parameters_initial = parameters_vector.clone();
        setSamplesWeights(vector_XYS); // not regularized yet !
        if (vector_XYS != null) { // skip when used for the motion blur vectors, not LMA
            setSamplesWeights(vector_XYS); // not regularized yet !
        } else {
            weights = null; // new double[2 * centers.length];
        }
        last_jt = new double [parameters_vector.length][];
        if (debug_level > 1) {
@@ -225,6 +224,10 @@ public class IntersceneLma {
                scenesCLT[1], // final QuadCLT scene_QuadClt,
                scenesCLT[0], // final QuadCLT reference_QuadClt,
                debug_level); // final int debug_level)
        if (vector_XYS == null) {
            return; // for MB vectors (noLMA)
        }
        double [][] wjtj = getWJtJlambda( // USED in lwir all NAN
                0.0,      // final double lambda,
                last_jt); // final double [][] jt) all 0???
@@ -727,10 +730,12 @@ public class IntersceneLma {
        final double [] scene_atr =     new double [3];
        final double [] reference_xyz = new double [3]; // will stay 0
        final double [] reference_atr = new double [3]; // will stay 0
        final double [] fx = new double [weights.length];
        final boolean mb_mode = (weights == null);
        final int weights_length = mb_mode ? (2 * macrotile_centers.length) : weights.length;
        final double [] fx = mb_mode ? null : (new double [weights_length]); // weights.length]; : weights.length :
        if (jt != null) {
            for (int i = 0; i < jt.length; i++) {
                jt[i] = new double [weights.length];
                jt[i] = new double [weights_length]; // weights.length];
            }
        }
@@ -758,14 +763,13 @@ public class IntersceneLma {
                scene_atr, // double [] atr);
                false)[0]; // boolean invert));
        // double [][][] derivs = new double [macrotile_centers.length + parameters_vector.length][][];
        final Thread [] threads = ImageDtt.newThreadArray(opticalFlow.threadsMax);
        final AtomicInteger ai = new AtomicInteger(0);
        for (int ithread = 0; ithread < threads.length; ithread++) {
            threads[ithread] = new Thread() {
                public void run() {
                    for (int iMTile = ai.getAndIncrement(); iMTile < macrotile_centers.length; iMTile = ai.getAndIncrement()) {
                        if ((macrotile_centers[iMTile]!=null) && (weights[2*iMTile] > 0.0)){ // was: weights[iMTile]?
                        if ((macrotile_centers[iMTile]!=null) && (mb_mode || (weights[2*iMTile] > 0.0))){ // was: weights[iMTile]?
                            //infinity_disparity
                            boolean is_infinity = macrotile_centers[iMTile][2] < infinity_disparity;
                            double [][] deriv_params = ers_ref.getDPxSceneDParameters(
@@ -782,8 +786,10 @@ public class IntersceneLma {
                            if (deriv_params != null) {
                                boolean bad_tile = false;
                                if (!bad_tile) {
                                    fx[2 * iMTile + 0] = deriv_params[0][0]; // pX
                                    fx[2 * iMTile + 1] = deriv_params[0][1]; // pY
                                    if (!mb_mode) {
                                        fx[2 * iMTile + 0] = deriv_params[0][0]; // pX
                                        fx[2 * iMTile + 1] = deriv_params[0][1]; // pY
                                    }
                                    if (jt != null) {
                                        for (int i = 0; i < par_indices.length; i++) {
                                            int indx = par_indices[i] + 1;
@@ -792,6 +798,11 @@ public class IntersceneLma {
                                        }
                                    }
                                }
                            } else if (mb_mode) {
                                for (int i = 0; i < par_indices.length; i++) {
                                    jt[i][2 * iMTile + 0] = Double.NaN; // pX
                                    jt[i][2 * iMTile + 1] = Double.NaN; // ; // pY (disparity is not used)
                                }
                            }
                        }
                    }
@@ -799,6 +810,9 @@ public class IntersceneLma {
            };
        }
        ImageDtt.startAndJoin(threads);
        if (mb_mode) {
            return null;
        }
        // pull to the initial parameter values
        for (int i = 0; i < par_indices.length; i++) {
            fx[i + 2 * macrotile_centers.length] = vector[i]; // - parameters_initial[i]; // scale will be combined with weights
src/main/java/com/elphel/imagej/tileprocessor/IntersceneMatchParameters.java
@@ -236,7 +236,12 @@ public class IntersceneMatchParameters {
    // Boost amount
    public double  eq_weight_add =    0.03; // calculate from min-strengths
    public double  eq_weight_scale = 20.0;  // maximal boost ratio
    public double  eq_level =         0.9;  // equalization level (0.0 - leave as is, 1.0 - boost to have the same supertile strength as average)

    public boolean mb_en =       true;
    public double  mb_tau =      0.008; // time constant, sec
    public double  mb_max_gain = 5.0;   // motion blur maximal gain (if more - move second point more than a pixel

    public boolean stereo_merge = true;
    public int     stereo_gap =   32;   // pixels between right and left frames
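The three new fields follow the same persistence pattern as the surrounding parameters, so a saved configuration would simply gain three more key/value pairs (the prefix below is a placeholder for whatever the caller passes to setProperties/getProperties):

    <prefix>mb_en=true
    <prefix>mb_tau=0.008
    <prefix>mb_max_gain=5.0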
@@ -710,8 +715,15 @@ public class IntersceneMatchParameters {
        gd.addNumericField("Equalization level", this.eq_level, 5, 7, "",
                "Target supertile strength will be set to: 0 - original strength (no modification), 1.0 - average supertile strength.");
        gd.addTab("Stereo/Video", "Parameters for stereo video generation");

        gd.addTab("MB", "Motion Blur");
        gd.addCheckbox("Compensate motion blur", this.mb_en, "Ebable motion blur correction.");
        gd.addNumericField("Sensor time constant", this.mb_tau, 5, 7, "s",
                "Sensor exponential decay in seconds.");
        gd.addNumericField("Maximal gain", this.mb_max_gain, 5, 7, "x",
                "Maximal gain for motion blur correction (if needed more for 1 pixel, increase offset).");

        gd.addTab("Stereo/Video", "Parameters for stereo video generation");
        gd.addMessage("Stereo");
        if (stereo_views.length > 0) {
            String [] stereo_choices = new String [stereo_views.length + 1];
@@ -987,9 +999,11 @@ public class IntersceneMatchParameters {
        this.eq_max_disparity = gd.getNextNumber();
        this.eq_weight_add =    gd.getNextNumber();
        this.eq_weight_scale =  gd.getNextNumber();
        this.eq_level =         gd.getNextNumber();
        this.mb_en =            gd.getNextBoolean();
        this.mb_tau =           gd.getNextNumber();
        this.mb_max_gain =      gd.getNextNumber();
        if (stereo_views.length > 0) {
            int i = gd.getNextChoiceIndex();
@@ -1260,10 +1274,14 @@ public class IntersceneMatchParameters {
        properties.setProperty(prefix+"eq_weight_scale",    this.eq_weight_scale+"");    // double
        properties.setProperty(prefix+"eq_level",           this.eq_level+"");           // double
        properties.setProperty(prefix+"mb_en",              this.mb_en+"");              // boolean
        properties.setProperty(prefix+"mb_tau",             this.mb_tau+"");             // double
        properties.setProperty(prefix+"mb_max_gain",        this.mb_max_gain+"");        // double
        properties.setProperty(prefix+"stereo_merge",       this.stereo_merge+"");       // boolean
        properties.setProperty(prefix+"stereo_gap",         this.stereo_gap+"");         // int
        properties.setProperty(prefix+"stereo_intereye",    this.stereo_intereye+"");    // double
        properties.setProperty(prefix+"stereo_phone_width", this.stereo_phone_width+""); // double
        properties.setProperty(prefix+"extra_hor_tile",     this.extra_hor_tile+"");     // int
        properties.setProperty(prefix+"extra_vert_tile",    this.extra_vert_tile+"");    // int
        properties.setProperty(prefix+"crop_3d",            this.crop_3d+"");            // boolean
@@ -1484,6 +1502,10 @@ public class IntersceneMatchParameters {
        if (properties.getProperty(prefix+"eq_weight_scale")!=null)  this.eq_weight_scale=Double.parseDouble(properties.getProperty(prefix+"eq_weight_scale"));
        if (properties.getProperty(prefix+"eq_level")!=null)         this.eq_level=Double.parseDouble(properties.getProperty(prefix+"eq_level"));
        if (properties.getProperty(prefix+"mb_en")!=null)            this.mb_en=Boolean.parseBoolean(properties.getProperty(prefix+"mb_en"));
        if (properties.getProperty(prefix+"mb_tau")!=null)           this.mb_tau=Double.parseDouble(properties.getProperty(prefix+"mb_tau"));
        if (properties.getProperty(prefix+"mb_max_gain")!=null)      this.mb_max_gain=Double.parseDouble(properties.getProperty(prefix+"mb_max_gain"));
        if (properties.getProperty(prefix+"stereo_merge")!=null)     this.stereo_merge=Boolean.parseBoolean(properties.getProperty(prefix+"stereo_merge"));
        if (properties.getProperty(prefix+"stereo_gap")!=null)       this.stereo_gap=Integer.parseInt(properties.getProperty(prefix+"stereo_gap"));
        if (properties.getProperty(prefix+"stereo_intereye")!=null)  this.stereo_intereye=Double.parseDouble(properties.getProperty(prefix+"stereo_intereye"));
@@ -1724,10 +1746,14 @@ public class IntersceneMatchParameters {
        imp.eq_weight_scale =    this.eq_weight_scale;
        imp.eq_level =           this.eq_level;
        imp.mb_en =              this.mb_en;
        imp.mb_tau =             this.mb_tau;
        imp.mb_max_gain =        this.mb_max_gain;
        imp.stereo_merge =       this.stereo_merge;
        imp.stereo_gap =         this.stereo_gap;
        imp.stereo_intereye =    this.stereo_intereye;
        imp.stereo_phone_width = this.stereo_phone_width;
        imp.extra_hor_tile =     this.extra_hor_tile;
        imp.extra_vert_tile =    this.extra_vert_tile;
src/main/java/com/elphel/imagej/tileprocessor/OpticalFlow.java
@@ -4557,7 +4557,6 @@ public class OpticalFlow {
                    ((quadCLTs[ref_index].getNumAccum() < quadCLTs[ref_index].getNumOrient())||
                    (quadCLTs[ref_index].getNumOrient() >= min_num_orient))) {
                // should skip scenes w/o orientation 06/29/2022
                combo_dsn_final = intersceneExport( // result indexed by COMBO_DSN_TITLES, COMBO_DSN_INDX_***
                        clt_parameters, // CLTParameters clt_parameters,
                        ers_reference,  // ErsCorrection ers_reference,
@@ -4565,8 +4564,6 @@ public class OpticalFlow {
                        colorProcParameters, // ColorProcParameters colorProcParameters,
                        debugLevel);         // int debug_level
                quadCLTs[ref_index].inc_accum();
                // save with updated num_accum
                quadCLTs[ref_index].saveInterProperties( // save properties for interscene processing (extrinsics, ers, ...)
@@ -4959,7 +4956,6 @@ public class OpticalFlow {
            ImagePlus imp_video = imp_scenes_pair[nstereo];
            boolean [] combine_modes = {!combine_left_right, stereo_merge && combine_left_right, anaglyth_en && !toRGB && combine_left_right};
            for (int istereo_mode = 0; istereo_mode < combine_modes.length; istereo_mode++) if (combine_modes[istereo_mode]) {
                // if (combine_left_right) { // combine pairs multi-threaded
                if (istereo_mode == 1) { // combine pairs for "Google" VR
                    final int left_width =  imp_scenes_pair[0].getWidth();
                    final int right_width = imp_scenes_pair[1].getWidth();
@@ -5007,17 +5003,8 @@ public class OpticalFlow {
                    imp_video.setStack(stereo_stack);
                    String title = imp_scenes_pair[1].getTitle();
                    imp_video.setTitle(title.replace("-RIGHT","-STEREO"));
                    // convert stereo_stack to imp_scenes_pair[1], keeping calibration and fps?
                    /// imp_scenes_pair[1].setStack(stereo_stack);
                    /// String title = imp_scenes_pair[1].getTitle();
                    /// imp_video = new ImagePlus(
                    ///         imp_scenes_pair[1].getTitle().replace("-RIGHT","-STEREO"),
                    ///         stereo_stack);
                    /// imp_scenes_pair[1].setTitle(title.replace("-RIGHT","-STEREO"));
                } else if (istereo_mode == 2) { // combine anaglyph
                    // final Color anaglyph_left = clt_parameters.imp.anaglyph_left;
                    // final Color anaglyph_right = clt_parameters.imp.anaglyph_right;
                    final double [] left_rgb = {anaglyph_left.getRed()/255.0, anaglyph_left.getGreen()/255.0,
@@ -5066,8 +5053,6 @@ public class OpticalFlow {
                    imp_video.setStack(stereo_stack);
                    String title = imp_scenes_pair[1].getTitle();
                    imp_video.setTitle(title.replace("-RIGHT","-ANAGLYPH"));
                    /// String title = imp_scenes_pair[1].getTitle();
                    /// imp_scenes_pair[1].setTitle(title.replace("-RIGHT","-ANAGLYPH"));
                }
                // if (istereo_mode == 1) {if (combine_left_right) { // combine pairs multi-threaded
                String avi_path = null;
                video:
@@ -5090,7 +5075,6 @@ public class OpticalFlow {
                    if (avi_path == null) {
                        break video;
                    }
                    // int img_width=imp_scenes_pair[nstereo].getWidth();
                    int img_width = imp_video.getWidth();
                    int stereo_width = combine_left_right ? img_width : 0;
                    stereo_widths_list.add(stereo_width);
@@ -5202,6 +5186,7 @@ public class OpticalFlow {
                        null,                // testr, // null, // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
                        clt_parameters,      // CLTParameters clt_parameters,
                        constant_disparity,  // double [] disparity_ref,
                        ZERO3,               // final double [] scene_xyz, // camera center in world coordinates
                        ZERO3,               // new double[] {.1,0.1,.1}, // ZERO3, // final double [] scene_atr, // camera orientation relative to world frame
                        quadCLTs[ref_index], // final QuadCLT scene,
@@ -13404,7 +13389,13 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
            boolean []  reliable_ref, // null or bitmask of reliable reference tiles
            QuadCLT []  quadCLTs,
            int         debugLevel)
    {
        boolean test_motion_blur = true;
        boolean mb_en =       clt_parameters.imp.mb_en;
        double  mb_tau =      clt_parameters.imp.mb_tau;      // 0.008; // time constant, sec
        double  mb_max_gain = clt_parameters.imp.mb_max_gain; // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
        int earliest_scene = 0;
        boolean use_combo_dsi = clt_parameters.imp.use_combo_dsi;
        boolean use_lma_dsi =   clt_parameters.imp.use_lma_dsi;
@@ -13440,13 +13431,33 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
                }
            }
        }
        double [][] ref_pXpYD = null;
        double [][] dbg_mb_img = null;
        double []   mb_ref_disparity = null;
        if (test_motion_blur) {
            mb_ref_disparity = interscene_ref_disparity;
            if (mb_ref_disparity == null) {
                mb_ref_disparity = quadCLTs[ref_index].getDLS()[use_lma_dsi ? 1 : 0];
            }
            ref_pXpYD = transformToScenePxPyD( // full size - [tilesX*tilesY], some nulls
                    null,                 // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
                    mb_ref_disparity,     // dls[0], // final double [] disparity_ref, // invalid tiles - NaN in disparity (maybe it should not be masked by margins?)
                    ZERO3,                // final double [] scene_xyz, // camera center in world coordinates
                    ZERO3,                // final double [] scene_atr, // camera orientation relative to world frame
                    quadCLTs[ref_index],  // final QuadCLT scene_QuadClt,
                    quadCLTs[ref_index]); // final QuadCLT reference_QuadClt)
            dbg_mb_img = new double [quadCLTs.length][];
        }
        ErsCorrection ers_reference = quadCLTs[ref_index].getErsCorrection();
        double [][][] dxyzatr_dt =    new double [quadCLTs.length][][];
        double [][][] scenes_xyzatr = new double [quadCLTs.length][][]; // previous scene relative to the next one
        scenes_xyzatr[ref_index] = new double [2][3]; // all zeros
        // should have at least next or previous non-null
        int debug_scene = -15;
        double maximal_series_rms = 0.00;
        for (int nscene = ref_index; nscene >= earliest_scene; nscene--) {
            if ((quadCLTs[nscene] == null) ||
                    ((nscene != ref_index) &&
@@ -13468,7 +13479,7 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
                nscene1 = nscene;
            }
            if (nscene1 == nscene0) {
                System.out.println("**** Isoloated scene!!! skippiung... now may only happen for a ref_scene****");
                System.out.println("**** Isoloated scene!!! skipping... now may only happen for a ref_scene****");
                earliest_scene = nscene + 1;
                break;
            }
@@ -13487,15 +13498,88 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
            double [] scene_atr1 = (nscene1 == ref_index)? ZERO3 : ers_reference.getSceneATR(ts1);
            dxyzatr_dt[nscene] = new double[2][3];
            for (int i = 0; i < 3; i++) {
                dxyzatr_dt[nscene][0][i] = 0.0; // (scene_xyz1[i]-scene_xyz0[i])/dt;
                dxyzatr_dt[nscene][0][i] = (scene_xyz1[i]-scene_xyz0[i])/dt;
                dxyzatr_dt[nscene][1][i] = (scene_atr1[i]-scene_atr0[i])/dt;
            }
            double [] scene_xyz_pre = ZERO3;
            double [] scene_atr_pre = ZERO3;
            quadCLTs[nscene].getErsCorrection().setErsDt( // set for ref also (should be set before non-ref!)
                    dxyzatr_dt[nscene][0],  // double [] ers_xyz_dt,
                    ZERO3,                  //, // dxyzatr_dt[nscene][0], // double [] ers_xyz_dt,
                    dxyzatr_dt[nscene][1]); // double [] ers_atr_dt)(ers_scene_original_xyz_dt);
            int debug_scene = -15;
            if (dbg_mb_img != null) {
                dbg_mb_img[nscene] = new double [tilesX*tilesY*2];
                Arrays.fill(dbg_mb_img[nscene], Double.NaN);
                double [] mb_scene_xyz = (nscene != ref_index)? ers_reference.getSceneXYZ(ts) : ZERO3;
                double [] mb_scene_atr = (nscene != ref_index)? ers_reference.getSceneATR(ts) : ZERO3;
                double [][] motion_blur = getMotionBlur(
                        quadCLTs[ref_index],   // QuadCLT ref_scene,
                        quadCLTs[nscene],      // QuadCLT scene, // can be the same as ref_scene
                        ref_pXpYD,             // double [][] ref_pXpYD, // here it is scene, not reference!
                        mb_scene_xyz,          // double [] camera_xyz,
                        mb_scene_atr,          // double [] camera_atr,
                        dxyzatr_dt[nscene][0], // double [] camera_xyz_dt,
                        dxyzatr_dt[nscene][1], // double [] camera_atr_dt,
                        debugLevel);           // int debug_level)
                for (int nTile = 0; nTile < motion_blur.length; nTile++) if (motion_blur[nTile] != null) {
                    int tx = nTile % tilesX;
                    int ty = nTile / tilesX;
                    dbg_mb_img[nscene][tx + tilesX * (ty*2 + 0)] = motion_blur[nTile][0];
                    dbg_mb_img[nscene][tx + tilesX * (ty*2 + 1)] = motion_blur[nTile][1];
                }
                boolean show_corrected = false;
                if (nscene == debug_scene) {
                    System.out.println("nscene = "+nscene);
                }
                while (show_corrected) {
                    ImagePlus imp_mbc = QuadCLT.renderGPUFromDSI(
                            -1,                  // final int sensor_mask,
                            false,               // final boolean merge_channels,
                            null,                // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
                            clt_parameters,      // CLTParameters clt_parameters,
                            mb_ref_disparity,    // double [] disparity_ref,
                            // motion blur compensation
                            mb_tau,              // double mb_tau, // 0.008; // time constant, sec
                            mb_max_gain,         // double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
                            motion_blur,         // double [][] mb_vectors, //
                            mb_scene_xyz,        // ZERO3, // final double [] scene_xyz, // camera center in world coordinates
                            mb_scene_atr,        // final double [] scene_atr, // camera orientation relative to world frame
                            quadCLTs[nscene],    // final QuadCLT scene,
                            quadCLTs[ref_index], // final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
                            false,               // toRGB, // final boolean toRGB,
                            clt_parameters.imp.show_color_nan,
                            quadCLTs[nscene].getImageName()+"-MOTION_BLUR_CORRECTED", // String suffix,
                            threadsMax,          // int threadsMax,
                            debugLevel);         // int debugLevel)
                    imp_mbc.show();
                    ImagePlus imp_mbc_merged = QuadCLT.renderGPUFromDSI(
                            -1,                  // final int sensor_mask,
                            true,                // final boolean merge_channels,
                            null,                // final Rectangle full_woi_in, // show larger than sensor WOI (or null)
                            clt_parameters,      // CLTParameters clt_parameters,
                            mb_ref_disparity,    // double [] disparity_ref,
                            // motion blur compensation
                            mb_tau,              // double mb_tau, // 0.008; // time constant, sec
                            mb_max_gain,         // double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
                            motion_blur,         // double [][] mb_vectors, //
                            mb_scene_xyz,        // ZERO3, // final double [] scene_xyz, // camera center in world coordinates
                            mb_scene_atr,        // final double [] scene_atr, // camera orientation relative to world frame
                            quadCLTs[nscene],    // final QuadCLT scene,
                            quadCLTs[ref_index], // final QuadCLT ref_scene, // now - may be null - for testing if scene is rotated ref
                            false,               // toRGB, // final boolean toRGB,
                            clt_parameters.imp.show_color_nan,
                            quadCLTs[nscene].getImageName()+"-MOTION_BLUR_CORRECTED", // String suffix,
                            threadsMax,          // int threadsMax,
                            debugLevel);         // int debugLevel)
                    imp_mbc_merged.show();
                }
            }
            // int debug_scene = -15;
            if (nscene != ref_index) {
                if (nscene == debug_scene) {
                    System.out.println("nscene = "+nscene);
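The ego-motion rates used above are plain finite differences between the neighboring scenes' poses, dxyzatr_dt[nscene][0][i] = (scene_xyz1[i] - scene_xyz0[i]) / dt and dxyzatr_dt[nscene][1][i] = (scene_atr1[i] - scene_atr0[i]) / dt; the zero-initialized variant shown next to it appears to be the previous line kept in the diff. getMotionBlur(), added further down, converts these camera-space rates into the per-tile pixel velocities that setInterTasksMotionBlur() consumes.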
@@ -13550,7 +13634,20 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
                }
            }
        } // for (int nscene = ref_index; nscene > earliest_scene; nscene--) {
        if (dbg_mb_img != null) {
            String [] dbg_mb_titles = new String[quadCLTs.length];
            for (int i = 0; i < quadCLTs.length; i++) if (quadCLTs[i] != null) {
                dbg_mb_titles[i] = quadCLTs[i].getImageName();
            }
            (new ShowDoubleFloatArrays()).showArrays(
                    dbg_mb_img,
                    tilesX * 2,
                    tilesY,
                    true,
                    quadCLTs[ref_index].getImageName()+"-MOTION_BLUR",
                    dbg_mb_titles);
        }
        if (debugLevel > -4) {
            System.out.println("All multi scene passes are Done. Maximal RMSE was "+maximal_series_rms);
        }
@@ -13561,6 +13658,84 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
        return earliest_scene;
    }

    /**
     * Get per-tile motion blur vector
     * @param ref_scene reference scene
     * @param scene current scene (may be the same as reference)
     * @param ref_pXpYD per-tile pX, pY, disparity for reference scene (some may be nulls)
     * @param camera_xyz camera x,y,z relative to the reference
     * @param camera_atr camera azimuth, tilt, roll relative to the reference
     * @param camera_xyz_dt camera linear velocities: x', y', z'
     * @param camera_atr_dt camera angular velocities: azimuth', tilt', roll'
     * @param debug_level debug level
     * @return per-tile array of {dx/dt, dy/dt} vectors, some may be null
     */
    public double [][] getMotionBlur(
            QuadCLT     ref_scene,
            QuadCLT     scene,     // can be the same as ref_scene
            double [][] ref_pXpYD,
            double []   camera_xyz,
            double []   camera_atr,
            double []   camera_xyz_dt,
            double []   camera_atr_dt,
            // boolean  fill_gaps,
            int         debug_level)
    {
        boolean [] param_select = new boolean [ErsCorrection.DP_NUM_PARS];
        final int [] par_indices = new int [] {
                ErsCorrection.DP_DSAZ,
                ErsCorrection.DP_DSTL,
                ErsCorrection.DP_DSRL,
                ErsCorrection.DP_DSX,
                ErsCorrection.DP_DSY,
                ErsCorrection.DP_DSZ};
        for (int i : par_indices) {
            param_select[i] = true;
        }
        final double [] camera_dt = new double[] {
                camera_atr_dt[0], camera_atr_dt[1], camera_atr_dt[2],
                camera_xyz_dt[0], camera_xyz_dt[1], camera_xyz_dt[2]};
        final double [][] mb_vectors = new double [ref_pXpYD.length][];
        IntersceneLma intersceneLma = new IntersceneLma(
                this,   // OpticalFlow opticalFlow
                false); // clt_parameters.ilp.ilma_thread_invariant);
        intersceneLma.prepareLMA(
                camera_xyz,   // final double [] scene_xyz0, // camera center in world coordinates (or null to use instance)
                camera_atr,   // final double [] scene_atr0, // camera orientation relative to world frame (or null to use instance)
                scene,        // final QuadCLT scene_QuadClt,
                ref_scene,    // final QuadCLT reference_QuadClt,
                param_select, // final boolean[] param_select,
                null,         // final double [] param_regweights,
                null,         // final double [][] vector_XYS, // optical flow X,Y, confidence obtained from the correlate2DIterate()
                ref_pXpYD,    // final double [][] centers, // macrotile centers (in pixels and average disparities
                false,        // boolean first_run,
                debug_level); // final int debug_level)
        final double [][] last_jt = intersceneLma.getLastJT(); // alternating x,y for each selected parameters
        final Thread [] threads = ImageDtt.newThreadArray(threadsMax);
        final AtomicInteger ai = new AtomicInteger(0);
        for (int ithread = 0; ithread < threads.length; ithread++) {
            threads[ithread] = new Thread() {
                public void run() {
                    for (int nTile = ai.getAndIncrement(); nTile < ref_pXpYD.length; nTile = ai.getAndIncrement()) if (ref_pXpYD[nTile] != null){
                        mb_vectors[nTile] = new double[2];
                        for (int i = 0; i < par_indices.length; i++) {
                            mb_vectors[nTile][0] += camera_dt[i] * last_jt[i][2 * nTile + 0];
                            mb_vectors[nTile][1] += camera_dt[i] * last_jt[i][2 * nTile + 1];
                        }
                        if (Double.isNaN(mb_vectors[nTile][0]) || Double.isNaN(mb_vectors[nTile][1])) {
                            mb_vectors[nTile] = null;
                        }
                    }
                }
            };
        }
        ImageDtt.startAndJoin(threads);
        return mb_vectors;
    }

    public double[][] adjustPairsLMAInterscene(
            CLTParameters clt_parameters,
            QuadCLT       reference_QuadClt,
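getMotionBlur() is essentially a chain rule over the interscene LMA Jacobian: prepareLMA() is called with vector_XYS == null, which (per the IntersceneLma changes above) skips the sample weighting and the LMA fit and only fills last_jt, and then for every tile the image-plane velocity is assembled as

    dpX/dt = sum_k (dpX/dtheta_k) * dtheta_k/dt
    dpY/dt = sum_k (dpY/dtheta_k) * dtheta_k/dt

with theta_k running over the six selected pose parameters (azimuth, tilt, roll, X, Y, Z) and the rates taken from camera_atr_dt / camera_xyz_dt. Tiles whose derivatives come out as NaN are dropped (mb_vectors[nTile] = null).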
@@ -13614,84 +13789,6 @@ public double[][] correlateIntersceneDebug( // only uses GPU and quad
            System.out.println("adjustPairsLMAInterscene() returned null");
            return null;
        }
/*
int eq_stride_hor = 8;
int eq_stride_vert = 8;
double eq_min_stile_weight = 0.2; // 1.0;
int eq_min_stile_number = 10;
double eq_min_stile_fraction = 0.02; // 0.05;
double eq_min_disparity = 5;
double eq_max_disparity = 100;
double eq_weight_add = 0.1;
double eq_weight_scale = 10;
double eq_level = 0.8; // equalize to (log) fraction of average/this strength
if (run_equalize && near_important) {
TileProcessor tp = reference_QuadClt.getTileProcessor();
int tilesX = tp.getTilesX();
int tilesY = tp.getTilesY();
// backup coord_motion[1][][2] // strength
double [] strength_backup = null;
if (debug_equalize) {
strength_backup = new double [coord_motion[1].length];
for (int i = 0; i < strength_backup.length; i++) if (coord_motion[1][i] != null) {
strength_backup[i] = coord_motion[1][i][2];
}
}
while (run_equalize) {
// restore
if (strength_backup != null) {
for (int i = 0; i < strength_backup.length; i++) if (coord_motion[1][i] != null) {
coord_motion[1][i][2] = strength_backup[i];
}
}
equalizeMotionVectorsWeights(
coord_motion, // final double [][][] coord_motion,
tilesX, // final int tilesX,
eq_stride_hor, // final int stride_hor,
eq_stride_vert, // final int stride_vert,
eq_min_stile_weight, // final double min_stile_weight,
eq_min_stile_number, // final int min_stile_number,
eq_min_stile_fraction, // final double min_stile_fraction,
eq_min_disparity, // final double min_disparity,
eq_max_disparity, // final double max_disparity,
eq_weight_add, // final double weight_add,
eq_weight_scale, // final double weight_scale)
eq_level); // equalize to (log) fraction of average/this strength
if (!debug_equalize) {
break;
}
String [] mvTitles = {"dx", "dy","conf", "conf0", "pX", "pY","Disp","defined"}; // ,"blurX","blurY", "blur"};
double [][] dbg_img = new double [mvTitles.length][tilesX*tilesY];
for (int l = 0; l < dbg_img.length; l++) {
Arrays.fill(dbg_img[l], Double.NaN);
}
for (int nTile = 0; nTile < coord_motion[0].length; nTile++) {
if (coord_motion[0][nTile] != null) {
for (int i = 0; i <3; i++) {
dbg_img[4+i][nTile] = coord_motion[0][nTile][i];
}
}
dbg_img[3] = strength_backup;
if (coord_motion[1][nTile] != null) {
for (int i = 0; i <3; i++) {
dbg_img[0+i][nTile] = coord_motion[1][nTile][i];
}
}
dbg_img[7][nTile] = ((coord_motion[0][nTile] != null)?1:0)+((coord_motion[0][nTile] != null)?2:0);
}
(new ShowDoubleFloatArrays()).showArrays( // out of boundary 15
dbg_img,
tilesX,
tilesY,
true,
scene_QuadClt.getImageName()+"-"+reference_QuadClt.getImageName()+"-coord_motion-eq",
mvTitles);
}
}
*/
        intersceneLma.prepareLMA(
                camera_xyz0, // final double [] scene_xyz0, // camera center in world coordinates (or null to use instance)
src/main/java/com/elphel/imagej/tileprocessor/QuadCLT.java
@@ -2660,6 +2660,42 @@ public class QuadCLT extends QuadCLTCPU {
    }

    public static ImagePlus renderGPUFromDSI(
            final int       sensor_mask,
            final boolean   merge_channels,
            final Rectangle full_woi_in,   // show larger than sensor WOI in tiles (or null)
            CLTParameters   clt_parameters,
            double []       disparity_ref,
            final double [] scene_xyz, // camera center in world coordinates
            final double [] scene_atr, // camera orientation relative to world frame
            final QuadCLT   scene,
            final QuadCLT   ref_scene, // now - may be null - for testing if scene is rotated ref
            final boolean   toRGB,
            final boolean   show_nan,
            String          suffix,
            int             threadsMax,
            final int       debugLevel){
        return renderGPUFromDSI(
                sensor_mask,
                merge_channels,
                full_woi_in,    // show larger than sensor WOI in tiles (or null)
                clt_parameters,
                disparity_ref,
                // motion blur compensation
                0.0,            // double mb_tau, // 0.008; // time constant, sec
                0.0,            // mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
                null,           // double [][] mb_vectors, //
                scene_xyz,      // camera center in world coordinates
                scene_atr,      // camera orientation relative to world frame
                scene,
                ref_scene,      // now - may be null - for testing if scene is rotated ref
                toRGB,
                show_nan,
                suffix,
                threadsMax,
                debugLevel);
    }

    public static ImagePlus renderGPUFromDSI(
            final int       sensor_mask,
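The first hunk keeps the previous renderGPUFromDSI() signature as a thin wrapper that forwards mb_tau = 0.0, mb_max_gain = 0.0 and mb_vectors = null, so existing callers keep the old behavior; motion-blur compensation is only engaged when a caller passes non-null mb_vectors, as correlateIntersceneDebug() now does. A hedged call sketch (argument values are placeholders, not from the commit):

    // old form - no motion-blur compensation
    ImagePlus plain = QuadCLT.renderGPUFromDSI(
            -1, false, null, clt_parameters, disparity_ref,
            ZERO3, ZERO3, scene, ref_scene, false, show_nan, "-PLAIN", threadsMax, debugLevel);
    // new form - per-tile mb_vectors enable the two-tile correction
    ImagePlus corrected = QuadCLT.renderGPUFromDSI(
            -1, false, null, clt_parameters, disparity_ref,
            mb_tau, mb_max_gain, mb_vectors,
            ZERO3, ZERO3, scene, ref_scene, false, show_nan, "-MB_CORRECTED", threadsMax, debugLevel);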
@@ -2667,8 +2703,11 @@ public class QuadCLT extends QuadCLTCPU {
            final Rectangle full_woi_in,   // show larger than sensor WOI in tiles (or null)
            CLTParameters   clt_parameters,
            double []       disparity_ref, // not used, just as null/not null now. All offsets are already in scene_xyz, scene_atr (including ref)
            // double []    stereo_offset, // offset reference camera {x,y,z} or null
            // motion blur compensation
            double          mb_tau,      // 0.008; // time constant, sec
            double          mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
            double [][]     mb_vectors,  //
            final double [] scene_xyz, // camera center in world coordinates
            final double [] scene_atr, // camera orientation relative to world frame
            final QuadCLT   scene,
@@ -2678,7 +2717,6 @@ public class QuadCLT extends QuadCLTCPU {
            String          suffix,
            int             threadsMax,
            final int       debugLevel){
        // boolean show_nan = toRGB? clt_parameters.imp.show_color_nan : clt_parameters.imp.show_mono_nan;
        double [][] pXpYD = OpticalFlow.transformToScenePxPyD( // now should work with offset ref_scene
                full_woi_in,   // final Rectangle [] extra_woi, // show larger than sensor WOI (or null)
                disparity_ref, // final double [] disparity_ref, // invalid tiles - NaN in disparity
@@ -2695,35 +2733,62 @@ public class QuadCLT extends QuadCLTCPU {
        if (showPxPyD) {
            int dbg_width = rendered_width/GPUTileProcessor.DTT_SIZE;
            int dbg_height = pXpYD.length/dbg_width;
            double [][] dbg_img = new double [3][pXpYD.length];
            double [][] dbg_img = new double [3 + ((mb_vectors!=null)? 2 : 0)][pXpYD.length];
            String [] dbg_titles = (mb_vectors!=null)?
                    (new String[] {"pX","pY","Disparity","mb_X","mb_Y"}):
                    (new String[] {"pX","pY","Disparity"});
            for (int i = 0; i < dbg_img.length; i++) {
                Arrays.fill(dbg_img[i], Double.NaN);
            }
            for (int nTile = 0; nTile < pXpYD.length; nTile++) if (pXpYD[nTile] != null){
                for (int i = 0; i < dbg_img.length; i++) {
                for (int i = 0; i < pXpYD[nTile].length; i++) {
                    dbg_img[i][nTile] = pXpYD[nTile][i];
                }
                if (mb_vectors[nTile]!=null) {
                    for (int i = 0; i < 2; i++) {
                        dbg_img[3 + i][nTile] = mb_tau * mb_vectors[nTile][i];
                    }
                }
            }
            (new ShowDoubleFloatArrays()).showArrays( // out of boundary 15
                    dbg_img,
                    dbg_width,
                    dbg_height,
                    true,
                    "pXpYD",
                    new String[] {"pX","pY","Disparity"});
                    scene.getImageName()+"-pXpYD",
                    dbg_titles);
        }
        TpTask[][] tp_tasks;
        if (mb_vectors != null) {
            tp_tasks = GpuQuad.setInterTasksMotionBlur( // "true" reference, with stereo actual reference will be offset
                    scene.getNumSensors(),
                    rendered_width,           // should match output size, pXpYD.length
                    !scene.hasGPU(),          // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
                    pXpYD,                    // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
                    null,                     // final boolean [] selection, // may be null, if not null do not process unselected tiles
                    // motion blur compensation
                    mb_tau,                   // final double mb_tau, // 0.008; // time constant, sec
                    mb_max_gain,              // final double mb_max_gain, // 5.0; // motion blur maximal gain (if more - move second point more than a pixel
                    mb_vectors,               // final double [][] mb_vectors, //
                    scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
                    0.0,                      // final double disparity_corr,
                    -1,                       // 0, // margin, // final int margin, // do not use tiles if their centers are closer to the edges
                    null,                     // final boolean [] valid_tiles,
                    threadsMax);              // final int threadsMax) // maximal number of threads to launch
        } else {
            tp_tasks = new TpTask[1][];
            tp_tasks[0] = GpuQuad.setInterTasks( // "true" reference, with stereo actual reference will be offset
                    scene.getNumSensors(),
                    rendered_width,           // should match output size, pXpYD.length
                    !scene.hasGPU(),          // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
                    pXpYD,                    // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
                    null,                     // final boolean [] selection, // may be null, if not null do not process unselected tiles
                    scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
                    0.0,                      // final double disparity_corr,
                    -1,                       // 0, // margin, // final int margin, // do not use tiles if their centers are closer to the edges
                    null,                     // final boolean [] valid_tiles,
                    threadsMax);              // final int threadsMax) // maximal number of threads to launch
        }
        //scene_QuadClt.getTileProcessor().getTileSize();
        TpTask[] tp_tasks_ref = GpuQuad.setInterTasks( // "true" reference, with stereo actual reference will be offset
                scene.getNumSensors(),
                rendered_width,           // should match output size, pXpYD.length
                !scene.hasGPU(),          // final boolean calcPortsCoordinatesAndDerivatives, // GPU can calculate them centreXY
                pXpYD,                    // final double [][] pXpYD, // per-tile array of pX,pY,disparity triplets (or nulls)
                null,                     // final boolean [] selection, // may be null, if not null do not process unselected tiles
                scene.getErsCorrection(), // final GeometryCorrection geometryCorrection,
                0.0,                      // final double disparity_corr,
                -1,                       // 0, // margin, // final int margin, // do not use tiles if their centers are closer to the edges
                null,                     // final boolean [] valid_tiles,
                threadsMax);              // final int threadsMax) // maximal number of threads to launch
        scene.saveQuadClt(); // to re-load new set of Bayer images to the GPU (do nothing for CPU) and Geometry
        ImageDtt image_dtt = new ImageDtt(
                scene.getNumSensors(),
@@ -2738,20 +2803,35 @@ public class QuadCLT extends QuadCLTCPU {
        int [] wh = (full_woi_in == null)? null : new int[]{
                full_woi_in.width  * GPUTileProcessor.DTT_SIZE,
                full_woi_in.height * GPUTileProcessor.DTT_SIZE};
        // boolean toRGB = true; // does not work here, define in ColorProcParameters
        int erase_clt = show_nan ? 1 : 0;
        image_dtt.setReferenceTD( // change to main?
                erase_clt,                  // final int erase_clt,
                wh,                         // null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
                clt_parameters.img_dtt,     // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
                use_reference,              // true, // final boolean use_reference_buffer,
                tp_tasks_ref,               // final TpTask[] tp_tasks,
                clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
                clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
                clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
                clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
                threadsMax,                 // final int threadsMax, // maximal number of threads to launch
                debugLevel);                // final int globalDebugLevel);
        boolean test1 = true;
        if ((mb_vectors != null) && test1) {
            image_dtt.setReferenceTDMotionBlur( // change to main?
                    erase_clt,                  // final int erase_clt,
                    wh,                         // null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
                    clt_parameters.img_dtt,     // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
                    use_reference,              // true, // final boolean use_reference_buffer,
                    tp_tasks,                   // final TpTask[] tp_tasks,
                    clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
                    clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
                    clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
                    clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
                    threadsMax,                 // final int threadsMax, // maximal number of threads to launch
                    debugLevel);                // final int globalDebugLevel);
        } else {
            image_dtt.setReferenceTD( // change to main?
                    erase_clt,                  // final int erase_clt,
                    wh,                         // null, // final int [] wh, // null (use sensor dimensions) or pair {width, height} in pixels
                    clt_parameters.img_dtt,     // final ImageDttParameters imgdtt_params, // Now just extra correlation parameters, later will include, most others
                    use_reference,              // true, // final boolean use_reference_buffer,
                    tp_tasks[0],                // final TpTask[] tp_tasks,
                    clt_parameters.gpu_sigma_r, // final double gpu_sigma_r, // 0.9, 1.1
                    clt_parameters.gpu_sigma_b, // final double gpu_sigma_b, // 0.9, 1.1
                    clt_parameters.gpu_sigma_g, // final double gpu_sigma_g, // 0.6, 0.7
                    clt_parameters.gpu_sigma_m, // final double gpu_sigma_m, // = 0.4; // 0.7;
                    threadsMax,                 // final int threadsMax, // maximal number of threads to launch
                    debugLevel);                // final int globalDebugLevel);
        }
        ImagePlus imp_render = scene.renderFromTD(
                sensor_mask,    // final int sensor_mask,
                merge_channels, // boolean merge_channels,