Elphel / imagej-elphel / Commits

Commit 1671728b, authored Apr 29, 2024 by Andrey Filippov

Equalizing intensities

parent c4df622d

Showing 7 changed files with 916 additions and 583 deletions (+916 / -583)
PolynomialApproximation.java   src/main/java/com/elphel/imagej/common/PolynomialApproximation.java   +513  -353
ComboMatch.java                src/main/java/com/elphel/imagej/orthomosaic/ComboMatch.java           +6    -3
OrthoMap.java                  src/main/java/com/elphel/imagej/orthomosaic/OrthoMap.java             +20   -54
OrthoMapsCollection.java       src/main/java/com/elphel/imagej/orthomosaic/OrthoMapsCollection.java  +257  -154
OrthoMultiLMA.java             ...ain/java/com/elphel/imagej/orthomosaic/OrthoMultiLMA.java          +93   -12
PairwiseOrthoMatch.java        ...ava/com/elphel/imagej/orthomosaic/PairwiseOrthoMatch.java          +26   -7
TileNeibs.java                 src/main/java/com/elphel/imagej/tileprocessor/TileNeibs.java          +1    -0
src/main/java/com/elphel/imagej/common/PolynomialApproximation.java
package com.elphel.imagej.common;

import java.util.concurrent.atomic.AtomicInteger;

import com.elphel.imagej.tileprocessor.ImageDtt;
import com.elphel.imagej.tileprocessor.QuadCLT;

import Jama.LUDecomposition;
import Jama.Matrix;

public class PolynomialApproximation {
	public int debugLevel = 1;
	// TODO Move other methods here
	public PolynomialApproximation(){}
	public PolynomialApproximation(int debugLevel){
		this.debugLevel = debugLevel;
...
@@ -53,7 +58,7 @@ public class PolynomialApproximation {
			System.out.println("polynomialApproximation1d() B:");
			B.print(10, 5);
		}
//		while (!(new LUDecomposition(M)).isNonsingular() && (N1>0)){
		while (!(new LUDecomposition(M)).isNonsingular() && (N1>=0)){ // make N=0 legal ?
			aM = new double [N1][N1];
			aB = new double [N1][1];
...
@@ -81,14 +86,14 @@ public class PolynomialApproximation {
		for (int i = 0; i <= N; i++) result[i] = (i <= N1) ? aR[i][0] : 0.0;
		return result;
	}

	/**
	 * Linear approximates each of 3 functions of 3 variables and finds where they are all zero
	 * @param data: for each sample (1-st index):
	 *   0 - {x,y,z}
	 *   1 - {f1, f2, f3},
	 *   2 - {weight}
	 * @return {x0, y0, z0} where A1*x0+B1*y0+C1*z0+D1=0, A2*x0+B2*y0+C2*z0+D2=0, A3*x0+B3*y0+C3*z0+D3=0
	 */
	public double [] linear3d(double [][][] data){
		/*
		 * Approximating each of the 3 measured parameters (Far/near, tilt x and tilt y) with linear approximation in the vicinity of the last position
...
@@ -102,10 +107,10 @@ public class PolynomialApproximation {
			if (this.debugLevel > 3){
				System.out.println(parNum+","+data[nSample][0][0]+","+data[nSample][0][1]+","+data[nSample][0][2]+","+
						data[nSample][1][0]+","+data[nSample][1][1]+","+data[nSample][1][2]+","+data[nSample][2][0]);
			}
			//TODO replace with PolynomialApproximation class
			double w = (data[nSample].length > 2) ? data[nSample][2][0] : 1.0;
			double [] xyz = data[nSample][0];
			S0 += w;
...
@@ -168,151 +173,151 @@ public class PolynomialApproximation {
	}
	public double [] quadraticMax2d (double [][][] data){
		return quadraticMax2d(data, 1.0E-15);
	}
	public double [] quadraticMax2d (double [][][] data, double thresholdQuad){
		return quadraticMax2d(data, thresholdQuad, debugLevel);
	}
	public double [] quadraticMax2d (double [][][] data, double thresholdQuad, int debugLevel){
		double [][] coeff = quadraticApproximation(data, false, 1.0E-20, thresholdQuad, debugLevel);
		if (coeff == null) return null;
		if (coeff[0].length < 6) return null;
		double [][] aM = {
				{2*coeff[0][0],   coeff[0][2]},  // | 2A, C |
				{  coeff[0][2], 2*coeff[0][1]}}; // | C, 2B |
		Matrix M = (new Matrix(aM));
		double nmQ = normMatix(aM);
		if (debugLevel > 3) System.out.println("M.det()=" + M.det() + " normMatix(aM)=" + nmQ + " data.length=" + data.length);
		if ((nmQ == 0.0) || (Math.abs(M.det())/normMatix(aM) < thresholdQuad)) {
			if (debugLevel > 3) System.out.println("quadraticMax2d() failed: M.det()=" + M.det() + " normMatix(aM)=" + normMatix(aM));
			return null;
		}
		double [][] aB = {
				{-coeff[0][3]},  // | - D |
				{-coeff[0][4]}}; // | - E |
		double [] xy = M.solve(new Matrix(aB)).getColumnPackedCopy();
		return xy;
	}
	// returns maximum's coordinates, value, and coefficients for x2, y2 and xy
	public double [] quadraticMaxV2dX2Y2XY (double [][][] data, double thresholdQuad, int debugLevel){
		double [][] coeff = quadraticApproximation(data, false, 1.0E-20, thresholdQuad, debugLevel);
		if (coeff == null) return null;
		if (coeff[0].length < 6) return null;
		double [][] aM = {
				{2*coeff[0][0],   coeff[0][2]},  // | 2A, C |
				{  coeff[0][2], 2*coeff[0][1]}}; // | C, 2B |
		Matrix M = (new Matrix(aM));
		double nmQ = normMatix(aM);
		if (debugLevel > 3) System.out.println("M.det()=" + M.det() + " normMatix(aM)=" + nmQ + " data.length=" + data.length);
		if ((nmQ == 0.0) || (Math.abs(M.det())/normMatix(aM) < thresholdQuad)) {
			if (debugLevel > 3) System.out.println("quadraticMax2d() failed: M.det()=" + M.det() + " normMatix(aM)=" + normMatix(aM));
			return null;
		}
		double [][] aB = {
				{-coeff[0][3]},  // | - D |
				{-coeff[0][4]}}; // | - E |
		double [] xy = M.solve(new Matrix(aB)).getColumnPackedCopy();
		double vmax = coeff[0][0]*xy[0]*xy[0] + coeff[0][1]*xy[1]*xy[1] + coeff[0][2]*xy[0]*xy[1] + coeff[0][3]*xy[0] + coeff[0][4]*xy[1] + coeff[0][5];
		double [] xyx2y2 = {xy[0], xy[1], vmax, coeff[0][0], coeff[0][1], coeff[0][2], coeff[0][3], coeff[0][4], coeff[0][5]};
		return xyx2y2;
	}
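The 2x2 solve above follows from setting both partial derivatives of f(x,y)=A*x^2+B*y^2+C*x*y+D*x+E*y+F to zero: 2A*x+C*y=-D and C*x+2B*y=-E. A minimal stand-alone sketch (not part of this commit, toy coefficients only) showing the same step with Jama:

// Illustration only: locate the extremum of f(x,y)=A*x^2+B*y^2+C*x*y+D*x+E*y+F
// by solving | 2A  C | |x|   |-D|
//            |  C 2B | |y| = |-E|  -- the same system quadraticMax2d() builds.
import Jama.Matrix;

public class QuadraticMaxSketch {
	public static void main(String[] args) {
		double A = -1.0, B = -2.0, C = 0.5, D = 0.3, E = -0.4, F = 7.0; // hypothetical fitted coefficients
		Matrix M   = new Matrix(new double[][] {{2*A, C}, {C, 2*B}});
		Matrix rhs = new Matrix(new double[][] {{-D}, {-E}});
		double [] xy = M.solve(rhs).getColumnPackedCopy();
		double v = A*xy[0]*xy[0] + B*xy[1]*xy[1] + C*xy[0]*xy[1] + D*xy[0] + E*xy[1] + F;
		System.out.println("extremum at x=" + xy[0] + ", y=" + xy[1] + ", f=" + v);
	}
}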
/* ======================================================================== */
	/**
	 * See below, this version is for backward compatibility with no damping
	 * @param data
	 * @param forceLinear
	 * @return
	 */
	public double [][] quadraticApproximation(
			double [][][] data,
			boolean forceLinear)  // use linear approximation
	{
		return quadraticApproximation(
				data,
				forceLinear,
				null);
	}

	/**
	 * Approximate function z(x,y) as a second degree polynomial (or just linear)
	 * f(x,y)=A*x^2+B*y^2+C*x*y+D*x+E*y+F or f(x,y)=D*x+E*y+F
	 * @param data array consists of lines of either 2 or 3 vectors:
	 *  2-element vector x,y
	 *  variable length vector z (should be the same for all samples)
	 *  optional 1-element vector w (weight of the sample)
	 * @param forceLinear force linear approximation
	 * @param damping optional (may be null) array of 3 (for linear) or 6 (quadratic) values that adds cost
	 *        for corresponding coefficient be non-zero. This can be used to find reasonable solutions when
	 *        data is insufficient. Just one data point would result in a plane of constant z, co-linear
	 *        points will produce a plane with a gradient along this line
	 * @return array of vectors or null
	 *         each vector (one per each z component) is either 6-element- (A,B,C,D,E,F) if quadratic is possible and enabled
	 *         or 3-element - (D,E,F) if linear is possible and quadratic is not possible or disabled
	 *         returns null if not enough data even for the linear approximation
	 */
	public double [][] quadraticApproximation(
			double [][][] data,
			boolean forceLinear,  // use linear approximation
			double [] damping)
	{
		return quadraticApproximation(
				data,
				forceLinear, // use linear approximation
				damping,
				this.debugLevel);
	}

	public double [][] quadraticApproximation( // no use
			double [][][] data,
			boolean forceLinear, // use linear approximation
			int debugLevel){
		return quadraticApproximation(
				data,
				forceLinear, // use linear approximation
				null,
				debugLevel);
	}

	public double [][] quadraticApproximation(
			double [][][] data,
			boolean forceLinear, // use linear approximation
			double [] damping,
			int debugLevel){
		return quadraticApproximation(
				data,
				forceLinear, // use linear approximation
				damping,
				1.0E-10,  // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail) 11.0E-10 failed where it shouldn't?
				1.0E-15,  // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
				debugLevel);
	}

	public double [][] quadraticApproximation(
			double [][][] data,
			boolean forceLinear,   // use linear approximation
			double thresholdLin,   // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail)
			double thresholdQuad)  // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
	{
		return quadraticApproximation(
				data,
				forceLinear, // use linear approximation
				null,
				1.0E-10,  // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail) 11.0E-10 failed where it shouldn't?
				1.0E-15,  // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
				this.debugLevel);
	}
/*
	public double [][] quadraticApproximation( // no use
			double [][][] data,
			boolean forceLinear, // use linear approximation
...
@@ -328,39 +333,39 @@ public class PolynomialApproximation {
				1.0E-15, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
				this.debugLevel);
	}
*/
	public double [][] quadraticApproximation(
			double [][][] data,
			boolean forceLinear,   // use linear approximation
			double thresholdLin,   // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail)
			double thresholdQuad,  // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
			int debugLevel)
	{
		return quadraticApproximation(
				data,
				forceLinear,   // use linear approximation
				null,
				thresholdLin,  // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail) 11.0E-10 failed where it shouldn't?
				thresholdQuad, // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
				debugLevel);
	}
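For orientation, a hedged usage sketch (not part of this commit) of the data layout these wrappers expect: each sample is { {x,y}, {z0,z1,...}, {w} }, with the weight vector optional. The values below are made up; with too few samples the quadratic system is singular, so the call falls back to the linear (D,E,F) fit as documented above.

import com.elphel.imagej.common.PolynomialApproximation;

public class QuadraticApproximationSketch {
	public static void main(String[] args) {
		// Five weighted samples of a single-component z(x,y); omitting {w} would default the weight to 1.0
		double [][][] data = {
				{{-1.0, -1.0}, {2.0}, {1.0}},
				{{ 1.0, -1.0}, {2.2}, {1.0}},
				{{ 0.0,  0.0}, {3.0}, {2.0}},
				{{-1.0,  1.0}, {2.1}, {1.0}},
				{{ 1.0,  1.0}, {2.3}, {1.0}}};
		PolynomialApproximation pa = new PolynomialApproximation(0);
		double [][] coeff = pa.quadraticApproximation(data, false); // may return a linear fit or null
		if (coeff != null) {
			System.out.println((coeff[0].length == 6) ? "quadratic fit (A..F)" : "linear fit (D,E,F)");
		}
	}
}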
	public double [][] quadraticApproximation(
			double [][][] data,
			boolean forceLinear,   // use linear approximation
			double [] damping,
			double thresholdLin,   // threshold ratio of matrix determinant to norm for linear approximation (det too low - fail)
			double thresholdQuad,  // threshold ratio of matrix determinant to norm for quadratic approximation (det too low - fail)
			int debugLevel){
		if (debugLevel > 3) System.out.println("quadraticApproximation(...), debugLevel=" + debugLevel + ":");
		if ((data == null) || (data.length == 0)) {
			return null;
		}
	/* ix, iy - the location of the point with maximal value. We'll approximate the vicinity of that maximum using a
	 * second degree polynomial:
	   Z(x,y)~=A*x^2+B*y^2+C*x*y+D*x+E*y+F
	   by minimizing the sum of squared differences between the actual (Z(x,y)) and approximated values,
	   and then find the maximum on the approximated surface. Here is the math:
...
@@ -424,183 +429,338 @@ public class PolynomialApproximation {
	   (4) 0= A*S30 + B*S12 + C*S21 + D*S20 + E*S11 + F*S10 - SZ10
	   (5) 0= A*S21 + B*S03 + C*S12 + D*S11 + E*S02 + F*S01 - SZ01
	   (6) 0= A*S20 + B*S02 + C*S11 + D*S10 + E*S01 + F*S00 - SZ00
	*/
		Matrix mDampingLin =  null;
		Matrix mDampingQuad = null;
		if (damping != null) {
			mDampingLin = new Matrix(3,3);
			for (int i = 0; i < 3; i++) {
				int j = damping.length + i - 3;
				if (j >= 0) mDampingLin.set(i, i, damping[j]);
			}
			if (!forceLinear) {
				mDampingQuad = new Matrix(6,6);
				for (int i = 0; i < 6; i++) {
					int j = damping.length + i - 6;
					if (j >= 0) mDampingQuad.set(i, i, damping[j]);
				}
			}
		}
		int zDim = 0; // =data[0][1].length;
		for (int i = 0; i < data.length; i++) if (data[i] != null) {
			zDim = data[i][1].length;
			break;
		}
		double w, z, x, x2, x3, x4, y, y2, y3, y4, wz;
		int i, j, n = 0;
		double S00 = 0.0, S10 = 0.0, S01 = 0.0, S20 = 0.0, S11 = 0.0, S02 = 0.0,
		       S30 = 0.0, S21 = 0.0, S12 = 0.0, S03 = 0.0,
		       S40 = 0.0, S31 = 0.0, S22 = 0.0, S13 = 0.0, S04 = 0.0;
		double [] SZ00 = new double [zDim];
		double [] SZ01 = new double [zDim];
		double [] SZ10 = new double [zDim];
		double [] SZ11 = new double [zDim];
		double [] SZ02 = new double [zDim];
		double [] SZ20 = new double [zDim];
		for (i = 0; i < zDim; i++) {
			SZ00[i] = 0.0;
			SZ01[i] = 0.0;
			SZ10[i] = 0.0;
			SZ11[i] = 0.0;
			SZ02[i] = 0.0;
			SZ20[i] = 0.0;
		}
		for (i = 0; i < data.length; i++) if (data[i] != null) {
			w = (data[i].length > 2) ? data[i][2][0] : 1.0;
			if (w > 0) {
				n++;
				x = data[i][0][0];
				y = data[i][0][1];
				x2 = x*x;
				y2 = y*y;
				S00 += w;
				S10 += w*x;
				S01 += w*y;
				S11 += w*x*y;
				S20 += w*x2;
				S02 += w*y2;
				if (!forceLinear) {
					x3 = x2*x;
					x4 = x3*x;
					y3 = y2*y;
					y4 = y3*y;
					S30 += w*x3;
					S21 += w*x2*y;
					S12 += w*x*y2;
					S03 += w*y3;
					S40 += w*x4;
					S31 += w*x3*y;
					S22 += w*x2*y2;
					S13 += w*x*y3;
					S04 += w*y4;
				}
				for (j = 0; j < zDim; j++) {
					z = data[i][1][j];
					wz = w*z;
					SZ00[j] += wz;
					SZ10[j] += wz*x;
					SZ01[j] += wz*y;
					if (!forceLinear) {
						SZ20[j] += wz*x2;
						SZ11[j] += wz*x*y;
						SZ02[j] += wz*y2;
					}
				}
			}
		}
		// need to decide if there is enough data for linear and quadratic
		double [][] mAarrayL = {
				{S20, S11, S10},
				{S11, S02, S01},
				{S10, S01, S00}};
		Matrix mLin = new Matrix(mAarrayL);
		if (mDampingLin != null) {
			mLin.plusEquals(mDampingLin);
		}
		// TODO Maybe bypass determinant checks for damped ?
		Matrix Z;
		if (debugLevel > 3) System.out.println(">>> n=" + n + " det_lin=" + mLin.det() + " norm_lin=" + normMatix(mAarrayL));
		double nmL = normMatix(mAarrayL);
		if ((nmL == 0.0) || (Math.abs(mLin.det())/nmL < thresholdLin)) {
			// return average value for each channel
			if (S00 == 0.0) return null; // not even average
			double [][] ABCDEF = new double [zDim][3];
			for (i = 0; i < zDim; i++) {
				ABCDEF[i][0] = 0.0;
				ABCDEF[i][1] = 0.0;
				ABCDEF[i][2] = SZ00[i]/S00;
			}
			return ABCDEF;
		}
		double [] zAarrayL = new double [3];
		double [][] ABCDEF = new double [zDim][];
		// double [] zAarrayL={SZ10,SZ01,SZ00};
		for (i = 0; i < zDim; i++) {
			zAarrayL[0] = SZ10[i];
			zAarrayL[1] = SZ01[i];
			zAarrayL[2] = SZ00[i];
			Z = new Matrix(zAarrayL, 3);
			ABCDEF[i] = mLin.solve(Z).getRowPackedCopy();
		}
		if (forceLinear) return ABCDEF;
		// quote try quadratic approximation
		double [][] mAarrayQ = {
				{S40, S22, S31, S30, S21, S20},
				{S22, S04, S13, S12, S03, S02},
				{S31, S13, S22, S21, S12, S11},
				{S30, S12, S21, S20, S11, S10},
				{S21, S03, S12, S11, S02, S01},
				{S20, S02, S11, S10, S01, S00}};
		Matrix mQuad = new Matrix(mAarrayQ);
		if (mDampingQuad != null) {
			mQuad.plusEquals(mDampingQuad);
		}
		if (debugLevel > 3) {
			System.out.println(" n=" + n + " det_quad=" + mQuad.det() + " norm_quad=" + normMatix(mAarrayQ) + " data.length=" + data.length);
			mQuad.print(10, 5);
		}
		double nmQ = normMatix(mAarrayQ);
		if ((nmQ == 0.0) || (Math.abs(mQuad.det())/normMatix(mAarrayQ) < thresholdQuad)) {
			if (debugLevel > 0) System.out.println("Using linear approximation, M.det()=" + mQuad.det() +
					" normMatix(mAarrayQ)=" + normMatix(mAarrayQ) + ", thresholdQuad=" + thresholdQuad +
					", nmQ=" + nmQ + ", Math.abs(M.det())/normMatix(mAarrayQ)=" + (Math.abs(mQuad.det())/normMatix(mAarrayQ))); //did not happen
			return ABCDEF; // not enough data for the quadratic approximation, return linear
		}
		// double [] zAarrayQ={SZ20,SZ02,SZ11,SZ10,SZ01,SZ00};
		double [] zAarrayQ = new double [6];
		for (i = 0; i < zDim; i++) {
			zAarrayQ[0] = SZ20[i];
			zAarrayQ[1] = SZ02[i];
			zAarrayQ[2] = SZ11[i];
			zAarrayQ[3] = SZ10[i];
			zAarrayQ[4] = SZ01[i];
			zAarrayQ[5] = SZ00[i];
			Z = new Matrix(zAarrayQ, 6);
			ABCDEF[i] = mQuad.solve(Z).getRowPackedCopy();
		}
		return ABCDEF;
	}
	// calculate "volume" made of the matrix row-vectors, placed orthogonally
	// to be compared to determinant
	public double normMatix(double [][] a) {
		double d, norm = 1.0;
		for (int i = 0; i < a.length; i++) {
			d = 0;
			for (int j = 0; j < a[i].length; j++) d += a[i][j]*a[i][j];
			norm *= Math.sqrt(d);
		}
		return norm;
	}
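A note on the det/norm test used above (a sketch, not in the commit): normMatix() returns the product of the row-vector lengths, so Math.abs(M.det())/normMatix(a) is a scale-independent measure of how close the rows are to being linearly dependent — it is 1 for mutually orthogonal rows and approaches 0 as the system degenerates, which is what thresholdLin/thresholdQuad are compared against. For example:

import Jama.Matrix;
import com.elphel.imagej.common.PolynomialApproximation;

public class DetNormSketch {
	public static void main(String[] args) {
		PolynomialApproximation pa = new PolynomialApproximation(0);
		double [][] good = {{1, 0}, {0, 1}};        // orthogonal rows: ratio = 1
		double [][] bad  = {{1, 1}, {1, 1.000001}}; // nearly dependent rows: ratio ~ 5e-7
		for (double [][] a : new double[][][] {good, bad}) {
			double ratio = Math.abs(new Matrix(a).det()) / pa.normMatix(a);
			System.out.println("det/norm = " + ratio);
		}
	}
}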
	//RuntimeException
	public static double [] getYXRegression(
			final double [] data_x,
			final double [] data_y,
			final boolean [] mask) {
		final Thread [] threads = ImageDtt.newThreadArray(QuadCLT.THREADS_MAX);
		final AtomicInteger ai = new AtomicInteger(0);
		final double [] as0 =  new double [threads.length];
		final double [] asx =  new double [threads.length];
		final double [] asx2 = new double [threads.length];
		final double [] asy =  new double [threads.length];
		final double [] asxy = new double [threads.length];
		final AtomicInteger ati = new AtomicInteger(0);
		ai.set(0);
		for (int ithread = 0; ithread < threads.length; ithread++) {
			threads[ithread] = new Thread() {
				public void run() {
					int thread_num = ati.getAndIncrement();
					for (int ipix = ai.getAndIncrement(); ipix < data_x.length; ipix = ai.getAndIncrement()) if ((mask == null) || mask[ipix]){
						double x = data_x[ipix];
						double y = data_y[ipix];
						if (!Double.isNaN(x) && !Double.isNaN(y)) {
							as0 [thread_num] += 1;
							asx [thread_num] += x;
							asx2[thread_num] += x*x;
							asy [thread_num] += y;
							asxy[thread_num] += x*y;
						}
					} // for (int ipix
				}
			};
		}
		ImageDtt.startAndJoin(threads);
		double s0 =  0.0;
		double sx =  0.0;
		double sx2 = 0.0;
		double sy =  0.0;
		double sxy = 0.0;
		for (int i = 0; i < threads.length; i++) {
			s0  += as0[i];
			sx  += asx[i];
			sx2 += asx2[i];
			sy  += asy[i];
			sxy += asxy[i];
		}
		double dnm = s0*sx2 - sx*sx;
		double a = (sxy*s0 - sy*sx) / dnm;
		double b = (sy*sx2 - sxy*sx) / dnm;
		return new double [] {a, b};
	}
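The per-thread partial sums above are the usual least-squares accumulators; the closed form being evaluated is a = (S0*Sxy - Sx*Sy)/(S0*Sxx - Sx*Sx) and b = (Sy*Sxx - Sxy*Sx)/(S0*Sxx - Sx*Sx). A tiny check with illustrative data (not from the commit):

import com.elphel.imagej.common.PolynomialApproximation;

public class YXRegressionSketch {
	public static void main(String[] args) {
		double [] x = {0, 1, 2, 3, 4};
		double [] y = {1.0, 3.1, 4.9, 7.2, 8.8};                            // roughly y = 2x + 1
		double [] ab = PolynomialApproximation.getYXRegression(x, y, null); // null mask = use all samples
		System.out.println("a=" + ab[0] + " b=" + ab[1]);                   // expect a ~ 2, b ~ 1
	}
}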
	/**
	 * Get best fit ax+b symmetrical for X and Y, same weights
	 * https://en.wikipedia.org/wiki/Deming_regression#Orthogonal_regression
	 * @param data_x
	 * @param data_y
	 * @param mask
	 * @return
	 */
	public static double [] getOrthoRegression( // symmetrical for X and Y, same error weights
			final double [] data_x,
			final double [] data_y,
			final boolean [] mask_in) {
		final boolean [] mask = new boolean [data_x.length];
		final Thread [] threads = ImageDtt.newThreadArray(QuadCLT.THREADS_MAX);
		final AtomicInteger ai = new AtomicInteger(0);
		final double [] as0 = new double [threads.length];
		final double [] asx = new double [threads.length];
		final double [] asy = new double [threads.length];
		final AtomicInteger ati = new AtomicInteger(0);
		ai.set(0);
		// Find centroid first
		for (int ithread = 0; ithread < threads.length; ithread++) {
			threads[ithread] = new Thread() {
				public void run() {
					int thread_num = ati.getAndIncrement();
					for (int ipix = ai.getAndIncrement(); ipix < data_x.length; ipix = ai.getAndIncrement()) if ((mask_in == null) || mask_in[ipix]){
						double x = data_x[ipix];
						double y = data_y[ipix];
						if (!Double.isNaN(x) && !Double.isNaN(y)) {
							as0[thread_num] += 1;
							asx[thread_num] += x;
							asy[thread_num] += y;
							mask[ipix] = true;
						}
					} // for (int ipix
				}
			};
		}
		ImageDtt.startAndJoin(threads);
		double s0 = 0.0;
		double sx = 0.0;
		double sy = 0.0;
		for (int i = 0; i < threads.length; i++) {
			s0 += as0[i];
			sx += asx[i];
			sy += asy[i];
		}
		double [] z_mean = {sx/s0, sy/s0};
		// complex
		final double [] as_re = new double [threads.length];
		final double [] as_im = new double [threads.length];
		ai.set(0);
		ati.set(0);
		for (int ithread = 0; ithread < threads.length; ithread++) {
			threads[ithread] = new Thread() {
				public void run() {
					int thread_num = ati.getAndIncrement();
					for (int ipix = ai.getAndIncrement(); ipix < data_x.length; ipix = ai.getAndIncrement()) if ((mask == null) || mask[ipix]){
						double x = data_x[ipix] - z_mean[0];
						double y = data_y[ipix] - z_mean[1];
						as_re[thread_num] += x*x - y*y;
						as_im[thread_num] += 2*x*y;
					} // for (int ipix
				}
			};
		}
		ImageDtt.startAndJoin(threads);
		double s_re = 0.0;
		double s_im = 0.0;
		for (int i = 0; i < threads.length; i++) {
			s_re += as_re[i];
			s_im += as_im[i];
		}
		// https://en.wikipedia.org/wiki/Square_root#Algebraic_formula
		// sqrt (s_re+i*s_im)
		double sqrt_re = Math.sqrt(0.5*(Math.sqrt(s_re*s_re + s_im*s_im) + s_re));
		double sqrt_im = ((s_im > 0)? 1 : -1) * Math.sqrt(0.5*(Math.sqrt(s_re*s_re + s_im*s_im) - s_re));
		double a = sqrt_im/sqrt_re;
		double b = z_mean[1] - a*z_mean[0];
		return new double [] {a, b};
	}
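The complex-sum trick above is worth spelling out (a sketch under the same assumptions, not part of the commit): with centered coordinates the orthogonal-regression direction angle satisfies tan(2*theta) = 2*Sxy/(Sxx - Syy); summing (x + i*y)^2 yields exactly (Sxx - Syy, 2*Sxy), and the principal square root of that complex number points at angle theta, so the slope is a = Im/Re and b follows from forcing the line through the centroid. A serial re-derivation on a toy line:

// Stand-alone check of the orthogonal-regression formula used in getOrthoRegression().
public class OrthoRegressionSketch {
	public static void main(String[] args) {
		double [] xs = {0, 1, 2, 3, 4};
		double [] ys = new double [xs.length];
		for (int i = 0; i < xs.length; i++) ys[i] = 2.0*xs[i] + 1.0; // exact line y = 2x + 1
		double s0 = 0, sx = 0, sy = 0;
		for (int i = 0; i < xs.length; i++) { s0++; sx += xs[i]; sy += ys[i]; }
		double mx = sx/s0, my = sy/s0;
		double s_re = 0, s_im = 0;
		for (int i = 0; i < xs.length; i++) {
			double x = xs[i] - mx, y = ys[i] - my;
			s_re += x*x - y*y;   // Re of sum((x + i*y)^2)
			s_im += 2*x*y;       // Im of sum((x + i*y)^2)
		}
		double r = Math.sqrt(s_re*s_re + s_im*s_im);
		double sqrt_re = Math.sqrt(0.5*(r + s_re));
		double sqrt_im = ((s_im > 0)? 1 : -1) * Math.sqrt(0.5*(r - s_re));
		double a = sqrt_im/sqrt_re;
		double b = my - a*mx;
		System.out.println("a=" + a + " b=" + b); // expect a = 2, b = 1
	}
}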
	public static void applyRegression(
			final double [] data, // clone by caller
			final double [] regression) {
		final Thread [] threads = ImageDtt.newThreadArray(QuadCLT.THREADS_MAX);
		final AtomicInteger ai = new AtomicInteger(0);
		for (int ithread = 0; ithread < threads.length; ithread++) {
			threads[ithread] = new Thread() {
				public void run() {
					for (int ipix = ai.getAndIncrement(); ipix < data.length; ipix = ai.getAndIncrement()){
						data[ipix] = regression[0] * data[ipix] + regression[1];
					}
				}
			};
		}
		ImageDtt.startAndJoin(threads);
	}

	public static double [] invertRegression(double [] regression) {
		return new double [] {1.0/regression[0], -regression[1]/regression[0]};
	}
}
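Taken together, the new static helpers support the intensity-equalization flow used in OrthoMapsCollection below: fit y ~ a*x + b between the overlapping pixels of two renders, then map the second image back onto the first image's scale. A hedged end-to-end sketch (toy arrays; the real caller builds the mask from a joint histogram):

import com.elphel.imagej.common.PolynomialApproximation;

public class EqualizeSketch {
	public static void main(String[] args) {
		double [] img0 = {10, 12, 14, 16, 18};             // reference intensities
		double [] img1 = {21, 25, 29, 33, 37};             // same scene, different gain/offset (here y = 2x + 1)
		boolean [] mask = {true, true, true, true, true};  // overlapping, valid pixels
		double [] reg = PolynomialApproximation.getOrthoRegression(img0, img1, mask); // img1 ~ reg[0]*img0 + reg[1]
		double [] inv = PolynomialApproximation.invertRegression(reg);                // img0 ~ inv[0]*img1 + inv[1]
		double [] img1_eq = img1.clone();                  // applyRegression() modifies in place, so clone first
		PolynomialApproximation.applyRegression(img1_eq, inv);
		System.out.println("equalized first pixel: " + img1_eq[0]); // expect ~10
	}
}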
src/main/java/com/elphel/imagej/orthomosaic/ComboMatch.java
...
...
@@ -171,7 +171,7 @@ public class ComboMatch {
		boolean pattern_match =     true;  // false;
		boolean bounds_to_indices = true;
		int     temp_mode =         1;
		int     temp_mode =         0;
		boolean restore_temp =      true;
		double  frac_remove =       clt_parameters.imp.pmtch_frac_remove; // 0.15;
		double  metric_error =      clt_parameters.imp.pmtch_metric_err;  // 0.05; // 0.02;// 2 cm
...
...
@@ -1030,10 +1030,13 @@ public class ComboMatch {
			}
			if (render_match) {
				String title = String.format("multi_%03d-%03d_%s-%s_zoom%d_%d",
						gpu_pair[0], gpu_pair[1], gpu_spair[0], gpu_spair[1], min_zoom_lev, zoom_lev);
				// Avoid renderMulti() - it duplicates code renderMultiDouble()
				int eq_mode = 2; // calculate
				ImagePlus imp_img_pair = maps_collection.renderMulti( //_zoom<integer> is needed for opening with "Extract Objects" command
						title,                          // String title,
						OrthoMapsCollection.MODE_IMAGE, // int mode, // 0 - regular image, 1 - altitudes, 2 - black/white mask // boolean use_alt,
						title,                          // String title,
//						OrthoMapsCollection.MODE_IMAGE, // int mode, // 0 - regular image, 1 - altitudes, 2 - black/white mask // boolean use_alt,
						eq_mode,            //int eq_mode, // 0 - ignore equalization, 1 - use stored equalization, 2 - calculate equalization
						gpu_pair,           // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
						bounds_to_indices,  // boolean bounds_to_indices,
						temp_mode,          // int temp_mode, // 0 - do nothing, 1 - equalize average,2 - try to correct
...
src/main/java/com/elphel/imagej/orthomosaic/OrthoMap.java
...
...
@@ -115,6 +115,7 @@ public class OrthoMap implements Comparable <OrthoMap>, Serializable{
	public transient double    agl =        Double.NaN;
	public transient int       num_scenes = -1;; // number of scenes that made up this image
	public transient double    sfm_gain =   Double.NaN; // maximal SfM gain of this map
	public transient double [] equalize =   {1, 0}; // rectified value = equalize[0]*source_value+equalize[1]
	private void writeObject(ObjectOutputStream oos) throws IOException {
		oos.defaultWriteObject();
		oos.writeObject(path);
...
@@ -137,8 +138,9 @@ public class OrthoMap implements Comparable <OrthoMap>, Serializable{
		oos.writeObject(agl);
		oos.writeObject(num_scenes);
		oos.writeObject(sfm_gain);
		oos.writeObject(equalize);
	}
	private void readObject(ObjectInputStream ois) throws ClassNotFoundException, IOException {
//		sfm_gain = Double.NaN;
//		num_scenes = -1;
...
@@ -160,12 +162,26 @@ public class OrthoMap implements Comparable <OrthoMap>, Serializable{
		agl =        (double) ois.readObject();
		num_scenes = (int)    ois.readObject();
		sfm_gain =   (double) ois.readObject();
//		equalize = new double[] {1,0};
		equalize =   (double []) ois.readObject();
		images = new HashMap<Integer, FloatImageData>(); // field images was not saved
		averageImagePixel = Double.NaN; // average image pixel value (to combine with raw)
		// pairwise_matches is not transient
//		pairwise_matches = new HashMap<Double, PairwiseOrthoMatch>();
	}

	double getEqualized(double d) {
		return d * equalize[0] + equalize[1];
	}
	double [] getEqualize() {
		return equalize;
	}
	void setEqualize(double [] equalize) {
		this.equalize = equalize;
	}

	@Override
	public int compareTo(OrthoMap otherPlayer) {
...
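The new equalize field stores the per-map linear correction in the same a,b convention (rectified = equalize[0]*source + equalize[1]). A minimal usage sketch with hypothetical values; the accessors are package-private, so this assumes code in the same com.elphel.imagej.orthomosaic package:

	// Hypothetical helper, not part of the commit; "map" is an already-loaded OrthoMap instance.
	static double rectifyPixel(OrthoMap map, double rawValue, double [] regression) {
		map.setEqualize(regression);       // e.g. {1.05, -2.3} obtained from getOrthoRegression()
		return map.getEqualized(rawValue); // equalize[0]*rawValue + equalize[1]
	}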
@@ -1717,9 +1733,9 @@ public class OrthoMap implements Comparable <OrthoMap>, Serializable{
			};
		}
		ImageDtt.startAndJoin(threads);
		double [] ab = getDatiRegression(
				temp, // final double [] temp,
				dati, // final double [] dati,
		double [] ab = PolynomialApproximation.getYXRegression(
				temp, // final double [] data_x,
				dati, // final double [] data_y,
				flat); // final boolean [] mask);
		double a = ab[0];
		double b = ab[1];
...
@@ -1774,56 +1790,6 @@ public class OrthoMap implements Comparable <OrthoMap>, Serializable{
		return flat;
	}

	private static double [] getDatiRegression(
			final double [] temp,
			final double [] dati,
			final boolean [] mask) {
		final Thread [] threads = ImageDtt.newThreadArray(QuadCLT.THREADS_MAX);
		final AtomicInteger ai = new AtomicInteger(0);
		final double [] as0 =  new double [threads.length];
		final double [] asx =  new double [threads.length];
		final double [] asx2 = new double [threads.length];
		final double [] asy =  new double [threads.length];
		final double [] asxy = new double [threads.length];
		final AtomicInteger ati = new AtomicInteger(0);
		ai.set(0);
		for (int ithread = 0; ithread < threads.length; ithread++) {
			threads[ithread] = new Thread() {
				public void run() {
					int thread_num = ati.getAndIncrement();
					for (int ipix = ai.getAndIncrement(); ipix < temp.length; ipix = ai.getAndIncrement()) if (mask[ipix]){
						double x = temp[ipix];
						double y = dati[ipix];
						if (!Double.isNaN(x + y)) {
							as0 [thread_num] += 1;
							asx [thread_num] += x;
							asx2[thread_num] += x*x;
							asy [thread_num] += y;
							asxy[thread_num] += x*y;
						}
					} // for (int ipix
				}
			};
		}
		ImageDtt.startAndJoin(threads);
		double s0 =  0.0;
		double sx =  0.0;
		double sx2 = 0.0;
		double sy =  0.0;
		double sxy = 0.0;
		for (int i = 0; i < threads.length; i++) {
			s0  += as0[i];
			sx  += asx[i];
			sx2 += asx2[i];
			sy  += asy[i];
			sxy += asxy[i];
		}
		double dnm = s0*sx2 - sx*sx;
		double a = (sxy*s0 - sy*sx) / dnm;
		double b = (sy*sx2 - sxy*sx) / dnm;
		return new double [] {a, b};
	}

	private static int [][] getCirclePoints(double radius) {
...
src/main/java/com/elphel/imagej/orthomosaic/OrthoMapsCollection.java
...
...
@@ -30,6 +30,7 @@ import com.elphel.imagej.calibration.CalibrationFileManagement;
import com.elphel.imagej.cameras.CLTParameters;
import com.elphel.imagej.common.DoubleGaussianBlur;
import com.elphel.imagej.common.GenericJTabbedDialog;
import com.elphel.imagej.common.PolynomialApproximation;
import com.elphel.imagej.common.ShowDoubleFloatArrays;
import com.elphel.imagej.gpu.GPUTileProcessor;
import com.elphel.imagej.gpu.TpTask;
...
@@ -537,32 +538,10 @@ public class OrthoMapsCollection implements Serializable{
			ol.setPixels(xy_src);
		}
	}

	public ImagePlus renderMulti(
			String title,
			int mode,             // 0 - regular image, 1 - altitudes, 2 - black/white mask
			boolean show_centers,
			int zoom_level,
			int temp_mode,        // 0 - do nothing, 1 - equalize average,2 - try to correct
			int [] origin){
		return renderMulti(
				title,             // String title,
				mode,              // int mode, // 0 - regular image, 1 - altitudes, 2 - black/white mask
				null,              // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
				false,             // bounds_to_indices, // boolean bounds_to_indices,
				temp_mode,         // int temp_mode, // 0 - do nothing, 1 - equalize average,2 - try to correct
				null,              // double [][][] affines, // null or [indices.length][2][3]
				null,              // FineXYCorr warp,
				show_centers,      // boolean show_centers,
				zoom_level,        // int zoom_level,
				origin);           // int [] origin)
	}
	public ImagePlus renderMulti(
			String title,         // boolean use_alt,
			int mode,             // 0 - regular image, 1 - altitudes, 2 - black/white mask
			int eq_mode,          // 0 - ignore equalization, 1 - use stored equalization, 2 - calculate equalization
			int [] indices,       // null or which indices to use (normally just 2 for pairwise comparison)
			boolean bounds_to_indices,
			int temp_mode,        // 0 - do nothing, 1 - equalize average,2 - try to correct
...
@@ -570,82 +549,157 @@ public class OrthoMapsCollection implements Serializable{
			FineXYCorr warp, // use for a single pair only
			boolean show_centers,
			int zoom_level,
			int [] origin){
		int num_images = (indices != null) ? indices.length : ortho_maps.length;
		int [] wh = new int [2];
		double [][] centers = new double [(indices != null) ? indices.length : ortho_maps.length][];
/*
		float [][] multi = renderMulti (
//				use_alt, // boolean use_alt,
				mode, // int mode, // 0 - regular image, 1 - altitudes, 2 - black/white mask
				OrthoMapsCollection.MODE_IMAGE, // int mode, // 0 - regular image, 1 - altitudes, 2 - black/white mask
				indices, // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
				bounds_to_indices, // boolean bounds_to_indices,
				affines, // double [][][] affines, // null or [indices.length][2][3]
				warp, // FineXYCorr warp,
				warp, // FineXYCorr warp,
				zoom_level, // int zoom_level,
				wh, // int [] wh,
				origin, // int [] origin){ // maps[0] as a reference
				centers); // double [][] centers)
		int num_images = (indices != null)? indices.length : ortho_maps.length;
		String [] map_names = new String [num_images + (((num_images == 2) && (mode == MODE_IMAGE))? 1 : 0)];
		float [][] extra_multi = new float [map_names.length][];
*/
		double [][] dmulti = renderMultiDouble (
				null,              // double [][] ground_planes, // null - images, non-null altitudes. use new double[2][3] for old way alt
				indices,           // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
				bounds_to_indices, // boolean bounds_to_indices,
				affines,           // affines, // double [][][] affines, // null or [indices.length][2][3]
				null,              // double [][] equalize,
				(eq_mode != 1),    // boolean ignore_equalize,
				null,              // warp, // FineXYCorr warp,,
				zoom_level,        // int zoom_level,
				wh,                // int [] wh,
				origin,            // int [] origin){ // maps[0] as a reference
				centers);          // double [][] centers)
		if ((eq_mode == 2) && (num_images == 2)) {
			boolean [] mask = new boolean [dmulti[0].length];
//			int hist_min = -140,hist_max=135; // 700,200
//			int hist_min = -100,hist_max=100; // 700,200
			int hist_min = -250, hist_max = 100; // 700,200
			int hist_width = hist_max - hist_min;
			double [] hist = new double [hist_width * hist_width];
			for (int i = 0; i < dmulti[0].length; i++) if (!Double.isNaN(dmulti[0][i]) && !Double.isNaN(dmulti[1][i])){
				int x = (int) Math.round(dmulti[0][i]) - hist_min;
				int y = (int) Math.round(dmulti[1][i]) - hist_min;
				if ((x >= 0) && (y >= 0) && (x < hist_width) && (y < hist_width)) {
					int indx = y * hist_width + x;
					hist[indx] += 1.0;
					mask[i] = true;
				}
			}
			ShowDoubleFloatArrays.showArrays(
					hist,
					hist_width,
					hist_width,
					title+"hist_"+hist_min+"_"+hist_max);
			double [] dmulti1 = dmulti[1].clone();
			double [] regression = PolynomialApproximation.getOrthoRegression(
					dmulti[0], // final double [] data_x,
					dmulti[1], // final double [] data_y,
					mask);     // final boolean [] mask)
			double [] inv_regression = PolynomialApproximation.invertRegression(regression);
			double [] regression10 = PolynomialApproximation.getOrthoRegression(
					dmulti[1], // final double [] data_x,
					dmulti[0], // final double [] data_y,
					mask);     // final boolean [] mask)
			double [] regression00 = PolynomialApproximation.getOrthoRegression(
					dmulti[0], // final double [] data_x,
					dmulti[0], // final double [] data_y,
					mask);     // final boolean [] mask)
			double [] regression11 = PolynomialApproximation.getOrthoRegression(
					dmulti[1], // final double [] data_x,
					dmulti[1], // final double [] data_y,
					mask);     // final boolean [] mask)
			PolynomialApproximation.applyRegression(
					dmulti1,         // final double [] data, // clone by caller
					inv_regression); // final double [] regression) {
			double [] regression01 = PolynomialApproximation.getOrthoRegression(
					dmulti[0], // final double [] data_x,
					dmulti1,   // final double [] data_y,
					mask);     // final boolean [] mask)
			double [] dmulti12 = dmulti[1].clone();
			for (int i = 0; i < dmulti12.length; i++) {
				dmulti12[i] = 2.0 * dmulti12[i] + 10;
			}
			double [] regression012 = PolynomialApproximation.getOrthoRegression(
					dmulti[0], // final double [] data_x,
					dmulti12,  // final double [] data_y,
					mask);     // final boolean [] mask)
			double [] dmulti13 = dmulti[1].clone();
			for (int i = 0; i < dmulti13.length; i++) {
				dmulti13[i] = dmulti13[i] + 10;
			}
			double [] regression013 = PolynomialApproximation.getOrthoRegression(
					dmulti[0], // final double [] data_x,
					dmulti13,  // final double [] data_y,
					mask);     // final boolean [] mask)
			double [] dmulti14 = dmulti[1].clone();
			for (int i = 0; i < dmulti14.length; i++) {
				dmulti14[i] = 2 * dmulti14[i];
			}
			double [] regression014 = PolynomialApproximation.getOrthoRegression(
					dmulti[0], // final double [] data_x,
					dmulti14,  // final double [] data_y,
					mask);     // final boolean [] mask)
			double [] regression_single = getYXRegression(
					dmulti[0],  // final double [] data_x,
					dmulti[1]); // final double [] data_y,
			System.out.println();
		}
		String [] map_names = new String [num_images + (((num_images == 2))? 1 : 0)];
//		float [][] extra_multi = new float [map_names.length][];
		double [][] extra_multi = new double [map_names.length][];
		for (int n = 0; n < num_images; n++) {
			int mapn = (indices != null) ? indices[n] : n;
			map_names[n] = ortho_maps[mapn].getName()+"_"+ortho_maps[mapn].getLocalDateTime().toString().replace("T","_")+"_UTC";
			map_names[n] += String.format(" raw=%7.1f", ortho_maps[mapn].getTemperature());
			extra_multi[n] = multi[n];
			extra_multi[n] = dmulti[n];
		}
		if (map_names.length > num_images) { // inefficient way, only for display
			// correct(offset) pixel values relative to multi[0]
			int map0 = (indices != null) ? indices[0] : 0;
			if (mode == MODE_IMAGE) {
				for (int n = 1; n < num_images; n++) {
					//temp_mode, // 0 - do nothing, 1 - equalize average,2 - try to correct
					int mapn = (indices != null) ? indices[n] : n;
					double offs = 0;
					switch (temp_mode) {
					case 1:
						offs = ortho_maps[map0].getAveragePixel() - ortho_maps[mapn].getAveragePixel();
						break;
					case 2:
						offs = ortho_maps[mapn].getTemperature() - ortho_maps[mapn].getAveragePixel()
							- (ortho_maps[map0].getTemperature() - ortho_maps[map0].getAveragePixel());
						break;
					}
					float foffs = (float) offs;
					for (int i = 0; i < extra_multi[n].length; i++) if (!Float.isNaN(extra_multi[n][i])) {
						extra_multi[n][i] += foffs;
					}
			for (int n = 1; n < num_images; n++) {
				//temp_mode, // 0 - do nothing, 1 - equalize average,2 - try to correct
				int mapn = (indices != null) ? indices[n] : n;
				double offs = 0;
				switch (temp_mode) {
				case 1:
					offs = ortho_maps[map0].getAveragePixel() - ortho_maps[mapn].getAveragePixel();
					break;
				case 2:
					offs = ortho_maps[mapn].getTemperature() - ortho_maps[mapn].getAveragePixel()
						- (ortho_maps[map0].getTemperature() - ortho_maps[map0].getAveragePixel());
					break;
				}
//				float foffs = (float) offs;
				for (int i = 0; i < extra_multi[n].length; i++) if (!Double.isNaN(extra_multi[n][i])) {
					extra_multi[n][i] += offs;
				}
			}
			map_names[num_images] = "Diff 0-1";
			extra_multi[num_images] = new float  [extra_multi[0].length];
			extra_multi[num_images] = new double [extra_multi[0].length];
			for (int i = 0; i < extra_multi[num_images].length; i++) {
				extra_multi[num_images][i] = extra_multi[1][i] - extra_multi[0][i];
			}
		}
		ImageStack stack;
		if (mode == MODE_MASK) {
			title += "-MASK";
			byte [][] byte_pix = new byte [multi.length][]; // multi[0].length];
			for (int n = 0; n < multi.length; n++) { // slow
				byte_pix[n] = new byte [multi[n].length];
				for (int i = 0; i < multi[n].length; i++) {
					byte_pix[n][i] = (byte) (Float.isNaN(multi[n][i]) ? 0 : 0xff);
				}
			}
			stack = ShowDoubleFloatArrays.makeStack(byte_pix, wh[0], wh[1], map_names);
		} else {
			stack = ShowDoubleFloatArrays.makeStack(extra_multi, wh[0], wh[1], map_names, false);
		}
		ImagePlus imp = new ImagePlus(title, stack);
		if (show_centers) {
			PointRoi roi = new PointRoi();
...
@@ -657,9 +711,28 @@ public class OrthoMapsCollection implements Serializable{
		}
		imp.show(); // debugging
		return imp;
	}

	public static double [] getYXRegression(
			final double [] data_x,
			final double [] data_y) {
		double s0 = 0, sx = 0, sx2 = 0, sy = 0, sxy = 0;
		for (int i = 0; i < data_x.length; i++) {
			double x = data_x[i];
			double y = 2 * data_x[i];
			if (!Double.isNaN(x) && !Double.isNaN(y)) {
				s0  += 1.0;
				sx  += x;
				sx2 += x*x;
				sy  += y;
				sxy += x*y;
			}
		}
		double d = (s0*sx2 - sx*sx);
		double a = (sxy*s0 - sy*sx)/d;
		double b = (sy*sx2 - sx*sxy)/d;
		return new double [] {a, b};
	}

	/**
	 * Rectify and render multiple images (as slices) matching vert_meters to
...
@@ -671,27 +744,8 @@ public class OrthoMapsCollection implements Serializable{
	 * @param centers - null or double[maps.length][] - will return image centers coordinates
	 * @return
	 */
	public float [][] renderMulti(
			int mode,    // 0 - regular image, 1 - altitudes, 2 - black/white mask
			int zoom_level,
			int [] wh,
			int [] origin, // maps[0] as a reference
			double [][] centers){
		return renderMulti (
//				use_alt,    // boolean use_alt,
				mode,       // int mode, // 0 - regular image, 1 - altitudes, 2 - black/white mask
				null,       // int [] indices,
				false,      // boolean bounds_to_indices,
				null,       // double [][][] affines,
				null,       // FineXYCorr warp,
				zoom_level, // int zoom_level,
				wh,         // int [] wh,
				origin,     // int [] origin, // maps[0] as a reference
				centers);   // double [][] centers)
	}
	public float [][] renderMulti( // boolean use_alt,
			int mode,       // 0 - regular image, 1 - altitudes, 2 - black/white mask
			int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
			boolean bounds_to_indices,
...
@@ -703,9 +757,17 @@ public class OrthoMapsCollection implements Serializable{
			double [][] centers){
		final boolean use_alt = mode == MODE_ALT;
		final int dbg_x = 2783, dbg_y = -5228;
		int [][] bounds = getBoundsPixels( // should be for rectified, {-bounds[0][0], -bounds[0][1]} - exact center
				zoom_level,
				bounds_to_indices? indices: null);
		int [][] bounds;
		if (affines == null) {
			bounds = getBoundsPixels( // should be for rectified, {-bounds[0][0], -bounds[0][1]} - exact center
					zoom_level,
					bounds_to_indices? indices: null);
		} else {
			bounds = getBoundsPixels( // should be for rectified, {-bounds[0][0], -bounds[0][1]} - exact center
					zoom_level,
					bounds_to_indices? indices: null,
					affines);
		}
		int width =  bounds[0][1] - bounds[0][0]; // bounds[x][0] - negative
		int height = bounds[1][1] - bounds[1][0];
		if (wh != null) {
...
@@ -724,27 +786,24 @@ public class OrthoMapsCollection implements Serializable{
		}
		final float [][] fpixels = new float [indices.length][width*height];
		// for (int nmap:indices) { // = 0; nmap< ortho_maps.length; nmap++) {
		for (int indx = 0; indx < indices.length; indx++) { //:indices) { // = 0; nmap< ortho_maps.length; nmap++) {
			final int findx = indx;
			final int nmap = indices[indx]; // nmap;
			final double [][] affine = (affines != null) ? affines[indx] : ortho_maps[nmap].affine; // only here use provided
			Arrays.fill(fpixels[findx], Float.NaN);
			final double scale =     1.0/OrthoMap.getPixelSizeMeters(zoom_level);
			final double src_scale = 1.0/OrthoMap.getPixelSizeMeters(ortho_maps[nmap].orig_zoom_level); // pix per meter
			// metric bounds of the rectified image relative to its origin
//			double [][] mbounds = (affines !=null) ?
//					ortho_maps[nmap].getBoundsMeters(true, affines[indx]): // use provided affine
//					ortho_maps[nmap].getBoundsMeters(true);
			double [][] mbounds = ortho_maps[nmap].getBoundsMeters(true); // keep original bounds
//			double [][] mbounds = ortho_maps[nmap].getBoundsMeters(true); // keep original bounds
			double [][] mbounds = ortho_maps[nmap].getBoundsMeters(true, affine); // keep original bounds
			double [] enu_offset = ortho_maps[indices[0]].enuOffsetTo(ortho_maps[nmap]);
			final double [] scaled_out_center = { // xy center to apply affine to in output pixels
					-bounds[0][0] + scale*enu_offset[0],
					-bounds[1][0] - scale*enu_offset[1]};
			if (centers != null) {
				centers[findx] = scaled_out_center;
			}
			final int [][] obounds = new int [2][2]; // output (rectified, combined) image bounds, relative to the top-left
			for (int n = 0; n < 2; n++) { // output pixels
				obounds[n][0] = (int) Math.floor(scaled_out_center[n] + scale*mbounds[n][0]);
				obounds[n][1] = (int) Math.ceil (scaled_out_center[n] + scale*mbounds[n][1]);
			}
...
@@ -752,44 +811,69 @@ public class OrthoMapsCollection implements Serializable{
 			final int ownd_width  = obounds[0][1] - obounds[0][0];
 			final int ownd_height = obounds[1][1] - obounds[1][0];
 			final int ownd_len = ownd_width * ownd_height;
 			// double [][] src_bounds = (affines != null) ?
 			//		ortho_maps[nmap].getBoundsMeters (true, affines[indx]) : // use provided affine
 			//		ortho_maps[nmap].getBoundsMeters (true);
-			double [][] src_bounds = ortho_maps[nmap].getBoundsMeters (true); // using original affines
+			// double [][] src_bounds = ortho_maps[nmap].getBoundsMeters (true); // using original affines
+			double [][] src_bounds = ortho_maps[nmap].getBoundsMeters (false, affine); // using provided affines
 			final double [] src_center = {-src_bounds[0][0], -src_bounds[1][0]}; // x,y center offset in the source image
-			final double [][] affine = (affines != null)? affines[indx] : ortho_maps[nmap].affine; // only here use provided
 			final int src_width  = use_alt? ortho_maps[nmap].getAltData().width  : ortho_maps[nmap].getImageData().width;
 			final int src_height = use_alt? ortho_maps[nmap].getAltData().height : ortho_maps[nmap].getImageData().height;
-			final float [] src_img = use_alt? ortho_maps[nmap].getAltData().data : ortho_maps[nmap].getImageData().data;
 			if ((indx == 0) && (warp != null)) { // set center from the first image
 				warp.setRender(
 						zoom_level,            // int zoom_lev,
 						scaled_out_center[0],  // double px0, // in render pixels
 						scaled_out_center[1]); // // double py0);
 			}
+			final float [] src_img = use_alt? ortho_maps[nmap].getAltData().data : ortho_maps[nmap].getImageData().data;
 			final Rectangle warp_woi = ((indx == 1) && (warp != null))? warp.getRenderWOI() : null;
 			final Thread [] threads = ImageDtt.newThreadArray();
 			final AtomicInteger ai = new AtomicInteger(0);
			/*
			boolean tilt_alt = (ground_planes != null) && (ground_planes[indx] != null);
			final float [] src_img = use_alt?
					(tilt_alt? ortho_maps[nmap].getAltData().data.clone(): ortho_maps[nmap].getAltData().data) :
					ortho_maps[nmap].getImageData().data;
			if (tilt_alt) {
				// apply tilt to the source altitudes
				double [] vert_meters = ortho_maps[nmap].getVertMeters();
				// altitude at top-left pixel;
				final double top_left_alt =
						ground_planes[indx][2] - vert_meters[0]*ground_planes[indx][0] - vert_meters[1]*ground_planes[indx][1];
				final double [] tilt_XY = {ground_planes[indx][0]/src_scale, ground_planes[indx][1]/src_scale};
				for (int ithread = 0; ithread < threads.length; ithread++) {
					threads[ithread] = new Thread() {
						public void run() {
							for (int nPix = ai.getAndIncrement(); nPix < src_img.length; nPix = ai.getAndIncrement()) {
								int py = nPix / src_width;
								int px = nPix % src_width;
								src_img[nPix] -= (top_left_alt + px * tilt_XY[0] + py * tilt_XY[1]);
							}
						}
					};
				}
				ImageDtt.startAndJoin(threads);
				ai.set(0);
			}
			*/
 			for (int ithread = 0; ithread < threads.length; ithread++) {
 				threads[ithread] = new Thread() {
 					public void run() {
 						for (int nPix = ai.getAndIncrement(); nPix < ownd_len; nPix = ai.getAndIncrement()) {
 							int opX = nPix % ownd_width + obounds[0][0]; // absolute output pX, pY
 							int opY = nPix / ownd_width + obounds[1][0];
-							double dX = (opX - scaled_out_center[0]) / scale; // in original image scale
+							double dX = (opX - scaled_out_center[0]) / scale; // in meters
 							double dY = (opY - scaled_out_center[1]) / scale;
 							double [] xy_src = { // pixels of the source image
 									src_scale * (affine[0][0]*dX + affine[0][1]*dY + affine[0][2] + src_center[0]),
 									src_scale * (affine[1][0]*dX + affine[1][1]*dY + affine[1][2] + src_center[1])};
 							// limit to the source image
 							if ((((int) opX) == dbg_x) && (((int) opY) == dbg_y)) {
 								System.out.println("opX=" + opX + ", opy=" + opY);
 							}
 							if ((warp_woi != null) && (warp_woi.contains(opX, opY))) {
 								if ((opX == dbg_x) && (opY == dbg_y)) {
 									System.out.println("opX=" + opX + ", opy=" + opY);
 								}
 								double [] dxy = warp.getWarp(opX, opY);
 								xy_src[0] += dxy[0];
 								xy_src[1] += dxy[1];
 							}
 							if ((xy_src[0] >= 0) && (xy_src[0] < (src_width - 1)) && (xy_src[1] >= 0) && (xy_src[1] < (src_height - 1))) {
 								int [] ixy_src = {(int) Math.floor(xy_src[0]), (int) Math.floor(xy_src[1])};
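For reference, the inner loop above maps every output pixel back to source-image pixels: output pixel to meters (divide by scale), 2x3 affine plus the per-map center offset, then to source pixels (multiply by src_scale), optionally shifted by the warp correction. A compact sketch of just the affine part, under the same conventions (hypothetical helper, not part of the commit):

// Sketch (not from the commit): map an output pixel to source-image pixel coordinates
// with a 2x3 affine in meters, a source-center offset and pixel scales.
public class OutputToSourceSketch {
    static double[] outputToSource(int opX, int opY,
            double[] outCenter, double scale,       // output pixels per meter
            double[][] affine, double[] srcCenter,  // affine and center offset, in meters
            double srcScale) {                      // source pixels per meter
        double dX = (opX - outCenter[0]) / scale;   // meters relative to the reference center
        double dY = (opY - outCenter[1]) / scale;
        return new double[] {
                srcScale * (affine[0][0]*dX + affine[0][1]*dY + affine[0][2] + srcCenter[0]),
                srcScale * (affine[1][0]*dX + affine[1][1]*dY + affine[1][2] + srcCenter[1])};
    }
    public static void main(String[] args) {
        double[] xy = outputToSource(707, 615, new double[]{350, 300}, 10.0,
                new double[][]{{1, 0, 0}, {0, 1, 0}}, new double[]{5.0, 5.0}, 20.0);
        System.out.println(xy[0] + ", " + xy[1]); // identity affine: just rescaled offsets
    }
}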
...
...
@@ -821,14 +905,16 @@ public class OrthoMapsCollection implements Serializable{
 			double [][]   ground_planes,     // null - images, non-null altitudes. use new double[2][] for old way alt
 			int []        indices,           // null or which indices to use (normally just 2 for pairwise comparison)
 			boolean       bounds_to_indices,
 			double [][][] affines,           // null or [indices.length][2][3]
+			double [][]   equalize_in,
+			boolean       ignore_equalize,
 			FineXYCorr    warp,
 			int           zoom_level,
 			int []        wh,
 			int []        origin,            // maps[0] as a reference
 			double [][]   centers){
 		boolean use_alt = ground_planes != null;
-		final int dbg_x = 707, dbg_y = 615;
+		final int dbg_x = 707, dbg_y = -615;
 		int [][] bounds;
 		if (affines == null) {
 			bounds = getBoundsPixels( // should be for rectified, {-bounds[0][0], -bounds[0][1]} - exact center
...
...
@@ -862,6 +948,7 @@ public class OrthoMapsCollection implements Serializable{
 			final int findx = indx;
 			final int nmap = indices[indx]; // nmap;
 			final double [][] affine = (affines != null)? affines[indx] : ortho_maps[nmap].affine; // only here use provided
+			final double [] equalize = ignore_equalize? null : ((equalize_in != null)? equalize_in[indx] : ortho_maps[nmap].equalize);
 			Arrays.fill(dpixels[findx], Float.NaN);
 			final double scale = 1.0 / OrthoMap.getPixelSizeMeters(zoom_level);
 			final double src_scale = 1.0 / OrthoMap.getPixelSizeMeters(ortho_maps[nmap].orig_zoom_level); // pix per meter
...
...
@@ -960,7 +1047,10 @@ public class OrthoMapsCollection implements Serializable{
 										d01 * kxy[0] * (1.0 - kxy[1]) + d10 * (1.0 - kxy[0]) * kxy[1] + d11 * kxy[0] * kxy[1];
-								dpixels[findx][opX + opY * width] = (float) d;
+								if (equalize != null) {
+									d = equalize[0] * d + equalize[1];
+								}
+								dpixels[findx][opX + opY * width] = d;
 							}
 						}
 					}
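The change above applies the per-map intensity equalization right after bilinear interpolation: the interpolated value d is replaced by equalize[0]*d + equalize[1] before being stored. A small worked sketch of the same two steps (all numbers made up):

// Sketch (illustrative, not the library code): bilinear interpolation followed by
// the linear intensity equalization d' = a*d + b used in this commit.
public class EqualizeSketch {
    static double bilinear(double d00, double d01, double d10, double d11, double kx, double ky) {
        return  d00 * (1.0 - kx) * (1.0 - ky) +
                d01 * kx         * (1.0 - ky) +
                d10 * (1.0 - kx) * ky +
                d11 * kx         * ky;
    }
    public static void main(String[] args) {
        double d = bilinear(10, 20, 30, 40, 0.25, 0.5);   // 22.5
        double[] equalize = {1.1, -3.0};                  // made-up {scale, offset}
        double equalized = equalize[0] * d + equalize[1]; // 21.75
        System.out.println(d + " -> " + equalized);
    }
}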
...
...
@@ -2351,10 +2441,12 @@ public class OrthoMapsCollection implements Serializable{
 		int [] origin = new int [2];
 		double [][] centers = new double [indices.length][];
 		double [][] dmulti = renderMultiDouble (
 				null,        // double [][] ground_planes, // null - images, non-null altitudes. use new double[2][3] for old way alt
 				indices,     // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
 				true,        // boolean bounds_to_indices,
 				affines,     // double [][][] affines, // null or [indices.length][2][3]
+				null,        // double [][] equalize,
+				false,       // boolean ignore_equalize,
 				warp,        // FineXYCorr warp,,
 				zoom_level,  // int zoom_level,
 				wh,          // int [] wh,
...
...
@@ -2367,12 +2459,13 @@ public class OrthoMapsCollection implements Serializable{
 				indices,     // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
 				true,        // boolean bounds_to_indices,
 				affines,     // double [][][] affines, // null or [indices.length][2][3]
+				null,        // double [][] equalize,
+				true,        // boolean ignore_equalize,
 				warp,        // FineXYCorr warp,,
 				zoom_level,  // int zoom_level,
 				wh,          // int [] wh,
 				origin,      // int [] origin){ // maps[0] as a reference
 				null);       // centers); // double [][] centers) // already set with dmulti
 	}
...
...
@@ -4071,15 +4164,17 @@ public class OrthoMapsCollection implements Serializable{
 		int [] origin = new int [2];
 		double [][] centers = new double [indices.length][];
 		double [][] dmulti = renderMultiDouble (
 				null,               // double [][] ground_planes, // null - images, non-null altitudes. use new double[2][3] for old way alt
 				indices,            // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
 				bounds_to_indices,  // boolean bounds_to_indices,
 				null,               // affines, // double [][][] affines, // null or [indices.length][2][3]
+				null,               // double [][] equalize,
+				true,               // boolean ignore_equalize,
 				null,               // warp, // FineXYCorr warp,,
 				zoom_lev,           // int zoom_level,
 				wh,                 // int [] wh,
 				origin,             // int [] origin){ // maps[0] as a reference
 				centers);           // double [][] centers)
 		double [] overlaps = getFracOverlaps(dmulti); // final double [][] dmulti) {
...
...
@@ -4418,7 +4513,7 @@ public class OrthoMapsCollection implements Serializable{
 			if (!direct && (inv_match != null)) { // prefer inverse
 				use_inverse = true;
 			} else if (direct_match != null) {
-				if (skip_existing) {
+				if (skip_existing && (direct_match.overlap > 0)) { // do not skip old pairs - refine them
 					skip = true;
 				} else if (refine_existing) {
 					use_direct = true;
...
...
@@ -4751,15 +4846,17 @@ public class OrthoMapsCollection implements Serializable{
 		int [] origin = new int [2];
 		double [][] centers = show_centers? (new double [indices.length][]) : null;
 		double [][] dmulti = renderMultiDouble (
 				null,               // double [][] ground_planes, // null - images, non-null altitudes. use new double[2][3] for old way alt
 				indices,            // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
 				bounds_to_indices,  // boolean bounds_to_indices,
 				null,               // affines, // double [][][] affines, // null or [indices.length][2][3]
+				null,               // double [][] equalize,
+				true,               // boolean ignore_equalize,
 				null,               // warp, // FineXYCorr warp,,
 				zoom_lev,           // int zoom_level,
 				wh,                 // int [] wh,
 				origin,             // int [] origin){ // maps[0] as a reference
 				centers);           // double [][] centers)
 		if (margins > 0) {
 			addMargins(
 					dmulti,  // double [][] dmulti,
...
...
@@ -4788,12 +4885,14 @@ public class OrthoMapsCollection implements Serializable{
 				new double [indices.length][],  // double [][] ground_planes, // null - images, non-null altitudes. use new double[2][3] for old way alt
 				indices,            // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
 				bounds_to_indices,  // boolean bounds_to_indices,
 				null,               // affines, // double [][][] affines, // null or [indices.length][2][3]
+				null,               // double [][] equalize,
+				true,               // boolean ignore_equalize,
 				null,               // warp, // FineXYCorr warp,,
 				zoom_lev,           // int zoom_level,
 				wh,                 // int [] wh,
 				origin,             // int [] origin){ // maps[0] as a reference
 				centers);           // double [][] centers)
 		if (margins > 0) {
 			addMargins(
 					dmulti,  // double [][] dmulti,
...
...
@@ -4818,15 +4917,17 @@ public class OrthoMapsCollection implements Serializable{
 		int [] origin = new int [2];
 		double [][] centers = show_centers? (new double [indices.length][]) : null;
 		double [][] dmulti = renderMultiDouble (
 				null,               // double [][] ground_planes, // null - images, non-null altitudes. use new double[2][3] for old way alt
 				indices,            // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
 				bounds_to_indices,  // boolean bounds_to_indices,
 				null,               // affines, // double [][][] affines, // null or [indices.length][2][3]
+				null,               // double [][] equalize,
+				true,               // boolean ignore_equalize,
 				null,               // warp, // FineXYCorr warp,,
 				zoom_lev,           // int zoom_level,
 				wh,                 // int [] wh,
 				origin,             // int [] origin){ // maps[0] as a reference
 				centers);           // double [][] centers)
 		if (margins > 0) {
 			addMargins(
 					dmulti,  // double [][] dmulti,
...
...
@@ -4857,15 +4958,17 @@ public class OrthoMapsCollection implements Serializable{
 		int [] origin = new int [2];
 		double [][] centers = show_centers? (new double [indices.length][]) : null;
 		double [][] dmulti = renderMultiDouble (
 				null,               // double [][] ground_planes, // null - images, non-null altitudes. use new double[2][3] for old way alt
 				indices,            // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
 				bounds_to_indices,  // boolean bounds_to_indices,
 				null,               // affines, // double [][][] affines, // null or [indices.length][2][3]
+				null,               // double [][] equalize,
+				true,               // boolean ignore_equalize,
 				null,               // warp, // FineXYCorr warp,,
 				zoom_lev,           // int zoom_level,
 				wh,                 // int [] wh,
 				origin,             // int [] origin){ // maps[0] as a reference
 				centers);           // double [][] centers)
 		if (margins > 0) {
 			addMargins(
 					dmulti,  // double [][] dmulti,
...
...
src/main/java/com/elphel/imagej/orthomosaic/OrthoMultiLMA.java
...
...
@@ -40,7 +40,8 @@ import ij.ImagePlus;
 import ij.gui.PointRoi;

 public class OrthoMultiLMA {
 	final boolean move_only;
+	final boolean corr_avg; // correct average skew, tilt, scale
 	private double [] last_rms        = null; // {rms, rms_pure}, matching this.vector
 	private double [] good_or_bad_rms = null; // just for diagnostics, to read last (failed) rms
 	private double [] initial_rms     = null; // {rms, rms_pure}, first-calcualted rms
...
...
@@ -61,7 +62,11 @@ public class OrthoMultiLMA {
 	private double [][] offsets = null; // scene offsets (rd)
 	private int num_scenes = 0;
 	private int num_pairs = 0;
-	public OrthoMultiLMA (boolean move_only) {
+	public OrthoMultiLMA (boolean corr_avg, boolean move_only) {
+		this.corr_avg = corr_avg && !move_only;
 		this.move_only = move_only;
 	}
 	public static boolean testMultiLMA (
...
...
@@ -122,6 +127,8 @@ public class OrthoMultiLMA {
 				indices,     // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
 				true,        // boolean bounds_to_indices,
 				affines_a,   // affines_0d, // affines, // double [][][] affines, // null or [indices.length][2][3]
+				null,        // double [][] equalize,
+				true,        // boolean ignore_equalize,
 				null,        // warp, // FineXYCorr warp,,
 				zoom_level,  // int zoom_level,
 				wh,          // int [] wh,
...
...
@@ -147,6 +154,8 @@ public class OrthoMultiLMA {
 				indices,      // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
 				true,         // boolean bounds_to_indices,
 				affines_sym,  // affines, // double [][][] affines, // null or [indices.length][2][3]
+				null,         // double [][] equalize,
+				true,         // boolean ignore_equalize,
 				null,         // warp, // FineXYCorr warp,,
 				zoom_level,   // int zoom_level,
 				wh,           // int [] wh,
...
...
@@ -213,19 +222,26 @@ public class OrthoMultiLMA {
 		int debugLevel = 1;
 		boolean move_only = false;
 		double [] val_coord = null; // 1 - valid, 0 - invalid, minimize coordinates errors
-		double position_pull = 0.001;
 		boolean use_inv = false;
 		double overlap_pow = 2.0; // match weight as overlap fraction to this power
+		double skew_pull  = 1.0;
+		double tilt_pull  = 1.0;
+		double scale_pull = 0.1; // .0;
+		double position_pull = 0.0001;
+		boolean corr_avg = (skew_pull > 0) || (tilt_pull > 0) || (scale_pull > 0);
 		int [] indices = maps_collection.getScenesSelection(
 				null,               // boolean select_all,
 				" to build a map"); // String purpose)
-		OrthoMultiLMA oml = new OrthoMultiLMA(move_only);
+		OrthoMultiLMA oml = new OrthoMultiLMA(corr_avg, move_only);
 		double lambda = 0.1;
 		double lambda_scale_good = 0.5;
 		double lambda_scale_bad = 8.0;
 		double lambda_max = 100;
 		double rms_diff = 0.000001;
-		int num_iter = 20;
+		int num_iter = 100; // 50;
 		boolean last_run = false;
 		oml.prepareLMA(
 				clt_parameters,  // CLTParameters clt_parameters,
...
...
@@ -233,6 +249,9 @@ public class OrthoMultiLMA {
 				indices,        // int [] indices,
 				val_coord,      // double [] val_coord, // 1 - valid, 0 - invalid, minimize coordinates errors
 				position_pull,  // double position_pull,
+				skew_pull,      // double skew_pull,
+				tilt_pull,      // double tilt_pull,
+				scale_pull,     // double scale_pull,
 				use_inv,        // boolean use_inv,
 				overlap_pow,    // double overlap_pow, // match weight as overlap fraction to this power
 				debugLevel);    // int debugLevel)
...
...
@@ -256,7 +275,7 @@ public class OrthoMultiLMA {
//Get and apply affines
//oml.updateAffines(maps_collection);
		double [][][] affines = oml.getAffines();
		double [] fx = oml.getFx();
// test
/*
PairwiseOrthoMatch match = maps_collection.ortho_maps[indices[0]].getMatch(maps_collection.ortho_maps[indices[1]].getName());
...
...
@@ -284,6 +303,8 @@ public class OrthoMultiLMA {
 				indices,     // int [] indices, // null or which indices to use (normally just 2 for pairwise comparison)
 				true,        // boolean bounds_to_indices,
 				affines,     // null, // affines, // double [][][] affines, // null or [indices.length][2][3]
+				null,        // double [][] equalize,
+				true,        // boolean ignore_equalize,
 				null,        // warp, // FineXYCorr warp,,
 				zoom_level,  // int zoom_level,
 				wh,          // int [] wh,
...
...
@@ -307,6 +328,7 @@ public class OrthoMultiLMA {
		roi.setOptions("label");
		imp_multi.setRoi(roi);
		imp_multi.show();
/*
double [][] affine = getAffineAndDerivatives(
move_only, //boolean move_only,
affines[0], // double [][] affine00,
...
...
@@ -317,7 +339,7 @@ public class OrthoMultiLMA {
affines[0],
affines[1],
oml.offsets[0]);
*/
/*
...
...
@@ -336,6 +358,16 @@ public class OrthoMultiLMA {
 		return 0;
 	}
+	public double [] getParameters() {
+		return parameters_vector;
+	}
+	public double [] getFx() {
+		return getFxDerivs(
+				parameters_vector, // double [] vector,
+				null,              // final double [][] jt, // should be null or initialized with [vector.length][]
+				0);                // final int debug_level)
+	}
 	public int prepareLMA (
 			CLTParameters clt_parameters,
...
...
@@ -343,6 +375,9 @@ public class OrthoMultiLMA {
 			int []    indices,
 			double [] val_coord,      // 1 - valid, 0 - invalid, minimize coordinates errors
 			double    position_pull,
+			double    skew_pull,
+			double    tilt_pull,
+			double    scale_pull,
 			boolean   use_inv,
 			double    overlap_pow,    // match weight as overlap fraction to this power
 			int       debugLevel) {
...
...
@@ -383,7 +418,6 @@ public class OrthoMultiLMA {
 		for (int np = 0; np < num_pairs; np++) {
 			Point pair = pairs_list.get(np);
 			pairs[np] = new int [] {pair.x, pair.y};
 			// double [] enuOffset = maps_collection.ortho_maps[indices[pair.x]].enuOffsetTo(maps_collection.ortho_maps[indices[pair.y]]);
 			double [] enuOffset = maps_collection.ortho_maps[indices[pair.y]].enuOffsetTo(maps_collection.ortho_maps[indices[pair.x]]);
 			offsets[np] = new double [] {enuOffset[0], -enuOffset[1]}; // {right,down} of the image
 		}
...
...
@@ -396,9 +430,8 @@ public class OrthoMultiLMA {
 		int n62 = move_only? 2 : 6;
 		int n13 = move_only? 1 : 3;
-		y_vector = new double [n62 * num_pairs + 2 * num_scenes]; // last 2 * num_scenes will stay 0
-		weights  = new double [n62 * num_pairs + 2 * num_scenes];
+		y_vector = new double [n62 * num_pairs + 2 * num_scenes + (corr_avg? 3 : 0)]; // last 2 * num_scenes + 3 will stay 0
+		weights  = new double [n62 * num_pairs + 2 * num_scenes + (corr_avg? 3 : 0)];
 		parameters_vector = new double [n62 * num_scenes]; // maybe will need move-only mode?
 		for (int n = 0; n < num_scenes; n++) {
 			double [][] affine = maps_collection.ortho_maps[indices[n]].getAffine();
...
...
@@ -441,7 +474,12 @@ public class OrthoMultiLMA {
 				weights[wi++] = w;
 				weights[wi++] = w;
 				sw += 2 * w;
 			}
 		}
+		if (corr_avg) {
+			weights[wi++] = skew_pull;
+			weights[wi++] = tilt_pull;
+			weights[wi++] = scale_pull;
+		}
 		pure_weight = swp / sw;
 		double s = 1.0 / sw;
 		for (int i = 0; i < weights.length; i++) {
...
...
@@ -1166,6 +1204,49 @@ public class OrthoMultiLMA {
 			};
 		}
 		ImageDtt.startAndJoin(threads);
 		ai.set(0);
+		if (corr_avg) { // && ((skew_pull > 0) || (tilt_pull > 0) || (scale_pull > 0))) {
+			final int pull_indices = weights.length - 3;
+			for (int nScene = 0; nScene < num_scenes; nScene++) {
+				int indx = 6 * nScene;
+				double a00 = vector[indx + 0], a01 = vector[indx + 1], a10 = vector[indx + 3], a11 = vector[indx + 4];
+				double aa1 = 0.5 * (a00 * a00 + a10 * a10);
+				double aa2 = 0.5 * (a01 * a01 + a11 * a11);
+				fx[pull_indices + 0] += a00 * a01 + a10 * a11;
+				fx[pull_indices + 1] += aa1 - aa2;
+				fx[pull_indices + 2] += aa1 + aa2 - 1.0;
+			}
+			if (jt != null) {
+				for (int ithread = 0; ithread < threads.length; ithread++) {
+					threads[ithread] = new Thread() {
+						public void run() {
+							for (int nScene = ai.getAndIncrement(); nScene < num_scenes; nScene = ai.getAndIncrement()) {
+								int indx = 6 * nScene;
+								double a00 = vector[indx + 0], a01 = vector[indx + 1], a10 = vector[indx + 3], a11 = vector[indx + 4];
+								jt[indx + 0][pull_indices + 0] = a01;
+								jt[indx + 1][pull_indices + 0] = a00;
+								jt[indx + 3][pull_indices + 0] = a11;
+								jt[indx + 4][pull_indices + 0] = a10;
+								jt[indx + 0][pull_indices + 1] = a00;
+								jt[indx + 1][pull_indices + 1] = -a01;
+								jt[indx + 3][pull_indices + 1] = a10;
+								jt[indx + 4][pull_indices + 1] = -a11;
+								jt[indx + 0][pull_indices + 2] = a00;
+								jt[indx + 1][pull_indices + 2] = a01;
+								jt[indx + 3][pull_indices + 2] = a10;
+								jt[indx + 4][pull_indices + 2] = a11;
+							}
+						}
+					};
+				}
+				ImageDtt.startAndJoin(threads);
+				ai.set(0);
+			}
+		}
 		return fx;
 	}
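When corr_avg is set, the block above adds three extra residuals built from each scene's linear affine part A = {{a00, a01}, {a10, a11}}: a skew term (a00*a01 + a10*a11), a tilt/anisotropy term ((a00^2 + a10^2 - a01^2 - a11^2)/2) and a scale term ((a00^2 + a10^2 + a01^2 + a11^2)/2 - 1), weighted by skew_pull, tilt_pull and scale_pull; the jt[...] assignments are their partial derivatives. A short sketch with illustrative numbers (not library code):

// Sketch: per-scene regularization terms and their analytic partials,
// matching the jt[...] assignments in the hunk above.
public class PullTermsSketch {
    public static void main(String[] args) {
        double a00 = 1.01, a01 = 0.02, a10 = -0.03, a11 = 0.99;
        double skew  = a00*a01 + a10*a11;                                     // column skew
        double tilt  = 0.5*(a00*a00 + a10*a10) - 0.5*(a01*a01 + a11*a11);     // x/y anisotropy
        double scale = 0.5*(a00*a00 + a10*a10) + 0.5*(a01*a01 + a11*a11) - 1; // overall scale
        // d(skew)/d{a00,a01,a10,a11}  = { a01,  a00, a11,  a10}
        // d(tilt)/d{a00,a01,a10,a11}  = { a00, -a01, a10, -a11}
        // d(scale)/d{a00,a01,a10,a11} = { a00,  a01, a10,  a11}
        System.out.println(skew + " " + tilt + " " + scale);
    }
}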
...
...
src/main/java/com/elphel/imagej/orthomosaic/PairwiseOrthoMatch.java
...
...
@@ -11,10 +11,12 @@ public class PairwiseOrthoMatch implements Serializable {
 	private static final long serialVersionUID = 1L;
 	public double [][] affine = new double [2][3];
 	public transient double [][] jtj = new double [6][6];
 	public int zoom_lev;
 	public double rms = Double.NaN;
 	public transient int [] nxy = null;     // not saved, just to communicate for logging
 	public transient double overlap = 0.0;
+	public transient double [] equalize1to0 = {1, 0}; // value1 = equalize2to1[0]*value2+equalize2to1[1]

 	public PairwiseOrthoMatch() {
 	}
...
...
@@ -30,6 +32,11 @@ public class PairwiseOrthoMatch implements Serializable {
 		this.rms = rms;
 		this.overlap = overlap;
 	}
+	public void setEqualize2to1(double [] equalize2to1) {
+		this.equalize1to0 = equalize2to1;
+	}
 	public PairwiseOrthoMatch clone() {
 		double [][] affine = {this.affine[0].clone(), this.affine[1].clone()};
 		double [][] jtj = new double [this.jtj.length][];
...
...
@@ -45,6 +52,7 @@ public class PairwiseOrthoMatch implements Serializable {
 		if (nxy != null) {
 			pom.nxy = nxy.clone();
 		}
+		pom.equalize1to0 = this.equalize1to0.clone();
 		return pom;
 	}
/**
...
...
@@ -102,6 +110,7 @@ public class PairwiseOrthoMatch implements Serializable {
 				rd[0]*affine[1][0] + rd[1]*(affine[1][1] - 1.0)};
 		affine[0][2] += corr[0];
 		affine[1][2] += corr[1];
+		inverted_match.setEqualize2to1(new double [] {1/equalize1to0[0], -equalize1to0[1]/equalize1to0[0]});
 		return inverted_match;
 	}
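With the convention value1 = e[0]*value2 + e[1], inverting the match requires value2 = (value1 - e[1])/e[0], i.e. the pair {1/e[0], -e[1]/e[0]} passed to setEqualize2to1 above. A quick numeric round-trip check (coefficients made up, not library code):

// Quick check that {1/e0, -e1/e0} inverts value1 = e0*value2 + e1.
public class EqualizeInverseCheck {
    public static void main(String[] args) {
        double[] e = {1.2, -5.0};                   // value1 = 1.2*value2 - 5.0
        double[] inv = {1.0/e[0], -e[1]/e[0]};      // value2 = value1/1.2 + 5.0/1.2
        double value2 = 100.0;
        double value1 = e[0]*value2 + e[1];         // 115.0
        System.out.println(inv[0]*value1 + inv[1]); // ~100.0 (round-trip up to rounding)
    }
}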
...
...
@@ -168,10 +177,16 @@ public class PairwiseOrthoMatch implements Serializable {
 		affine = new double [][] {
 				{A.get(0,0), A.get(0,1), B.get(0,0)},
 				{A.get(1,0), A.get(1,1), B.get(1,0)}};
 		// jtj = null;
 		// rms = Double.NaN; // double rms,
 		// zoom_lev = 0; // int zoom_lev)
 	}
+	public void combineEqualize(double [] equalize0, double [] equalize1) {
+		setEqualize2to1(new double [] {
+				equalize1[0]/equalize0[0],
+				equalize1[1] - equalize1[0]/equalize0[0]*equalize0[1]});
+	}
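combineEqualize merges two per-map correction pairs into one pairwise pair. If each map relates to a common reference as value_i = e_i[0]*ref + e_i[1] (this reading is inferred from the formula, not stated in the diff), eliminating ref gives value_1 = (e_1[0]/e_0[0])*value_0 + e_1[1] - (e_1[0]/e_0[0])*e_0[1], which is exactly the pair constructed above. A numeric check (not library code):

// Numeric check of the composition used in combineEqualize.
public class CombineEqualizeCheck {
    public static void main(String[] args) {
        double[] e0 = {1.5, 2.0}, e1 = {0.8, -1.0};
        double[] c = {e1[0]/e0[0], e1[1] - e1[0]/e0[0]*e0[1]}; // same formula as the commit
        double ref = 10.0;
        double v0 = e0[0]*ref + e0[1];                         // 17.0
        double v1 = e1[0]*ref + e1[1];                         // 7.0
        System.out.println((c[0]*v0 + c[1]) + " vs " + v1);    // both ~7.0
    }
}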
 	public static double [][] combineAffines(
 			double [][] affine0,
 			double [][] affine,  // differential
...
...
@@ -203,6 +218,8 @@ public class PairwiseOrthoMatch implements Serializable {
 			}
 		}
 		oos.writeObject(overlap);
+		oos.writeObject(equalize1to0);
 	}
 	private void readObject(ObjectInputStream ois) throws ClassNotFoundException, IOException {
 		ois.defaultReadObject();
...
...
@@ -216,6 +233,8 @@ public class PairwiseOrthoMatch implements Serializable {
 			}
 		}
 		overlap = (Double) ois.readObject();
+		// equalize1to0 = new double[] {1,0};
+		equalize1to0 = (double []) ois.readObject();
 	}
 	//private void readObjectNoData() throws ObjectStreamException; // used to modify default values
 }
src/main/java/com/elphel/imagej/tileprocessor/TileNeibs.java
...
...
@@ -1356,4 +1356,5 @@ public class TileNeibs{
 		}
 		return filled;
 	}
 }