Commit f9ea6e08 authored by Andrey Filippov

Started OrientationSceneLMA

parent ecbbff57
@@ -177,7 +177,7 @@ public class ComboMatch {
boolean use_saved_collection = true; // false;
- boolean save_collection = true;
+ boolean save_collection = false; // true;
boolean process_correlation = true; // use false to save new version of data
int num_tries_fit = 10;
boolean update_match = true; // use false to save new version of data
...
@@ -258,7 +258,6 @@ public class ERSTiltLMA {
weight_pure *= k;
last_jt = new double [parameters_vector.length][];
return weights.length;
}
private double [] getFxDerivs(
@@ -331,6 +330,8 @@ public class ERSTiltLMA {
ImageDtt.startAndJoin(threads);
return fX;
}
private double [][] getFxDerivsDelta(
double [] vector,
final double delta,
...
/**
** OrientationSceneLMA - Fit multiple scenes' orientations/scales when some
** pairwise mutual orientations/scales are known as quaternions (not unit ones,
** combining rotation and scale)
**
** Copyright (C) 2024 Elphel, Inc.
**
** -----------------------------------------------------------------------------**
**
** OrientationSceneLMA.java is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program. If not, see <http://www.gnu.org/licenses/>.
** -----------------------------------------------------------------------------**
**
*/
package com.elphel.imagej.orthomosaic;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
import com.elphel.imagej.tileprocessor.ImageDtt;
import Jama.Matrix;
public class OrientationSceneLMA {
public int [] indices;
public int [][] cpairs = null;
public int num_scenes = 0;
public int num_pairs = 0;
private double [] last_rms = null; // {rms, rms_pure}, matching this.vector
private double [] good_or_bad_rms = null; // just for diagnostics, to read last (failed) rms
private double [] initial_rms = null; // {rms, rms_pure}, first-calculated rms
private double [] parameters_vector = null;
private double [][] qpairs = null;
private double weight_pure = 0;
private double [] weights; // normalized so sum is 1.0 for all - samples and extra regularization
private double [] last_ymfx = null;
private double [][] last_jt = null;
public boolean last3only = false; // true; // debug feature
public double delta= 1e-7;
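// Parameter vector layout: 4 values per scene - a non-unit quaternion whose direction encodes
// orientation and whose norm encodes scale; all scenes start at the identity {1,0,0,0}.
// Weights/fX layout: 4 entries per pair ({scale-1, qn1, qn2, qn3} mismatch errors) followed by
// 4 regularization ("pull") entries that sum the same components over all scenes.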
public int prepareLMA(
int [] indices, // should all be used
int [][] cpairs,
double [] weights_pairs, // from matching tilts(flatness) (and worst sfm, per-pair rmse)?
double weight_rot, // >0 weight of pairs errors in qn3
double weight_tilt, // >0 weight of pairs errors in qn1, qn2
double weight_scale, // >0 weight in pairs scale-1.0 errors
double pull, // 0 <= pull <1 - fraction of all RMS contributors
double pull_rots, // >=0 weight of sum of rotations, may be 0, normalized by pull value
double pull_tilts, // >=0 weights of sum of qn1 and qn2 of scenes, normalized by pull value
double pull_scales, // >=0 weights of scales of scenes, normalized by pull value
double [][] qpairs, // [pair][4] quaternions for pairs - orientation and scale (non-unity quaternions)
int debug_level) {
this.indices = indices; // maybe not needed
this.cpairs = cpairs;
num_scenes = indices.length;
num_pairs = cpairs.length;
// qpairs = new double [num_pairs][];
if (weights_pairs == null) {
weights_pairs = new double[num_pairs];
Arrays.fill(weights_pairs, 1.0);
}
weights = new double [num_pairs*4+4];
double sum_weights = 0;
double [] weights4 = {weight_scale, weight_tilt/2, weight_tilt/2, weight_rot};
double [] pull4 = {pull_scales, pull_tilts/2, pull_tilts/2, pull_rots};
double sum_pull4 = 0;
for (double w: weights4) sum_pull4+= w;
if (sum_pull4 <= 0) {
pull = 0;
}
for (int npair = 0; npair < num_pairs; npair++) {
for (int i = 0; i < 4; i++) {
int indx = 4* npair+ i;
double w = weights_pairs[npair] * weights4[i];
weights[ indx] = w;
sum_weights += w;
}
}
weight_pure = 1 - pull;
double k1 = (1.0 - pull)/sum_weights;
double k2 = (pull>0) ? (pull/sum_pull4): 0;
for (int i = 0; i < 4*num_pairs; i++) {
weights[i] *= k1;
}
for (int i = 0; i < 4; i++) {
weights[4 * num_pairs + i] = k2*weights4[i];
}
parameters_vector = new double [4 * num_scenes];
for (int i = 0; i < num_scenes; i++) {
parameters_vector[4*i] = 1.0; //
}
last_jt = new double [parameters_vector.length][];
this.qpairs = qpairs;
return weights.length;
}
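// Calculate fX (and, if jt != null, the transposed Jacobian jt[parameter][sample]):
// fX[0..4*num_pairs-1] are per-pair {scale-1, qn1, qn2, qn3} mismatch errors,
// fX[4*num_pairs..4*num_pairs+3] are the summed regularization (pull) terms over all scenes.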
private double [] getFxDerivs(
final double [] vector,
final double [][] jt, // should be null or initialized with [vector.length][]
final int debug_level)
{
double [] fX = new double [weights.length]; // num_pairs + vector.length];
if (jt != null) {
for (int i = 0; i < jt.length; i++) {
jt[i] = new double [weights.length]; // weights.length];
}
}
final Thread[] threads = ImageDtt.newThreadArray();
final AtomicInteger ai = new AtomicInteger(0);
final AtomicInteger ati = new AtomicInteger(0);
// first cycle - per-pair errors (scale and orientation mismatch of each scene pair)
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
double [] qscene1 = new double[4];
double [] qscene2 = new double[4];
double [][][] derivs = (jt!=null) ? (new double [2][][]): null;
for (int npair = ai.getAndIncrement(); npair < num_pairs; npair = ai.getAndIncrement()) {
int [] scene_ind = {4 * cpairs[npair][0], 4 * cpairs[npair][1]};
System.arraycopy(vector, scene_ind[0], qscene1, 0, 4);
System.arraycopy(vector, scene_ind[1], qscene2, 0, 4);
double [] fx_frag =getPairScaleDirError( //{scale-1, qn1,qn2,qn3}
qscene1, // double [] qscene1,
qscene2, // double [] qscene2,
qpairs[npair], // double [] qpair12,
derivs); // double [][][] derivs)
System.arraycopy(
fx_frag,
0,
fX,
fx_frag.length*npair,
fx_frag.length);
if (jt!=null) {
for (int iscene = 0; iscene <2; iscene++) {
for (int npar = 0; npar <4; npar++) {
System.arraycopy(
derivs[iscene][npar], // null
0,
jt[scene_ind[iscene]+npar],
fx_frag.length*npair,
fx_frag.length);
}
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
ai.set(0);
// second cycle - regularization (average scale diffs, q1, q2, q3)
double [][] pull_threads = new double [threads.length][4];
final int pull_index = num_pairs*4; // start of pull sums in fx
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
int nthread = ati.getAndIncrement();
double [] qscene = new double[4];
for (int nscene = ai.getAndIncrement(); nscene < num_scenes; nscene = ai.getAndIncrement()) {
System.arraycopy(vector, nscene*4, qscene, 0, 4);
double [] qscene_norm=QuatUtils.normalize(qscene); // use q1,q2,q3, maybe q1 and q2 (from DEM) lower weight than q3 (from images)
double scale_diff = QuatUtils.norm(qscene)-1.0;
qscene_norm[0] = scale_diff; // {scale_diff, qn1, qn2, qn3};
for (int i = 0; i < 4; i++) {
pull_threads[nthread][i] += qscene_norm[i];
}
if (jt != null) {
double [][] dnq_dq = QuatUtils.dnormalize_dq(qscene);
double [] dscale = QuatUtils.dscale_dq(qscene);
dnq_dq[0] = dscale;
for (int npar = 0; npar <4; npar++) {
System.arraycopy(
dnq_dq[npar],
0 ,
jt[nscene*4+npar],
pull_index,
4);
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
// sum partial sums from threads
for (int nthread = 0; nthread < pull_threads.length; nthread++ ) {
for (int i = 0; i < 4; i++) {
fX[pull_index+i] += pull_threads[nthread][i];
}
}
return fX;
}
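// Numeric (central-difference) Jacobian with step delta, used to verify the analytic
// derivatives of getFxDerivs() in compareJT(); only samples with non-zero weight are filled.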
private double [][] getFxDerivsDelta(
double [] vector,
final double delta,
final int debug_level) {
double [][] jt = new double [vector.length][weights.length];
for (int nv = 0; nv < vector.length; nv++) {
double [] vpm = vector.clone();
vpm[nv]+= 0.5*delta;
double [] fx_p = getFxDerivs(
vpm,
null, // final double [][] jt, // should be null or initialized with [vector.length][]
debug_level);
vpm[nv]-= delta;
double [] fx_m = getFxDerivs(
vpm,
null, // final double [][] jt, // should be null or initialized with [vector.length][]
debug_level);
for (int i = 0; i < weights.length; i++) if (weights[i] > 0) {
jt[nv][i] = (fx_p[i]-fx_m[i])/delta;
}
}
return jt;
}
private double compareJT(
double [] vector,
double delta,
boolean last3only) { // do not process samples - they are tested before
double [] errors=new double [vector.length];
double [][] jt = new double [vector.length][];
System.out.print("Parameters vector = [");
for (int i = 0; i < vector.length; i++) {
System.out.print(vector[i]);
if (i < (vector.length -1)) System.out.print(", ");
}
System.out.println("]");
getFxDerivs(
vector,
jt, // final double [][] jt, // should be null or initialized with [vector.length][]
1); // debug_level);
double [][] jt_delta = getFxDerivsDelta(
vector, // double [] vector,
delta, // final double delta,
-1); // final int debug_level)
int start_index = last3only? (weights.length-3) : 0;
for (int n = start_index; n < weights.length; n++) if (weights[n] > 0) {
System.out.print(String.format("%3d",n));
for (int i = 0; i < vector.length; i++) {
System.out.print(String.format("\t%12.9f",jt[i][n]));
}
for (int i = 0; i < vector.length; i++) {
System.out.print(String.format("\t%12.9f",jt_delta[i][n]));
}
for (int i = 0; i < vector.length; i++) {
System.out.print(String.format("\t%12.9f",jt[i][n]-jt_delta[i][n]));
}
System.out.println();
for (int i = 0; i < vector.length; i++) {
errors[i] = Math.max(errors[i], jt[i][n]-jt_delta[i][n]);
}
}
for (int i = 0; i < vector.length; i++) {
System.out.print("\t\t");
}
for (int i = 0; i < vector.length; i++) {
System.out.print(String.format("\t%12.9f",errors[i]));
}
double err=0;
for (int i = 0; i < vector.length; i++) {
err = Math.max(errors[i], err);
}
return err;
}
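// The target y is implicitly all zeros (every pair error and pull term is driven to zero),
// so the weighted residual is just -weights[n]*fx[n]. Also accumulates the full RMS and
// the "pure" (pairs-only) RMS into rms_fp.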
private double [] getYminusFxWeighted(
final double [] fx,
final double [] rms_fp // null or [2]
) {
final Thread[] threads = ImageDtt.newThreadArray();
final AtomicInteger ai = new AtomicInteger(0);
final AtomicInteger ati = new AtomicInteger(0);
final double [] wymfw = new double [fx.length];
double [] swd2 = new double[threads.length];
for (int ithread = 0; ithread < threads.length; ithread++) { // first sum for pairs
threads[ithread] = new Thread() {
public void run() {
int nthread = ati.getAndIncrement();
for (int n = ai.getAndIncrement(); n < 4*num_pairs; n = ai.getAndIncrement()) {
double d = -fx[n]; // - fx[n]; // +y_vector[i]
double wd = d * weights[n];
wymfw[n] = wd;
swd2[nthread] += d * wd;
}
}
};
}
ImageDtt.startAndJoin(threads);
double s_rms_pure = 0;
for (int n = 0; n < swd2.length; n++) {
s_rms_pure += swd2[n];
}
// System.out.println("ai.get()="+ai.get());
// Important to reset: after the first cycle ai is left up to 16 (the number of threads)
// beyond the loop limit, because each thread increments first and only then tests (n < 4*num_pairs)
ai.set(4*num_pairs);
ati.set(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
int nthread = ati.getAndIncrement();
for (int n = ai.getAndIncrement(); n < fx.length; n = ai.getAndIncrement()) {
double d = -fx[n]; // - fx[n]; // +y_vector[i]
double wd = d * weights[n];
wymfw[n] = wd;
swd2[nthread] += d * wd;
}
}
};
}
ImageDtt.startAndJoin(threads);
double s_rms = 0; // start from scratch
for (int n = 0; n < swd2.length; n++) {
s_rms += swd2[n];
}
if (rms_fp != null) {
rms_fp[0] = Math.sqrt(s_rms);
rms_fp[1] = Math.sqrt(s_rms_pure/weight_pure);
}
return wymfw;
}
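// LMA driver: repeats lmaStep(), decreasing lambda by lambda_scale_good after a successful
// step and increasing it by lambda_scale_bad (up to lambda_max) after a failed one, until the
// relative RMS improvement drops below rms_diff or num_iter steps are exhausted.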
public int runLma( // <0 - failed, >=0 iteration number (1 - immediately)
double lambda, // 0.1
double lambda_scale_good,// 0.5
double lambda_scale_bad, // 8.0
double lambda_max, // 100
double rms_diff, // 0.001
int num_iter, // 20
boolean last_run,
String dbg_prefix,
int debug_level)
{
boolean [] rslt = {false,false};
this.last_rms = null; // remove?
int iter = 0;
if (dbg_prefix != null) {
// debugStateImage(dbg_prefix+"-initial");
}
for (iter = 0; iter < num_iter; iter++) {
rslt = lmaStep(
lambda,
rms_diff,
debug_level);
if (dbg_prefix != null) {
// debugStateImage(dbg_prefix+"-step_"+iter);
}
if (rslt == null) {
return -1; // false; // need to check
}
if (debug_level > 1) {
System.out.println("LMA step"+String.format("%3d",iter)+": {"+rslt[0]+","+rslt[1]+"} full RMS= "+good_or_bad_rms[0]+
" ("+initial_rms[0]+"), pure RMS="+good_or_bad_rms[1]+" ("+initial_rms[1]+") + lambda="+lambda);
}
if (rslt[1]) {
break;
}
if (rslt[0]) { // good
lambda *= lambda_scale_good;
} else {
lambda *= lambda_scale_bad;
if (lambda > lambda_max) {
break; // not used in lwir
}
}
}
if (rslt[0]) { // better
if (iter >= num_iter) { // better, but num tries exceeded
if (debug_level > 1) System.out.println("Step "+iter+": Improved, but number of steps exceeded maximal");
} else {
if (debug_level > 1) System.out.println("Step "+iter+": LMA: Success");
}
} else { // improved over initial ?
if (last_rms[0] < initial_rms[0]) { // NaN
rslt[0] = true;
if (debug_level > 1) System.out.println("Step "+iter+": Failed to converge, but result improved over initial");
} else {
if (debug_level > 1) System.out.println("Step "+iter+": Failed to converge");
}
}
boolean show_intermediate = true;
if (show_intermediate && (debug_level > 0)) {
System.out.println("LMA: full RMS="+last_rms[0]+" ("+initial_rms[0]+"), pure RMS="+last_rms[1]+" ("+initial_rms[1]+") + lambda="+lambda);
}
if (debug_level > 2){
System.out.println("iteration="+iter);
}
if (debug_level > 0) {
if ((debug_level > 1) || last_run) { // (iter == 1) || last_run) {
if (!show_intermediate) {
System.out.println("LMA: iter="+iter+", full RMS="+last_rms[0]+" ("+initial_rms[0]+"), pure RMS="+last_rms[1]+" ("+initial_rms[1]+") + lambda="+lambda);
}
}
}
if ((debug_level > -2) && !rslt[0]) { // failed
if ((debug_level > 1) || (iter == 1) || last_run) {
System.out.println("LMA failed on iteration = "+iter);
}
System.out.println();
}
return rslt[0]? iter : -1;
}
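// Single Levenberg-Marquardt step: solves (Jt*W*J + lambda*diag(Jt*W*J)) * delta = Jt*W*(y-fx),
// applies delta to the parameter vector and keeps it only if the full RMS improved.
// Returns {improved, converged}; null means LMA needs to be re-initialized.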
private boolean [] lmaStep(
double lambda,
double rms_diff,
int debug_level) {
boolean [] rslt = {false,false};
// maybe the following if() branch is not needed - already done in prepareLMA !
if (this.last_rms == null) { //first time, need to calculate all (vector is valid)
last_rms = new double[2];
if (debug_level > 1) {
System.out.println("lmaStep(): first step");
}
double [] fx = getFxDerivs(
parameters_vector, // double [] vector,
last_jt, // final double [][] jt, // should be null or initialized with [vector.length][]
debug_level); // final int debug_level)
last_ymfx = getYminusFxWeighted(
fx, // final double [] fx,
last_rms); // final double [] rms_fp // null or [2]
this.initial_rms = this.last_rms.clone();
this.good_or_bad_rms = this.last_rms.clone();
if (debug_level > -1) { // temporary
/*
dbgYminusFxWeight(
this.last_ymfx,
this.weights,
"Initial_y-fX_after_moving_objects");
*/
}
if (last_ymfx == null) {
return null; // need to re-init/restart LMA
}
// TODO: Restore/implement
if (debug_level > 3) {
double delta = this.delta;
double delta_err=compareJT(
parameters_vector, // double [] vector,
delta, // double delta,
last3only); // boolean last3only); // do not process samples - they are tested before
System.out.println("\nMaximal error = "+delta_err);
}
}
if (debug_level > 3) { // 0) {
double delta = this.delta; // 1E-3;
double delta_err=compareJT(
parameters_vector, // double [] vector,
delta, // double delta,
last3only); // boolean last3only); // do not process samples - they are tested before
System.out.println("\nMaximal error = "+delta_err);
}
Matrix y_minus_fx_weighted = new Matrix(this.last_ymfx, this.last_ymfx.length);
Matrix wjtjlambda = new Matrix(getWJtJlambda(
lambda, // *10, // temporary
this.last_jt)); // double [][] jt) // null
if (debug_level>2) {
System.out.println("JtJ + lambda*diag(JtJ");
wjtjlambda.print(18, 6);
}
Matrix jtjl_inv = null;
try {
jtjl_inv = wjtjlambda.inverse(); // check for errors
} catch (RuntimeException e) {
rslt[1] = true;
if (debug_level > 0) {
System.out.println("Singular Matrix!");
}
return rslt;
}
if (debug_level>2) {
System.out.println("(JtJ + lambda*diag(JtJ).inv()");
jtjl_inv.print(18, 6);
}
//last_jt has NaNs
Matrix jty = (new Matrix(this.last_jt)).times(y_minus_fx_weighted);
if (debug_level>2) {
System.out.println("Jt * (y-fx)");
jty.print(18, 6);
}
Matrix mdelta = jtjl_inv.times(jty);
if (debug_level>2) {
System.out.println("mdelta");
mdelta.print(18, 6);
}
double scale = 1.0;
double [] delta = mdelta.getColumnPackedCopy();
double [] new_vector = parameters_vector.clone();
for (int i = 0; i < parameters_vector.length; i++) {
new_vector[i] += scale * delta[i];
}
double [] fx = getFxDerivs(
new_vector, // double [] vector,
last_jt, // final double [][] jt, // should be null or initialized with [vector.length][]
debug_level); // final int debug_level)
double [] rms = new double[2];
last_ymfx = getYminusFxWeighted(
// vector_XYS, // final double [][] vector_XYS,
fx, // final double [] fx,
rms); // final double [] rms_fp // null or [2]
if (debug_level > 2) {
/*
dbgYminusFx(this.last_ymfx, "next y-fX");
dbgXY(new_vector, "XY-correction");
*/
}
if (last_ymfx == null) {
return null; // need to re-init/restart LMA
}
this.good_or_bad_rms = rms.clone();
if (rms[0] < this.last_rms[0]) { // improved
rslt[0] = true;
rslt[1] = rms[0] >=(this.last_rms[0] * (1.0 - rms_diff));
this.last_rms = rms.clone();
this.parameters_vector = new_vector.clone();
if (debug_level > 2) {
// print vectors in some format
/*
System.out.print("delta: "+corr_delta.toString()+"\n");
System.out.print("New vector: "+new_vector.toString()+"\n");
System.out.println();
*/
}
} else { // worsened
rslt[0] = false;
rslt[1] = false; // do not know, caller will decide
// restore state
fx = getFxDerivs(
parameters_vector, // double [] vector,
last_jt, // final double [][] jt, // should be null or initialized with [vector.length][]
debug_level); // final int debug_level)
last_ymfx = getYminusFxWeighted(
fx, // final double [] fx,
this.last_rms); // final double [] rms_fp // null or [2]
if (last_ymfx == null) {
return null; // need to re-init/restart LMA
}
if (debug_level > 2) {
/*
dbgJacobians(
corr_vector, // GeometryCorrection.CorrVector corr_vector,
1E-5, // double delta,
true); //boolean graphic)
*/
}
}
return rslt;
}
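// Weighted normal matrix: (Jt * W * J) with lambda added to the diagonal (Levenberg-Marquardt
// damping); only the upper triangle is computed directly, the lower one is mirrored.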
private double [][] getWJtJlambda(
final double lambda,
final double [][] jt)
{
final int num_pars = jt.length;
final int num_pars2 = num_pars * num_pars;
final int nup_points = jt[0].length;
final double [][] wjtjl = new double [num_pars][num_pars];
final Thread[] threads = ImageDtt.newThreadArray();
final AtomicInteger ai = new AtomicInteger(0);
for (int ithread = 0; ithread < threads.length; ithread++) {
threads[ithread] = new Thread() {
public void run() {
for (int indx = ai.getAndIncrement(); indx < num_pars2; indx = ai.getAndIncrement()) {
int i = indx / num_pars;
int j = indx % num_pars;
if (j >= i) {
double d = 0.0;
for (int k = 0; k < nup_points; k++) {
if (jt[i][k] != 0) {
d+=0;
}
d += weights[k]*jt[i][k]*jt[j][k];
}
wjtjl[i][j] = d;
if (i == j) {
wjtjl[i][j] += d * lambda;
} else {
wjtjl[j][i] = d;
}
}
}
}
};
}
ImageDtt.startAndJoin(threads);
return wjtjl;
}
public double [] getRms() {
return last_rms;
}
public double [] getInitialRms() {
return initial_rms;
}
/**
 * Calculate the mismatch quaternion between the calculated difference of two scene
 * quaternions and the previously known pair quaternion: if qscene1 is the (rotation+scale)
 * quaternion of scene 1, qscene2 - of scene 2, and qpair12 - the known pair quaternion,
 * the result is qscene2^(-1) * qscene1 * qpair12 (identity when they are consistent).
 * @param qscene1 quaternion (rotation+scale) of the first scene
 * @param qscene2 quaternion (rotation+scale) of the second scene
 * @param qpair12 known pair quaternion relating the two scenes
 * @param qderivs null or double[2][][] to receive derivatives of the result w.r.t. qscene1 and qscene2
 * @return mismatch quaternion
 */
public static double [] getPairErrQuaternion(
double [] qscene1,
double [] qscene2,
double [] qpair12,
double [][][] qderivs) {
double [] iqscene2 = QuatUtils.invert(qscene2);
double [] iq2q1 = QuatUtils.multiply(iqscene2, qscene1);
double [] q2iq1q12 = QuatUtils.multiply(iq2q1, qpair12);
if (qderivs != null) {
double [][] diqscene2_dq2 = QuatUtils.d_invert_dq(qscene2);
double [][] diq2q1_dq2 = QuatUtils.matMult(QuatUtils.d_pq_dp(iqscene2,qscene1), diqscene2_dq2);
double [][] dq2iq1q12_dq2 = QuatUtils.matMult(QuatUtils.d_pq_dp(iq2q1,qpair12),diq2q1_dq2);
double [][] diq2q1_dq1 = QuatUtils.d_pq_dq(iqscene2,qscene1);
double [][] dq2iq1q12_dq1 = QuatUtils.matMult(QuatUtils.d_pq_dp(iq2q1,qpair12),diq2q1_dq1);
qderivs[0] = dq2iq1q12_dq1;
qderivs[1] = dq2iq1q12_dq2;
}
return q2iq1q12;
}
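/**
 * Convert the pair mismatch quaternion into the error vector {scale-1, qn1, qn2, qn3} used by
 * the LMA, optionally with its derivatives with respect to the two scene quaternions.
 */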
public static double [] getPairScaleDirError(
double [] qscene1,
double [] qscene2,
double [] qpair12,
double [][][] derivs) {
double [][][] qderivs = (derivs != null) ? (new double [2][][]) : null;
double [] q2iq1q12 = getPairErrQuaternion(
qscene1, // double [] qscene1,
qscene2, // double [] qscene2,
qpair12, // double [] qpair12,
qderivs); // double [][][] qderivs)
double [] q2iq1q12_norm=QuatUtils.normalize(q2iq1q12); // use q1,q2,q3, maybe q1 and q2 (from DEM) lower weight than q3 (from images)
double scale_diff = QuatUtils.norm(q2iq1q12)-1.0;
if (derivs != null) {
double [][] dq2iq1q12_norm = QuatUtils.dnormalize_dq(q2iq1q12_norm);
double [] dscale = QuatUtils.dscale_dq(q2iq1q12_norm);
dq2iq1q12_norm[0] = dscale; // replace first row (dq_norm/dq) with dscale/dq
derivs[0] = QuatUtils.matMult(dq2iq1q12_norm,qderivs[0]); // /dq1 (scene 1)
derivs[1] = QuatUtils.matMult(dq2iq1q12_norm,qderivs[1]); // /dq2 (scene 2)
}
return new double [] {scale_diff, q2iq1q12_norm[1], q2iq1q12_norm[2], q2iq1q12_norm[3]};
}
}
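Each pair contributes the error quaternion e = qscene2^(-1) * qscene1 * qpair12, which is the identity when the measured pair quaternion is consistent with the two scene quaternions; the LMA minimizes the weighted per-pair errors {|e|-1, en1, en2, en3} (en - components of the normalized e), plus "pull" terms that drive the sums of the per-scene scale and rotation components toward zero. Below is a minimal sketch of how the new class can be driven, mirroring the call added to OrthoAltitudeMatch further down; the scene indices, pairs and pair quaternions here are made up for illustration.

OrientationSceneLMA osLMA = new OrientationSceneLMA();
int []      indices = {0, 1, 2};             // hypothetical scene indices
int [][]    cpairs  = {{0, 1}, {1, 2}};      // scenes connected by each measured pair
double [][] qpairs  = {                      // measured pair quaternions (rotation + scale)
        {1.01, 0.00,  0.00, 0.02},
        {0.99, 0.01, -0.01, 0.00}};
osLMA.prepareLMA(
        indices, cpairs,
        null,                 // weights_pairs == null -> all pairs weighted 1.0
        1.0, 0.3, 0.1,        // weight_rot, weight_tilt, weight_scale
        0.2,                  // pull - fraction of the total RMS given to regularization
        1.0, 0.3, 0.1,        // pull_rots, pull_tilts, pull_scales
        qpairs,
        0);                   // debug_level
int iters = osLMA.runLma(
        0.1, 0.5, 8.0, 1000,  // lambda, lambda_scale_good, lambda_scale_bad, lambda_max
        0.0001, 100,          // rms_diff, num_iter
        false, null, 0);      // last_run, dbg_prefix, debug_level
// iters >= 0 -> converged or improved; osLMA.getRms() returns {full, pure} RMS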
@@ -48,6 +48,13 @@ public class OrthoAltitudeMatch {
double alt_outliers, // 0.05; Remove outliers when fitting planes, removed fraction.
int alt_refine, // 1; Refine altitude difference plane after removing outliers (0 - no outlier removal, 1 - remove outliers and refine once, ...)
double metric_error,
double weight_rot, // >0 weight of pairs errors in qn3
double weight_tilt, // >0 weight of pairs errors in qn1, qn2
double weight_scale, // >0 weight in pairs scale-1.0 errors
double pull, // 0 <= pull <1 - fraction of all RMS contributors
double pull_rots, // >=0 weight of sum of rotations, may be 0, normalized by pull value
double pull_tilts, // >=0 weights of sum of qn1 and qn2 of scenes, normalized by pull value
double pull_scales, // >=0 weights of scales of scenes, normalized by pull value
// log/save parameters
boolean save_each,
boolean log_append,
@@ -150,6 +157,7 @@ public class OrthoAltitudeMatch {
Arrays.fill(weights_scenes, 1.0);
Arrays.fill(weights_pairs, 1.0);
double [][][] scene_tilts_pairs = new double [num_pairs][2][];
double [][] quat_pairs = new double [num_pairs][]; // pairwise quaternions combining 2 axes from tilts, rotation and scale - from affine
double [][][] affine_pairs = new double [num_pairs][][];
double [] flat_err = new double[num_pairs];
double [] overlaps = new double [num_pairs];
@@ -282,6 +290,10 @@ public class OrthoAltitudeMatch {
affine_pairs[npair] = pairwiseOrthoMatch.getAffine(); // contains scale + rot
flat_err[npair] = Math.sqrt(tilt_err2);
overlaps[npair] = pairwiseOrthoMatch.getOverlap();
quat_pairs[npair] = QuatUtils.sceneRelLocalGround(
alt_data, // double [] txy, differential tilts
affine_pairs[npair], // double [][] affine,
y_down_ccw); // boolean y_down_ccw)
if (!test_quat2) {
continue;
}
@@ -629,8 +641,6 @@ public class OrthoAltitudeMatch {
double [][][][] raffines_pm = new double [affines.length][2][][];
SingularValueDecomposition[][] rsvd_pm = new SingularValueDecomposition [affines.length][2];
for (int i = 0; i < raffines_pm.length;i++) {
- // raffines_pm[i][0] = QuatUtils.quatToAffine(QuatUtils.affineToQuatScaled(affines[i], false, y_down_ccw)[0],false,true,true);
- // raffines_pm[i][1] = QuatUtils.quatToAffine(QuatUtils.affineToQuatScaled(affines[i], false, y_down_ccw)[1],false,false,true);
raffines_pm[i][0] = QuatUtils.quatToAffine(QuatUtils.affineToQuatScaled(affines[i], false, y_down_ccw)[0],invert_q2a,true);
raffines_pm[i][1] = QuatUtils.quatToAffine(QuatUtils.affineToQuatScaled(affines[i], false, y_down_ccw)[1],invert_q2a,true);
rsvd_pm[i][0] = SingularValueDecomposition.singularValueDecomposeScaleTiltGamma(raffines_pm[i][0], y_down_ccw);
@@ -878,13 +888,90 @@ public class OrthoAltitudeMatch {
// double tilt_err_threshold = 0.01; // reduce weight if larger
//overlop_pow = 2.0
for (int npair = 0; npair <quat_pairs.length; npair++ ) {
System.out.println("quat_pairs["+npair+"]="+QuatUtils.toString(quat_pairs[npair],use_degrees));
}
double [] quat02 = QuatUtils.multiplyScaled(quat_pairs[0],quat_pairs[2]);
System.out.println( "quat02= "+QuatUtils.toString(quat02,use_degrees));
double [] quat_err = QuatUtils.multiplyScaled(quat02, QuatUtils.invertScaled(quat_pairs[1]));
System.out.println( "quat_err= "+QuatUtils.toString(quat_err,use_degrees));
double quat02_norm = QuatUtils.norm(quat02);
double quat02_offs = Math.sqrt(quat02[1]*quat02[1]+quat02[2]*quat02[2]+quat02[3]*quat02[3]);
double quat02_orient = quat02_offs/quat02_norm;
double quat02_scale = quat02_norm-1.0;
double quat_err_norm = QuatUtils.norm(quat_err);
double quat_err_offs = Math.sqrt(quat_err[1]*quat_err[1]+quat_err[2]*quat_err[2]+quat_err[3]*quat_err[3]);
double orient_err = quat_err_offs/quat_err_norm;
double scale_err = quat_err_norm-1.0;
System.out.println("quat02_orient="+quat02_orient);
System.out.println("quat02_scale= "+quat02_scale);
System.out.println("orient_err="+orient_err);
System.out.println("scale_err= "+scale_err);
System.out.println("orient_err_rel="+orient_err/quat02_orient);
System.out.println("scale_err_rel= "+scale_err/quat02_scale);
for (int npair = 0; npair <quat_pairs.length; npair++ ) {
SingularValueDecomposition svd = SingularValueDecomposition.singularValueDecomposeScaleTiltGamma(affine_pairs[npair], y_down_ccw); // boolean y_down_ccw)
System.out.println("svd_affine["+npair+"]=" + svd.toString(use_degrees));
}
double max_flat_err = 0; // tilt_err_threshold
for (int npair = 0; npair < num_pairs; npair++) {
max_flat_err = Math.max(max_flat_err, flat_err[npair]);
//weights_pairs
weights_pairs[npair] = tilt_err_threshold/Math.max(tilt_err_threshold, flat_err[npair])*Math.pow(overlaps[npair], overlop_pow);
}
System.out.println("max_flat_err="+max_flat_err);
OrientationSceneLMA orientationSceneLMA = new OrientationSceneLMA();
orientationSceneLMA.prepareLMA(
indices, // int [] indices, // should all be used
condensed_pairs, // int [][] cpairs,
weights_pairs, // double [] weights_pairs, // from matching tilts(flatness) (and worst sfm, per-pair rmse)?
weight_rot, // double weight_rot, // >0 weight of pairs errors in qn3
weight_tilt, // double weight_tilt, // >0 weight of pairs errors in qn1, qn2
weight_scale, // double weight_scale, // >0 weight in pairs scale-1.0 errors
pull, // double pull, // 0 <= pull <1 - fraction of all RMS contributors
pull_rots, // double pull_rots, // >=0 weight of sum of rotations, may be 0, normalized by pull value
pull_tilts, // double pull_tilts, // >=0 weights of sum of qn1 and qn2 of scenes, normalized by pull value
pull_scales, // double pull_scales, // >=0 weights of scales of scenes, normalized by pull value
quat_pairs, // double [][] qpairs, // [pair][4] quaternions for pairs - orientation and scale (non-unity quaternions)
debugLevel); // int debug_level);
double lambda = 0.1;
double lambda_scale_good = 0.5;
double lambda_scale_bad = 8.0;
double lambda_max = 1000;
boolean last_run = false;
double rms_diff = 0.0001;
int num_iter = 100;
int lma_rslt= orientationSceneLMA.runLma( // <0 - failed, >=0 iteration number (1 - immediately)
lambda, // double lambda, // 0.1
lambda_scale_good,// double lambda_scale_good,// 0.5
lambda_scale_bad, // double lambda_scale_bad, // 8.0
lambda_max, // double lambda_max, // 100
rms_diff, // double rms_diff, // 0.001
num_iter, //int num_iter, // 20
last_run, // boolean last_run,
null, // String dbg_prefix,
debugLevel); // int debug_level)
System.out.println("LMA -> "+lma_rslt);
if (lma_rslt >= 0) {
if (debugLevel > -3) {
//ersTiltLMA.printSceneResults(use_degrees, use_percents);
//ersTiltLMA.printPairsResults(use_degrees, use_percents);
}
}
System.out.println();
ERSTiltLMA ersTiltLMA = new ERSTiltLMA();
ersTiltLMA.prepareLMA(
@@ -896,15 +983,16 @@ public class OrthoAltitudeMatch {
scene_tilts_pairs, // double [][][] tilts, // [pair][scene(2)][tilt(2)]
affine_pairs, // double [][][] affine_pairs,
debugLevel); // int debug_level)
/*
double lambda = 0.1;
double lambda_scale_good = 0.5;
double lambda_scale_bad = 8.0;
double lambda_max = 1000;
boolean last_run = false;
double rms_diff = 0.0001;
int num_iter = 100;
*/
- int lma_rslt=ersTiltLMA.runLma( // <0 - failed, >=0 iteration number (1 - immediately)
+ lma_rslt=ersTiltLMA.runLma( // <0 - failed, >=0 iteration number (1 - immediately)
lambda, // double lambda, // 0.1
lambda_scale_good,// double lambda_scale_good,// 0.5
lambda_scale_bad, // double lambda_scale_bad, // 8.0
...
@@ -6350,6 +6350,14 @@ public class OrthoMapsCollection implements Serializable{
double metric_err = clt_parameters.imp.pmtch_metric_err;// 0.05; // 0.02;// 2 cm
double weight_rot = clt_parameters.imp.alt_weight_rot; // >0 weight of pairs errors in qn3
double weight_tilt = clt_parameters.imp.alt_weight_tilt; // >0 weight of pairs errors in qn1, qn2
double weight_scale = clt_parameters.imp.alt_weight_scale; // >0 weight in pairs scale-1.0 errors
double pull = clt_parameters.imp.alt_pull; // 0 <= pull <1 - fraction of all RMS contributors
double pull_rots = clt_parameters.imp.alt_pull_rots; // >=0 weight of sum of rotations, may be 0, normalized by pull value
double pull_tilts = clt_parameters.imp.alt_pull_tilts; // >=0 weights of sum of qn1 and qn2 of scenes, normalized by pull value
double pull_scales = clt_parameters.imp.alt_pull_scales; // >=0 weights of scales of scenes, normalized by pull value
// log/save parameters
boolean save_each = clt_parameters.imp.pwise_save_each; // save state file after each match
boolean log_append = clt_parameters.imp.pwise_log_append; //
@@ -6519,13 +6527,19 @@ public class OrthoMapsCollection implements Serializable{
alt_outliers, // double alt_outliers, // 0.05; Remove outliers when fitting planes, removed fraction.
alt_refine, // int alt_refine, // 1; Refine altitude difference plane after removing outliers (0 - no outlier removal, 1 - remove outliers and refine once, ...)
metric_err, // double metric_error,
weight_rot, // double weight_rot, // >0 weight of pairs errors in qn3
weight_tilt, // double weight_tilt, // >0 weight of pairs errors in qn1, qn2
weight_scale, // double weight_scale, // >0 weight in pairs scale-1.0 errors
pull, // double pull, // 0 <= pull <1 - fraction of all RMS contributors
pull_rots, // double pull_rots, // >=0 weight of sum of rotations, may be 0, normalized by pull value
pull_tilts, // double pull_tilts, // >=0 weights of sum of qn1 and qn2 of scenes, normalized by pull value
pull_scales, // double pull_scales, // >=0 weights of scales of scenes, normalized by pull value
// log/save parameters
save_each, // boolean save_each,
log_append, // boolean log_append,
log_path, // String log_path,
orthoMapsCollection_path, // String orthoMapsCollection_path,
debugLevel); // int debugLevel)
}
public boolean displayScenePairs(
...
@@ -67,8 +67,127 @@ public final class QuatUtils { // static class
r[0]*s[3] - r[1]*s[2] + r[2]*s[1] + r[3]*s[0]};
return t;
}
/**
* Differential d_(p*q)/dp
* https://faculty.sites.iastate.edu/jia/files/inline-files/quaternion.pdf
* @param p quaternion
* @param q quaternion
* @return matrix[4][4], where rows correspond to elements of quaternion (p*q), and columns - indices of p,
* so this matrix when multiplied by a column vector dp results in column vector d(pq)
*/
public static double [][] d_pq_dp(
double [] p,
double [] q) {
return new double[][] {
{q[0],-q[1],-q[2], -q[3]},
{q[1], q[0], q[3], -q[2]},
{q[2],-q[3], q[0], q[1]},
{q[3], q[2],-q[1], q[0]}
};
}
/**
* Differential d_(p*q)/dq
* https://faculty.sites.iastate.edu/jia/files/inline-files/quaternion.pdf
* @param p quaternion
* @param q quaternion
* @return matrix[4][4], where rows correspond to elements of quaternion (p*q), and columns - indices of q,
* so this matrix when multiplied by a column vector dq results in column vector d(pq)
*/
public static double [][] d_pq_dq(
double [] p,
double [] q) {
return new double[][] {
{p[0],-p[1],-p[2], -p[3]},
{p[1], p[0],-p[3], p[2]},
{p[2], p[3], p[0], -p[1]},
{p[3],-p[2], p[1], p[0]}
};
}
/**
 * Derivative of the inverse quaternion
 * @param q quaternion
 * @return derivative as a 4x4 array
*/
public static double [][] d_invert_dq (
double [] q){
double q0= q[0],q1=q[1],q2=q[2],q3=q[3];
double l2 =q0*q0+q1*q1+q2*q2+q3*q3;
double l2l2=l2*l2;
return new double[][] {
{ (l2 - 2*q0*q0)/l2l2, -2*q0*q1/l2l2, -2*q0*q2/l2l2, -2*q0*q3/l2l2},
{ 2*q1*q0/ l2l2, (2*q1*q1 - l2)/l2l2, 2*q1*q2/l2l2, 2*q1*q3/l2l2},
{ 2*q2*q0/ l2l2, 2*q2*q1/l2l2, (2*q2*q2 - l2)/l2l2, 2*q2*q3/l2l2},
{ 2*q3*q0/ l2l2, 2*q3*q1/l2l2, 2*q3*q2/l2l2, (2*q3*q3 - l2)/l2l2},
};
}
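/**
 * Derivative of the normalized quaternion: d(q/|q|)/dq
 * @param q quaternion
 * @return 4x4 matrix, rows - components of q/|q|, columns - components of q
 */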
public static double [][] dnormalize_dq(
double [] q){
double q0= q[0],q1=q[1],q2=q[2],q3=q[3];
double l2 =q0*q0+q1*q1+q2*q2+q3*q3;
double l3=l2*Math.sqrt(l2);
return new double[][] {
{(l2 - q0*q0)/l3, -q0*q1 /l3, -q0*q2 /l3, -q0*q3 /l3},
{ -q1*q0 /l3, (l2 - q1*q1)/l3, -q1*q2 /l3, -q1*q3 /l3},
{ -q2*q0 /l3, -q2*q1 /l3, (l2 - q2*q2)/l3, -q2*q3 /l3},
{ -q3*q0 /l3, -q3*q1 /l3, -q3*q2 /l3, (l2 - q3*q3)/l3}
};
}
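/**
 * Derivative of the quaternion norm: d|q|/dq = q/|q|
 * @param q quaternion
 * @return 4-element gradient vector
 */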
public static double [] dscale_dq(
double [] q){
double q0= q[0],q1=q[1],q2=q[2],q3=q[3];
double l = Math.sqrt(q0*q0+q1*q1+q2*q2+q3*q3);
return new double [] {q0/l, q1/l, q2/l,q3/l};
}
/**
 * Differential d_(p*q')/dp (q' - q-conjugated)
* https://faculty.sites.iastate.edu/jia/files/inline-files/quaternion.pdf
* @param p quaternion
* @param q quaternion
* @return matrix[4][4], where rows correspond to elements of quaternion (p*q), and columns - indices of p,
* so this matrix when multiplied by a column vector dp results in column vector d(pq')
*/
/*
public static double [][] d_pqc_dp(
double [] p,
double [] q) {
return new double[][] {
{ q[0], q[1], q[2], q[3]},
{-q[1], q[0],-q[3], q[2]},
{-q[2], q[3], q[0], -q[1]},
{-q[3],-q[2], q[1], q[0]}
};
}
*/
/**
* Differential d_(p*q')/dq (q' - q-conjugated)
* https://faculty.sites.iastate.edu/jia/files/inline-files/quaternion.pdf
* @param p quaternion
* @param q quaternion
* @return matrix[4][4], where rows correspond to elements of quaternion (p*q), and columns - indices of q,
* so this matrix when multiplied by a column vector dq results in column vector d(pq)
*/
/*
public static double [][] d_pqc_dq(
double [] p,
double [] q) {
return new double[][] {
{p[0], p[1], p[2], p[3]},
{p[1],-p[0], p[3], -p[2]},
{p[2],-p[3],-p[0], p[1]},
{p[3], p[2],-p[1], -p[0]}
};
}
*/
- public static double [] multiplyScaled(
+ public static double [] multiplyScaled( // Not needed, same result as multiply()
double [] r,
double [] s ) {
return scale(multiply(normalize(r),normalize(s)),norm(r)*norm(s));
@@ -79,6 +198,7 @@ public final class QuatUtils { // static class
}
public static double [] divide( // pure rotation
double [] r,
double [] s ) {
@@ -296,11 +416,19 @@ public final class QuatUtils { // static class
double [] quat) {
return Math.sqrt(quat[0]*quat[0]+quat[1]*quat[1]+quat[2]*quat[2]+quat[3]*quat[3]);
}
- public static double [] invert(
+ public static double [] invert0(
double [] quat) {
return new double [] {quat[0], -quat[1], -quat[2], -quat[3]};
}
public static double [] invert( // make sure did not break anything
double [] quat) {
double s2 = quat[0]*quat[0]+quat[1]*quat[1]+quat[2]*quat[2]+quat[3]*quat[3];
return scale(new double [] {quat[0], -quat[1], -quat[2], -quat[3]},1/s2);
}
public static double [] normalize(
double [] quat) {
// double k = 1/norm(quat);
...
@@ -175,6 +175,13 @@ public class IntersceneMatchParameters {
public double alt_outliers = 0.1; // remove outliers when fitting planes
public int alt_refine= 1; // refine plane after removing outliers (0 - no outlier removal, 1 - remove outliers and refine once, ...)
public double alt_weight_rot = 1.0; // >0 weight of pairs errors in qn3
public double alt_weight_tilt = 0.3; // >0 weight of pairs errors in qn1, qn2
public double alt_weight_scale = 0.1; // >0 weight in pairs scale-1.0 errors
public double alt_pull = 0.2; // 0 <= pull <1 - fraction of all RMS contributors
public double alt_pull_rots = 1.0; // >=0 weight of sum of rotations, may be 0, normalized by pull value
public double alt_pull_tilts = 0.3; // >=0 weights of sum of qn1 and qn2 of scenes, normalized by pull value
public double alt_pull_scales = 0.1; // >=0 weights of scales of scenes, normalized by pull value
public boolean pmap_move_only = false;
public boolean pmap_ignore_affines = false;
@@ -930,6 +937,15 @@ min_str_neib_fpn 0.35
gd.addNumericField("Fraction of ouliers", this.alt_outliers, 3,7,"", "Remove outliers when fitting planes, removed fraction.");
gd.addNumericField("Number of alt plane refines", this.alt_refine, 0,3,"", "Refine altitude difference plane after removing outliers (0 - no outlier removal, 1 - remove outliers and refine once, ...)");
gd.addMessage ("Parameters for scenes orientation LMA fitting");
gd.addNumericField("Rotation mismatches weights", this.alt_weight_rot, 3,7,"", "Weights of rotation mismatches.");
gd.addNumericField("Tilt mismatches weights", this.alt_weight_tilt, 3,7,"", "Weights of tilt mismatches.");
gd.addNumericField("Scale mismatches weights", this.alt_weight_scale, 3,7,"","Weights of scale mismatches.");
gd.addNumericField("Pull strength", this.alt_pull, 3,7,"", "Relative weight of combined regularization pull (0<=pull<1.0)");
gd.addNumericField("Rotations pull", this.alt_pull_rots, 3,7,"", "Weight of sum of all rotations pull to zero - will be normalized to make sum of pulls a specified fraction of totla weights ");
gd.addNumericField("Tilts pull", this.alt_pull_tilts, 3,7,"", "Weight of sum of all tilts pull to zero - will be normalized to make sum of pulls a specified fraction of totla weights ");
gd.addNumericField("Scales pull", this.alt_pull_scales, 3,7,"", "Weight of sum of all scales pull to zero - will be normalized to make sum of pulls a specified fraction of totla weights ");
gd.addMessage ("Build map with LMA from pairwise matches"); gd.addMessage ("Build map with LMA from pairwise matches");
gd.addCheckbox ("Moves only", this.pmap_move_only, "Moves only, no affine transform."); gd.addCheckbox ("Moves only", this.pmap_move_only, "Moves only, no affine transform.");
gd.addCheckbox ("Ignore existing affines", this.pmap_ignore_affines, "Start from unity matrices, ignore saved affines."); gd.addCheckbox ("Ignore existing affines", this.pmap_ignore_affines, "Start from unity matrices, ignore saved affines.");
...@@ -1935,6 +1951,14 @@ min_str_neib_fpn 0.35 ...@@ -1935,6 +1951,14 @@ min_str_neib_fpn 0.35
this.alt_outliers = gd.getNextNumber(); this.alt_outliers = gd.getNextNumber();
this.alt_refine = (int) gd.getNextNumber(); this.alt_refine = (int) gd.getNextNumber();
this.alt_weight_rot = gd.getNextNumber();
this.alt_weight_tilt = gd.getNextNumber();
this.alt_weight_scale = gd.getNextNumber();
this.alt_pull = gd.getNextNumber();
this.alt_pull_rots = gd.getNextNumber();
this.alt_pull_tilts = gd.getNextNumber();
this.alt_pull_scales = gd.getNextNumber();
this.pmap_move_only = gd.getNextBoolean();
this.pmap_ignore_affines = gd.getNextBoolean();
this.pmap_use_inv = gd.getNextBoolean();
@@ -2529,6 +2553,14 @@ min_str_neib_fpn 0.35
properties.setProperty(prefix+"alt_outliers", this.alt_outliers + ""); // double
properties.setProperty(prefix+"alt_refine", this.alt_refine + ""); // int
properties.setProperty(prefix+"alt_weight_rot", this.alt_weight_rot + ""); // double
properties.setProperty(prefix+"alt_weight_tilt", this.alt_weight_tilt + ""); // double
properties.setProperty(prefix+"alt_weight_scale", this.alt_weight_scale + ""); // double
properties.setProperty(prefix+"alt_pull", this.alt_pull + ""); // double
properties.setProperty(prefix+"alt_pull_rots", this.alt_pull_rots + ""); // double
properties.setProperty(prefix+"alt_pull_tilts ", this.alt_pull_tilts + ""); // double
properties.setProperty(prefix+"alt_pull_scales", this.alt_pull_scales + ""); // double
properties.setProperty(prefix+"pmap_move_only", this.pmap_move_only + ""); // boolean properties.setProperty(prefix+"pmap_move_only", this.pmap_move_only + ""); // boolean
properties.setProperty(prefix+"pmap_ignore_affines", this.pmap_ignore_affines + ""); // boolean properties.setProperty(prefix+"pmap_ignore_affines", this.pmap_ignore_affines + ""); // boolean
properties.setProperty(prefix+"pmap_use_inv", this.pmap_use_inv + ""); // boolean properties.setProperty(prefix+"pmap_use_inv", this.pmap_use_inv + ""); // boolean
...@@ -3088,6 +3120,14 @@ min_str_neib_fpn 0.35 ...@@ -3088,6 +3120,14 @@ min_str_neib_fpn 0.35
if (properties.getProperty(prefix+"alt_outliers")!=null) this.alt_outliers=Double.parseDouble(properties.getProperty(prefix+ "alt_outliers")); if (properties.getProperty(prefix+"alt_outliers")!=null) this.alt_outliers=Double.parseDouble(properties.getProperty(prefix+ "alt_outliers"));
if (properties.getProperty(prefix+"alt_refine")!=null) this.alt_refine=Integer.parseInt(properties.getProperty(prefix+ "alt_refine")); if (properties.getProperty(prefix+"alt_refine")!=null) this.alt_refine=Integer.parseInt(properties.getProperty(prefix+ "alt_refine"));
if (properties.getProperty(prefix+"alt_weight_rot")!=null) this.alt_weight_rot=Double.parseDouble(properties.getProperty(prefix+ "alt_weight_rot"));
if (properties.getProperty(prefix+"alt_weight_tilt")!=null) this.alt_weight_tilt=Double.parseDouble(properties.getProperty(prefix+ "alt_weight_tilt"));
if (properties.getProperty(prefix+"alt_weight_scale")!=null) this.alt_weight_scale=Double.parseDouble(properties.getProperty(prefix+ "alt_weight_scale"));
if (properties.getProperty(prefix+"alt_pull")!=null) this.alt_pull=Double.parseDouble(properties.getProperty(prefix+ "alt_pull"));
if (properties.getProperty(prefix+"alt_pull_rots")!=null) this.alt_pull_rots=Double.parseDouble(properties.getProperty(prefix+ "alt_pull_rots"));
if (properties.getProperty(prefix+"alt_pull_tilts ")!=null) this.alt_pull_tilts =Double.parseDouble(properties.getProperty(prefix+ "alt_pull_tilts "));
if (properties.getProperty(prefix+"alt_pull_scales")!=null) this.alt_pull_scales=Double.parseDouble(properties.getProperty(prefix+ "alt_pull_scales"));
if (properties.getProperty(prefix+"pmap_move_only")!=null) this.pmap_move_only=Boolean.parseBoolean(properties.getProperty(prefix+ "pmap_move_only")); if (properties.getProperty(prefix+"pmap_move_only")!=null) this.pmap_move_only=Boolean.parseBoolean(properties.getProperty(prefix+ "pmap_move_only"));
if (properties.getProperty(prefix+"pmap_ignore_affines")!=null)this.pmap_ignore_affines=Boolean.parseBoolean(properties.getProperty(prefix+"pmap_ignore_affines")); if (properties.getProperty(prefix+"pmap_ignore_affines")!=null)this.pmap_ignore_affines=Boolean.parseBoolean(properties.getProperty(prefix+"pmap_ignore_affines"));
if (properties.getProperty(prefix+"pmap_use_inv")!=null) this.pmap_use_inv=Boolean.parseBoolean(properties.getProperty(prefix+ "pmap_use_inv")); if (properties.getProperty(prefix+"pmap_use_inv")!=null) this.pmap_use_inv=Boolean.parseBoolean(properties.getProperty(prefix+ "pmap_use_inv"));
...@@ -3668,6 +3708,14 @@ min_str_neib_fpn 0.35 ...@@ -3668,6 +3708,14 @@ min_str_neib_fpn 0.35
imp.alt_outliers = this.alt_outliers; imp.alt_outliers = this.alt_outliers;
imp.alt_refine = this.alt_refine; imp.alt_refine = this.alt_refine;
imp.alt_weight_rot = this.alt_weight_rot;
imp.alt_weight_tilt = this.alt_weight_tilt;
imp.alt_weight_scale = this.alt_weight_scale;
imp.alt_pull = this.alt_pull;
imp.alt_pull_rots = this.alt_pull_rots;
imp.alt_pull_tilts = this.alt_pull_tilts ;
imp.alt_pull_scales = this.alt_pull_scales;
imp.pmap_move_only = this.pmap_move_only;
imp.pmap_ignore_affines = this.pmap_ignore_affines;
imp.pmap_use_inv = this.pmap_use_inv;
...
@@ -2240,7 +2240,7 @@ public class QuaternionLma {
 * (samples in LMA) and rows - to the second quaternion
 * (missing from the input) components.
 */
- public static double [][] composeDQ(
+ public static double [][] composeDQ( // not used
double [] r) {
/*
return new double [][] {
@@ -2269,7 +2269,7 @@ public class QuaternionLma {
 * (samples in LMA) and rows - to the source quaternion
 * (missing from the input) components.
 */
- public static double [][] composeDR( // not used
+ public static double [][] composeDR(
double [] q) {
return new double [][] {
{ q[0], -q[1], -q[2], -q[3]},
...