/**
 ** OrthoEqualizeLMA - Fit multiple scenes pixel values (temperature)
 ** linear transformations using already calculated pairwise
 ** transformations
 **
 ** Copyright (C) 2024 Elphel, Inc.
 **
 ** -----------------------------------------------------------------------------
 **
 ** OrthoEqualizeLMA.java is free software: you can redistribute it and/or modify
 ** it under the terms of the GNU General Public License as published by
 ** the Free Software Foundation, either version 3 of the License, or
 ** (at your option) any later version.
 **
 ** This program is distributed in the hope that it will be useful,
 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 ** GNU General Public License for more details.
 **
 ** You should have received a copy of the GNU General Public License
 ** along with this program. If not, see <http://www.gnu.org/licenses/>.
 ** -----------------------------------------------------------------------------
 **
 */
package com.elphel.imagej.orthomosaic;

import java.awt.Point;
import java.io.IOException;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.concurrent.atomic.AtomicInteger;

import com.elphel.imagej.cameras.CLTParameters;
import com.elphel.imagej.common.GenericJTabbedDialog;
import com.elphel.imagej.tileprocessor.ImageDtt;

import Jama.Matrix;

public class OrthoEqualizeLMA {
	private double []   last_rms =          null; // {rms, rms_pure}, matching this.vector
	private double []   good_or_bad_rms =   null; // just for diagnostics, to read last (failed) rms
	private double []   initial_rms =       null; // {rms, rms_pure}, first-calculated rms
	private double []   parameters_vector = null;
	private double []   y_vector =          null;
	private double      pure_weight;              // weight of samples only as a fraction of total weight
	private double []   weights;                  // normalized so sum is 1.0 for all - samples and extra regularization
	private double []   last_ymfx =         null;
	private double [][] last_jt =           null;
	private PairwiseOrthoMatch [][] matches = null;
	private int [][]    pairs =             null;
	private int [][]    pairs_ni =          null; // non-intersecting pairs for multi-threaded processing
	private int []      indices =           null;
	private int         num_scenes =        0;
	private int         num_pairs =         0;

	public static int buildEqualize(
			CLTParameters       clt_parameters,
			OrthoMapsCollection maps_collection,
			String              orthoMapsCollection_path) {
		boolean ignore_equalize = clt_parameters.imp.pequ_ignore_equalize; // false;    // ignore previous equalization
		boolean use_inv         = clt_parameters.imp.pequ_use_inv;         // false;
		double  scale_weight    = clt_parameters.imp.pequ_scale_weight;    // 500;      // relative weight of scale differences compared to offset differences
		double  pull_weight     = clt_parameters.imp.pequ_pull_weight;     // 0.001;    // relative weight of offsets and scales differences from 1.0 to pairs mismatch
		double  half_weight_sec = clt_parameters.imp.pequ_half_weight_sec; // 300.0;    // time difference to reduce weight twice
		double  min_weight_sec  = clt_parameters.imp.pequ_min_weight_sec;  // 0.01;     // weight of pairs at very different time
		double  overlap_pow     = clt_parameters.imp.pequ_overlap_pow;     // 2.0;      // match weight as overlap fraction to this power
		double  rms_diff        = clt_parameters.imp.pequ_rms_diff;        // 0.000001;
		int     num_iter        = clt_parameters.imp.pequ_num_iter;        // 100; // 50;
		int     debugLevel      = clt_parameters.imp.pequ_debugLevel;      // 2;
		GenericJTabbedDialog gd = new GenericJTabbedDialog("Pairwise Match Parameters", 1200, 400);
		gd.addCheckbox    ("Ignore existing equalization", ignore_equalize,
				"Ignore existing equalization, start from scratch (a=1, b=0).");
		gd.addCheckbox    ("Use reversed pairs",            use_inv,
				"Use reversed (late-early timestamps) pairs.");
		gd.addNumericField("Relative scale weight",         scale_weight,    3, 7, "",
				"Importance of a-coefficient (scale) relative to b-coefficient (offset).");
		gd.addNumericField("Pull weight",                   pull_weight,     3, 7, "",
				"Relative weight of offsets and scales differences from 1.0 to pairs mismatch.");
		gd.addNumericField("Half-weight time difference",   half_weight_sec, 3, 7, "s",
				"Time difference (in a pair) to reduce weight twice.");
		gd.addNumericField("Weight for large time offset",  min_weight_sec,  3, 7, "",
				"Weight of pairs at very different time.");
		gd.addNumericField("Overlap importance",            overlap_pow,     3, 7, "",
				"Raise overlap fraction (of the smaller image) to this power before using as weight.");
		gd.addNumericField("RMSE relative improvement",     rms_diff,        8, 10, "",
				"Relative RMSE improvement to exit LMA.");
		gd.addNumericField("LMA iterations",                num_iter,        0, 3, "",
				"Maximal number of LMA iterations.");
		gd.addNumericField("Debug level for equalization",  debugLevel,      0, 3, "",
				"Debug level for global (LMA-based) intensity equalization.");
		gd.showDialog();
		if (gd.wasCanceled()) return -1;
		ignore_equalize = gd.getNextBoolean();
		use_inv         = gd.getNextBoolean();
		scale_weight    = gd.getNextNumber();
		pull_weight     = gd.getNextNumber();
		half_weight_sec = gd.getNextNumber();
		min_weight_sec  = gd.getNextNumber();
		overlap_pow     = gd.getNextNumber();
		rms_diff        = gd.getNextNumber();
		num_iter        = (int) gd.getNextNumber();
		debugLevel      = (int) gd.getNextNumber();
		return buildEqualize(
				clt_parameters,           // CLTParameters       clt_parameters,
				maps_collection,          // OrthoMapsCollection maps_collection,
				orthoMapsCollection_path, // String              orthoMapsCollection_path,
				ignore_equalize,          // boolean             ignore_equalize,
				use_inv,                  // boolean             use_inv,
				scale_weight,             // double              scale_weight,
				pull_weight,              // double              pull_weight,
				half_weight_sec,          // double              half_weight_sec,
				min_weight_sec,           // double              min_weight_sec,
				overlap_pow,              // double              overlap_pow,
				rms_diff,                 // double              rms_diff,
				num_iter,                 // int                 num_iter,
				debugLevel);              // int                 debugLevel);
	}

	public static int buildEqualize(
			CLTParameters       clt_parameters,
			OrthoMapsCollection maps_collection,
			String              orthoMapsCollection_path,
			boolean             ignore_equalize,
			boolean             use_inv,
			double              scale_weight,
			double              pull_weight,
			double              half_weight_sec,
			double              min_weight_sec,
			double              overlap_pow,
			double              rms_diff,
			int                 num_iter,
			int                 debugLevel) {
		double  lambda            = 0.1;
		double  lambda_scale_good = 0.5;
		double  lambda_scale_bad  = 8.0;
		double  lambda_max        = 100;
		boolean last_run          = false;
		int [] indices = maps_collection.getScenesSelection(
				clt_parameters,     // CLTParameters clt_parameters,
				" to build a map"); // String purpose)
		OrthoEqualizeLMA oel = new OrthoEqualizeLMA();
		oel.prepareLMA (
				clt_parameters,    // CLTParameters clt_parameters,
				maps_collection,   // OrthoMapsCollection maps_collection,
				indices,           // int [] indices,
				ignore_equalize,   // boolean ignore_equalization, // ignore previous equalization
				// scale_weight applies both to pairs mismatch and per-scene values difference from neutral {1.0, 0.0}
				scale_weight,      // double scale_weight,    // relative weight of scale differences compared to offset differences
				pull_weight,       // double pull_weight,     // relative weight of offsets and scales differences from 1.0 to pairs mismatch
				half_weight_sec,   // double half_weight_sec, // 300 - time difference to reduce weight twice
				min_weight_sec,    // double min_weight_sec,  // 0.01 weight of pairs at very different time
				use_inv,           // boolean use_inv,        // use inverse pairs
				overlap_pow,       // double overlap_pow,     // match weight as overlap fraction to this power
				debugLevel);       // int debugLevel);
		int lma_rslt = oel.runLma( // <0 - failed, >=0 iteration number (1 - immediately)
				lambda,            // double lambda,            // 0.1
				lambda_scale_good, // double lambda_scale_good, // 0.5
				lambda_scale_bad,  // double lambda_scale_bad,  // 8.0
				lambda_max,        // double lambda_max,        // 100
				rms_diff,          // double rms_diff,          // 0.001
				num_iter,          // int num_iter,             // 20
				last_run,          // boolean last_run,
				null,              // dbg_lma_prefix,           // String dbg_prefix,
				debugLevel);       // int debug_level)
		System.out.println("lma_rslt="+lma_rslt);
		double [] rms =         oel.getRms();
		double [] initial_rms = oel.getInitialRms();
		if (debugLevel > 0) {
			System.out.println("LMA: full RMS="+rms[0]+" ("+initial_rms[0]+"), pure RMS="+rms[1]+" ("+initial_rms[1]+")");
		}
		// Get and apply affines
		double [][] equalize = oel.getEqualize();
		double []   fx =       oel.getFx();
		oel.updateEqualize(maps_collection);
		if (orthoMapsCollection_path != null) {
			try {
				maps_collection.writeOrthoMapsCollection(orthoMapsCollection_path);
			} catch (IOException e) {
				// TODO Auto-generated catch block
				e.printStackTrace();
			}
			System.out.println("Saved data to "+ orthoMapsCollection_path);
		}
		return 0;
	}

	public double [] getParameters() {
		return parameters_vector;
	}

	public double [] getFx() {
		return getFxDerivs(
				parameters_vector, // double [] vector,
				null,              // final double [][] jt, // should be null or initialized with [vector.length][]
				0);                // final int debug_level)
	}

	public int prepareLMA (
			CLTParameters       clt_parameters,
			OrthoMapsCollection maps_collection,
			int []              indices,
			// scale_weight applies both to pairs mismatch and per-scene values difference from neutral {1.0, 0.0}
			boolean             ignore_equalization, // ignore previous equalization
			double              scale_weight,        // relative weight of scale differences compared to offset differences
			double              pull_weight,         // relative weight of offsets and scales differences from 1.0 to pairs mismatch
			double              half_weight_sec,     // 300 - time difference to reduce weight twice
			double              min_weight_sec,      // 0.01 weight of pairs at very different time
			boolean             use_inv,             // use inverse pairs
			double              overlap_pow,         // match weight as overlap fraction to this power
			int                 debugLevel) {
		this.indices = indices;
		num_scenes = indices.length;
		matches = new PairwiseOrthoMatch[indices.length][indices.length];
		ArrayList<Point> pairs_list = new ArrayList<Point>();
		for (int i = 0; i < num_scenes-1; i++) {
			int scene0 = indices[i];
			for (int j = i+1; j < num_scenes; j++){
				int scene1 = indices[j];
				PairwiseOrthoMatch match =     maps_collection.ortho_maps[scene0].getMatch(maps_collection.ortho_maps[scene1].getName());
				PairwiseOrthoMatch inv_match = use_inv ? maps_collection.ortho_maps[scene1].getMatch(maps_collection.ortho_maps[scene0].getName()) : null;
				if ((match != null) || (inv_match != null)){
					if (match == null) {
						double [] enuOffset = maps_collection.ortho_maps[scene0].enuOffsetTo(maps_collection.ortho_maps[scene1]);
						double [] rd = {enuOffset[0], -enuOffset[1]}; // {right,down} of the image
						match = inv_match.getInverse(rd);
					}
					if (inv_match == null) {
						double [] enuOffset = maps_collection.ortho_maps[scene1].enuOffsetTo(maps_collection.ortho_maps[scene0]);
						double [] rd = {enuOffset[0], -enuOffset[1]}; // {right,down} of the image
						inv_match = match.getInverse(rd);
					}
				}
				if ((match != null) && match.isSetEqualize2to1()) {
					matches[i][j] = match;
					matches[j][i] = inv_match;
					pairs_list.add(new Point(i,j)); // only once?
				}
			}
		}
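		// Note on the setup below (a summary of this method's data layout, derived from the code):
		// the LMA data vector has 2*num_pairs "measurement" rows (per-pair scale a and offset b
		// of equalize1to0) followed by 2*num_scenes regularization rows that pull each scene's
		// {a, b} towards the neutral {1.0, 0.0}. Pair rows are weighted by
		//     w = max(0.5^(dt/half_weight_sec), min_weight_sec) * overlap^overlap_pow,
		// e.g. (illustrative numbers only) dt = 600 s with half_weight_sec = 300 gives 0.25,
		// overlap = 0.5 with overlap_pow = 2 gives 0.25, so w = 0.0625; the scale row is
		// additionally multiplied by scale_weight. All weights are then normalized to sum to 1.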
		num_pairs = pairs_list.size();
		pairs = new int[num_pairs][2];
		for (int np = 0; np < num_pairs; np++) {
			Point pair = pairs_list.get(np);
			pairs[np] = new int[] {pair.x, pair.y};
		}
		pairs_ni = OrthoMultiLMA.createNonIntersectingPairs(pairs);
		y_vector =          new double [2*num_pairs + 2*num_scenes];
		weights =           new double [2*num_pairs + 2*num_scenes];
		parameters_vector = new double [2*num_scenes];
		int sec_24hrs = 60*60*24;
		double sw = 0;
		// pairwise matches
		for (int npair = 0; npair < num_pairs; npair++) {
			Point p = pairs_list.get(npair);
			PairwiseOrthoMatch match = matches[p.x][p.y];
			y_vector[2*npair + 0] = match.equalize1to0[0];
			y_vector[2*npair + 1] = match.equalize1to0[1];
			double diff_seconds = Math.abs((int) ChronoUnit.SECONDS.between(
					maps_collection.ortho_maps[indices[p.x]].getLocalDateTime(),
					maps_collection.ortho_maps[indices[p.y]].getLocalDateTime())) % sec_24hrs;
			if (diff_seconds > (sec_24hrs/2)) {
				diff_seconds = sec_24hrs - diff_seconds;
			}
			// weight reduction because of different capture time
			double weight_dt = Math.max(Math.pow(0.5, diff_seconds/half_weight_sec), min_weight_sec);
			// weight reduction because of partial overlap
			double weight_overlap = Math.pow(match.getOverlap(), overlap_pow);
			double w = weight_dt * weight_overlap;
			weights[2*npair + 0] = w * scale_weight;
			weights[2*npair + 1] = w;
			sw += w * (1 + scale_weight);
		}
		double swp = sw;
		// individual scene pulls to {1.0, 0.0} for regularization
		for (int nscene = 0; nscene < num_scenes; nscene++) {
			double [] equalize = ignore_equalization ?
					(new double[] {1.0, 0.0}) :
					maps_collection.ortho_maps[indices[nscene]].getEqualize();
			int np =   2 * nscene;
			int indx = 2 * num_pairs + np;
			parameters_vector[np + 0] = equalize[0];
			parameters_vector[np + 1] = equalize[1];
			y_vector[indx + 0] = 1.0;
			y_vector[indx + 1] = 0.0;
			weights [indx + 0] = scale_weight * pull_weight;
			weights [indx + 1] = pull_weight;
			sw += pull_weight * (1 + scale_weight);
		}
		pure_weight = swp/sw;
		double s = 1.0/sw;
		for (int i = 0; i < weights.length; i++) {
			weights[i] *= s;
		}
		last_jt = new double [parameters_vector.length][];
		return weights.length;
	}

	/**
	 * Converting to use available_pairs[][] instead of indices[] - not yet used
	 * @param clt_parameters
	 * @param maps_collection
	 * @param available_pairs
	 * @param ignore_equalization
	 * @param scale_weight
	 * @param pull_weight
	 * @param half_weight_sec
	 * @param min_weight_sec
	 * @param use_inv
	 * @param overlap_pow
	 * @param debugLevel
	 * @return
	 */
	public int prepareLMA (
			CLTParameters       clt_parameters,
			OrthoMapsCollection maps_collection,
			int [][]            available_pairs,
			// scale_weight applies both to pairs mismatch and per-scene values difference from neutral {1.0, 0.0}
			boolean             ignore_equalization, // ignore previous equalization
			double              scale_weight,        // relative weight of scale differences compared to offset differences
			double              pull_weight,         // relative weight of offsets and scales differences from 1.0 to pairs mismatch
			double              half_weight_sec,     // 300 - time difference to reduce weight twice
			double              min_weight_sec,      // 0.01 weight of pairs at very different time
			boolean             use_inv,             // use inverse pairs
			double              overlap_pow,         // match weight as overlap fraction to this power
			int                 debugLevel) {
		int [] indices = maps_collection.getScenesFromPairs( // may be shorter, each element - absolute scene number used in pairs
				available_pairs, // pairs_defined_abs, // int [][] pairs,
				null);           // int [] indices_in) // preselected indices or null
		int [][] defined_pairs = maps_collection.condensePairs(available_pairs, indices); // pairs_defined_abs, indices);
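		// Note: defined_pairs is computed here but not used below - this overload still scans
		// all scene index pairs the same way as the indices[]-based prepareLMA() above, which
		// is consistent with the "not yet used" remark in the Javadoc.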
		this.indices = indices;
		num_scenes = indices.length;
		matches = new PairwiseOrthoMatch[indices.length][indices.length];
		ArrayList<Point> pairs_list = new ArrayList<Point>();
		for (int i = 0; i < num_scenes-1; i++) {
			int scene0 = indices[i];
			for (int j = i+1; j < num_scenes; j++){
				int scene1 = indices[j];
				PairwiseOrthoMatch match =     maps_collection.ortho_maps[scene0].getMatch(maps_collection.ortho_maps[scene1].getName());
				PairwiseOrthoMatch inv_match = use_inv ? maps_collection.ortho_maps[scene1].getMatch(maps_collection.ortho_maps[scene0].getName()) : null;
				if ((match != null) || (inv_match != null)){
					if (match == null) {
						double [] enuOffset = maps_collection.ortho_maps[scene0].enuOffsetTo(maps_collection.ortho_maps[scene1]);
						double [] rd = {enuOffset[0], -enuOffset[1]}; // {right,down} of the image
						match = inv_match.getInverse(rd);
					}
					if (inv_match == null) {
						double [] enuOffset = maps_collection.ortho_maps[scene1].enuOffsetTo(maps_collection.ortho_maps[scene0]);
						double [] rd = {enuOffset[0], -enuOffset[1]}; // {right,down} of the image
						inv_match = match.getInverse(rd);
					}
				}
				if ((match != null) && match.isSetEqualize2to1()) {
					matches[i][j] = match;
					matches[j][i] = inv_match;
					pairs_list.add(new Point(i,j)); // only once?
				}
			}
		}
		num_pairs = pairs_list.size();
		pairs = new int[num_pairs][2];
		for (int np = 0; np < num_pairs; np++) {
			Point pair = pairs_list.get(np);
			pairs[np] = new int[] {pair.x, pair.y};
		}
		pairs_ni = OrthoMultiLMA.createNonIntersectingPairs(pairs);
		y_vector =          new double [2*num_pairs + 2*num_scenes];
		weights =           new double [2*num_pairs + 2*num_scenes];
		parameters_vector = new double [2*num_scenes];
		int sec_24hrs = 60*60*24;
		double sw = 0;
		// pairwise matches
		for (int npair = 0; npair < num_pairs; npair++) {
			Point p = pairs_list.get(npair);
			PairwiseOrthoMatch match = matches[p.x][p.y];
			y_vector[2*npair + 0] = match.equalize1to0[0];
			y_vector[2*npair + 1] = match.equalize1to0[1];
			double diff_seconds = Math.abs((int) ChronoUnit.SECONDS.between(
					maps_collection.ortho_maps[indices[p.x]].getLocalDateTime(),
					maps_collection.ortho_maps[indices[p.y]].getLocalDateTime())) % sec_24hrs;
			if (diff_seconds > (sec_24hrs/2)) {
				diff_seconds = sec_24hrs - diff_seconds;
			}
			// weight reduction because of different capture time
			double weight_dt = Math.max(Math.pow(0.5, diff_seconds/half_weight_sec), min_weight_sec);
			// weight reduction because of partial overlap
			double weight_overlap = Math.pow(match.getOverlap(), overlap_pow);
			double w = weight_dt * weight_overlap;
			weights[2*npair + 0] = w * scale_weight;
			weights[2*npair + 1] = w;
			sw += w * (1 + scale_weight);
		}
		double swp = sw;
		// individual scene pulls to {1.0, 0.0} for regularization
		for (int nscene = 0; nscene < num_scenes; nscene++) {
			double [] equalize = ignore_equalization ?
					(new double[] {1.0, 0.0}) :
					maps_collection.ortho_maps[indices[nscene]].getEqualize();
			int np =   2 * nscene;
			int indx = 2 * num_pairs + np;
			parameters_vector[np + 0] = equalize[0];
			parameters_vector[np + 1] = equalize[1];
			y_vector[indx + 0] = 1.0;
			y_vector[indx + 1] = 0.0;
			weights [indx + 0] = scale_weight * pull_weight;
			weights [indx + 1] = pull_weight;
			sw += pull_weight * (1 + scale_weight);
		}
		pure_weight = swp/sw;
		double s = 1.0/sw;
		for (int i = 0; i < weights.length; i++) {
			weights[i] *= s;
		}
		last_jt = new double [parameters_vector.length][];
		return weights.length;
	}

	public int runLma( // <0 - failed, >=0 iteration number (1 - immediately)
			double  lambda,            // 0.1
			double  lambda_scale_good, // 0.5
			double  lambda_scale_bad,  // 8.0
			double  lambda_max,        // 100
			double  rms_diff,          // 0.001
			int     num_iter,          // 20
			boolean last_run,
			String  dbg_prefix,
			int     debug_level) {
		boolean [] rslt = {false, false};
		this.last_rms = null; // remove?
		int iter = 0;
		if (dbg_prefix != null) {
			// debugStateImage(dbg_prefix+"-initial");
		}
		for (iter = 0; iter < num_iter; iter++) {
			rslt = lmaStep(
					lambda,
					rms_diff,
					debug_level);
			if (dbg_prefix != null) {
				// debugStateImage(dbg_prefix+"-step_"+iter);
			}
			if (rslt == null) {
				return -1; // false; // need to check
			}
			if (debug_level > 1) {
				System.out.println("LMA step "+iter+": {"+rslt[0]+","+rslt[1]+"} full RMS= "+good_or_bad_rms[0]+
						" ("+initial_rms[0]+"), pure RMS="+good_or_bad_rms[1]+" ("+initial_rms[1]+") + lambda="+lambda);
			}
			if (rslt[1]) {
				break;
			}
			if (rslt[0]) { // good
				lambda *= lambda_scale_good;
			} else {
				lambda *= lambda_scale_bad;
				if (lambda > lambda_max) {
					break; // not used in lwir
				}
			}
		}
		if (rslt[0]) { // better
			if (iter >= num_iter) { // better, but num tries exceeded
				if (debug_level > 1) System.out.println("Step "+iter+": Improved, but number of steps exceeded maximal");
			} else {
				if (debug_level > 1) System.out.println("Step "+iter+": LMA: Success");
			}
		} else { // improved over initial ?
			if (last_rms[0] < initial_rms[0]) { // NaN
				rslt[0] = true;
				if (debug_level > 1) System.out.println("Step "+iter+": Failed to converge, but result improved over initial");
			} else {
				if (debug_level > 1) System.out.println("Step "+iter+": Failed to converge");
			}
		}
		boolean show_intermediate = true;
		if (show_intermediate && (debug_level > 0)) {
			System.out.println("LMA: full RMS="+last_rms[0]+" ("+initial_rms[0]+"), pure RMS="+last_rms[1]+" ("+initial_rms[1]+") + lambda="+lambda);
		}
		if (debug_level > 2){
			System.out.println("iteration="+iter);
		}
		if (debug_level > 0) {
			if ((debug_level > 1) || last_run) { // (iter == 1) || last_run) {
				if (!show_intermediate) {
					System.out.println("LMA: iter="+iter+", full RMS="+last_rms[0]+" ("+initial_rms[0]+"), pure RMS="+last_rms[1]+" ("+initial_rms[1]+") + lambda="+lambda);
				}
			}
		}
		if ((debug_level > -2) && !rslt[0]) { // failed
			if ((debug_level > 1) || (iter == 1) || last_run) {
				System.out.println("LMA failed on iteration = "+iter);
			}
			System.out.println();
		}
		return rslt[0] ? iter : -1;
	}
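	/*
	 * Note (standard Levenberg-Marquardt step, as implemented by lmaStep(), getWJtJlambda()
	 * and getYminusFxWeighted() below): with W the diagonal of the normalized weights,
	 * J the Jacobian stored (transposed) in last_jt, and r = y - f(p) the residuals, each
	 * iteration solves
	 *     (Jt*W*J + lambda*diag(Jt*W*J)) * delta = Jt*W*r
	 * and evaluates p + delta; lambda is decreased (lambda_scale_good) after an improving
	 * step and increased (lambda_scale_bad) otherwise.
	 */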
	private boolean [] lmaStep(
			double lambda,
			double rms_diff,
			int    debug_level) {
		boolean [] rslt = {false, false};
		// maybe the following if() branch is not needed - already done in prepareLMA !
		if (this.last_rms == null) { // first time, need to calculate all (vector is valid)
			last_rms = new double[2];
			if (debug_level > 1) {
				System.out.println("lmaStep(): first step");
			}
			double [] fx = getFxDerivs(
					parameters_vector, // double [] vector,
					last_jt,           // final double [][] jt, // should be null or initialized with [vector.length][]
					debug_level);      // final int debug_level)
			last_ymfx = getYminusFxWeighted(
					fx,                // final double [] fx,
					last_rms);         // final double [] rms_fp // null or [2]
			this.initial_rms =     this.last_rms.clone();
			this.good_or_bad_rms = this.last_rms.clone();
			if (debug_level > -1) { // temporary
				/*
				dbgYminusFxWeight(
						this.last_ymfx,
						this.weights,
						"Initial_y-fX_after_moving_objects");
				*/
			}
			if (last_ymfx == null) {
				return null; // need to re-init/restart LMA
			}
			// TODO: Restore/implement
			if (debug_level > 3) {
				/*
				dbgJacobians(
						corr_vector, // GeometryCorrection.CorrVector corr_vector,
						1E-5,        // double delta,
						true);       // boolean graphic)
				*/
			}
		}
		Matrix y_minus_fx_weighted = new Matrix(this.last_ymfx, this.last_ymfx.length);
		Matrix wjtjlambda = new Matrix(getWJtJlambda(
				lambda,         // *10, // temporary
				this.last_jt)); // double [][] jt)
		if (debug_level > 2) {
			System.out.println("JtJ + lambda*diag(JtJ)");
			wjtjlambda.print(18, 6);
		}
		Matrix jtjl_inv = null;
		try {
			jtjl_inv = wjtjlambda.inverse(); // check for errors
		} catch (RuntimeException e) {
			rslt[1] = true;
			if (debug_level > 0) {
				System.out.println("Singular Matrix!");
			}
			return rslt;
		}
		if (debug_level > 2) {
			System.out.println("(JtJ + lambda*diag(JtJ)).inv()");
			jtjl_inv.print(18, 6);
		}
		// last_jt has NaNs
		Matrix jty = (new Matrix(this.last_jt)).times(y_minus_fx_weighted);
		if (debug_level > 2) {
			System.out.println("Jt * (y-fx)");
			jty.print(18, 6);
		}
		Matrix mdelta = jtjl_inv.times(jty);
		if (debug_level > 2) {
			System.out.println("mdelta");
			mdelta.print(18, 6);
		}
		double scale = 1.0;
		double [] delta =      mdelta.getColumnPackedCopy();
		double [] new_vector = parameters_vector.clone();
		for (int i = 0; i < parameters_vector.length; i++) {
			new_vector[i] += scale * delta[i];
		}
		double [] fx = getFxDerivs(
				new_vector,   // double [] vector,
				last_jt,      // final double [][] jt, // should be null or initialized with [vector.length][]
				debug_level); // final int debug_level)
		double [] rms = new double[2];
		last_ymfx = getYminusFxWeighted(
				// vector_XYS, // final double [][] vector_XYS,
				fx,   // final double [] fx,
				rms); // final double [] rms_fp // null or [2]
		if (debug_level > 2) {
			/*
			dbgYminusFx(this.last_ymfx, "next y-fX");
			dbgXY(new_vector, "XY-correction");
			*/
		}
		if (last_ymfx == null) {
			return null; // need to re-init/restart LMA
		}
		this.good_or_bad_rms = rms.clone();
		if (rms[0] < this.last_rms[0]) { // improved
			rslt[0] = true;
			rslt[1] = rms[0] >= (this.last_rms[0] * (1.0 - rms_diff));
			this.last_rms = rms.clone();
			this.parameters_vector = new_vector.clone();
			if (debug_level > 2) { // print vectors in some format
				/*
				System.out.print("delta: "+corr_delta.toString()+"\n");
				System.out.print("New vector: "+new_vector.toString()+"\n");
				System.out.println();
				*/
			}
		} else { // worsened
			rslt[0] = false;
			rslt[1] = false; // do not know, caller will decide
			// restore state
			fx = getFxDerivs(
					parameters_vector, // double [] vector,
					last_jt,           // final double [][] jt, // should be null or initialized with [vector.length][]
					debug_level);      // final int debug_level)
			last_ymfx = getYminusFxWeighted(
					fx,             // final double [] fx,
					this.last_rms); // final double [] rms_fp // null or [2]
			if (last_ymfx == null) {
				return null; // need to re-init/restart LMA
			}
			if (debug_level > 2) {
				/*
				dbgJacobians(
						corr_vector, // GeometryCorrection.CorrVector corr_vector,
						1E-5,        // double delta,
						true);       // boolean graphic)
				*/
			}
		}
		return rslt;
	}
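	/*
	 * Note: getWJtJlambda() below builds the weighted approximate Hessian Jt*W*J. Only the
	 * upper triangle is computed and mirrored to the lower one; the diagonal elements are
	 * scaled by (1 + lambda), which is the damping term of the LMA step described above.
	 */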
	private double [][] getWJtJlambda(
			final double      lambda,
			final double [][] jt) {
		final int num_pars =   jt.length;
		final int num_pars2 =  num_pars * num_pars;
		final int nup_points = jt[0].length;
		final double [][] wjtjl = new double [num_pars][num_pars];
		final Thread[] threads = ImageDtt.newThreadArray();
		final AtomicInteger ai = new AtomicInteger(0);
		for (int ithread = 0; ithread < threads.length; ithread++) {
			threads[ithread] = new Thread() {
				public void run() {
					for (int indx = ai.getAndIncrement(); indx < num_pars2; indx = ai.getAndIncrement()) {
						int i = indx / num_pars;
						int j = indx % num_pars;
						if (j >= i) {
							double d = 0.0;
							for (int k = 0; k < nup_points; k++) {
								d += weights[k]*jt[i][k]*jt[j][k];
							}
							wjtjl[i][j] = d;
							if (i == j) {
								wjtjl[i][j] += d * lambda;
							} else {
								wjtjl[j][i] = d;
							}
						}
					}
				}
			};
		}
		ImageDtt.startAndJoin(threads);
		return wjtjl;
	}

	private double [] getYminusFxWeighted(
			final double [] fx,
			final double [] rms_fp // null or [2]
			) {
		final Thread[] threads = ImageDtt.newThreadArray();
		final AtomicInteger ai = new AtomicInteger(0);
		final double [] wymfw = new double [fx.length];
		double [] swd2 = new double[num_pairs];
		for (int ithread = 0; ithread < threads.length; ithread++) {
			threads[ithread] = new Thread() {
				public void run() {
					for (int nPair = ai.getAndIncrement(); nPair < num_pairs; nPair = ai.getAndIncrement()) {
						int np2 = 2*nPair;
						for (int i1 = 0; i1 < 2; i1++) {
							int i = np2 + i1;
							double d =  y_vector[i] - fx[i];
							double wd = d * weights[i];
							wymfw[i] = wd;
							swd2[nPair] += d * wd;
						}
					}
				}
			};
		}
		ImageDtt.startAndJoin(threads);
		double s_rms = 0;
		for (int nPair = 0; nPair < num_pairs; nPair++) {
			s_rms += swd2[nPair];
		}
		double s_rms_pure = s_rms/pure_weight;
		// num_scenes is not that large; sum w/o threads
		for (int nScene = 0; nScene < num_scenes; nScene++) {
			for (int i1 = 0; i1 < 2; i1++) {
				int i = 2*num_pairs + 2*nScene + i1;
				double d =  y_vector[i] - fx[i];
				double wd = d * weights[i];
				wymfw[i] = wd;
				s_rms += d * wd;
			}
		}
		if (rms_fp != null) {
			rms_fp[0] = Math.sqrt(s_rms);
			if (rms_fp.length > 1) {
				rms_fp[1] = Math.sqrt(s_rms_pure);
			}
		}
		return wymfw;
	}

	public double [] getRms()        { return last_rms; }
	public double [] getInitialRms() { return initial_rms; }

	public void updateEqualize(OrthoMapsCollection maps_collection) {
		for (int nscene = 0; nscene < num_scenes; nscene++) {
			double [] equalize = maps_collection.ortho_maps[indices[nscene]].getEqualize();
			equalize[0] = parameters_vector[2*nscene + 0];
			equalize[1] = parameters_vector[2*nscene + 1];
		}
	}

	public double [][] getEqualize(){
		double [][] equalize = new double [num_scenes][];
		for (int nscene = 0; nscene < num_scenes; nscene++) {
			equalize[nscene] = new double [] {parameters_vector[2*nscene + 0], parameters_vector[2*nscene + 1]};
		}
		return equalize;
	}
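	/*
	 * Note on the model evaluated by getFxDerivs() below: each scene n has an equalization
	 * pair {a_n, b_n} so that its equalized pixel value is a_n*v + b_n. Requiring the
	 * equalized values of two overlapping scenes to agree,
	 *     a0*v0 + b0 = a1*v1 + b1   =>   v0 = (a1/a0)*v1 + (b1 - b0)/a0,
	 * so the predicted pairwise transform compared to the measured equalize1to0 is
	 * A = a1/a0, B = (b1 - b0)/a0; the Jacobian entries in the code are the partial
	 * derivatives of A and B with respect to a0, b0, a1, b1. The trailing regularization
	 * rows pull each scene's {a, b/a} towards {1, 0} with the (small) pull_weight.
	 */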
	private double [] getFxDerivs(
			final double []   vector,
			final double [][] jt, // should be null or initialized with [vector.length][]
			final int         debug_level) {
		final double [] fx = new double [weights.length];
		if (jt != null) {
			for (int i = 0; i < jt.length; i++) {
				jt[i] = new double [weights.length];
			}
		}
		final Thread[] threads = ImageDtt.newThreadArray();
		final AtomicInteger ai = new AtomicInteger(0);
		for (int pair_group = 0; pair_group < pairs_ni.length; pair_group++) {
			final int fpair_group = pair_group;
			for (int ithread = 0; ithread < threads.length; ithread++) {
				threads[ithread] = new Thread() {
					public void run() {
						for (int iPair = ai.getAndIncrement(); iPair < pairs_ni[fpair_group].length; iPair = ai.getAndIncrement()) {
							int nPair = pairs_ni[fpair_group][iPair]; // index in pairs[][]
							int [] pair = pairs[nPair];
							double a0 = vector[2*pair[0] + 0], a1 = vector[2*pair[1] + 0];
							double b0 = vector[2*pair[0] + 1], b1 = vector[2*pair[1] + 1];
							double a = a1/a0;
							double b = (b1 - b0)/a0;
							fx[2*nPair + 0] = a;
							fx[2*nPair + 1] = b;
							if (jt != null) {
								jt[2*pair[0] + 0][2*nPair + 0] += -a1/a0/a0;      // da/da0
								jt[2*pair[0] + 0][2*nPair + 1] += -(b1-b0)/a0/a0; // db/da0
								jt[2*pair[1] + 0][2*nPair + 0] +=  1/a0;          // da/da1
///								jt[2*pair[1] + 0][2*nPair + 1] +=  0;             // db/da1
///								jt[2*pair[0] + 1][2*nPair + 0] +=  0;             // da/db0
								jt[2*pair[0] + 1][2*nPair + 1] += -1/a0;          // db/db0
///								jt[2*pair[1] + 1][2*nPair + 0] +=  0;             // da/db1
								jt[2*pair[1] + 1][2*nPair + 1] +=  1/a0;          // db/db1
							}
						}
					}
				};
			}
			ImageDtt.startAndJoin(threads);
			ai.set(0);
		}
		for (int ithread = 0; ithread < threads.length; ithread++) {
			threads[ithread] = new Thread() {
				public void run() {
					for (int nScene = ai.getAndIncrement(); nScene < num_scenes; nScene = ai.getAndIncrement()){
						int npar = 2 * nScene;
						int ny =   2 * num_pairs + 2 * nScene;
						double a = vector[npar + 0];
						double b = vector[npar + 1];
						fx[ny + 0] = a;
						fx[ny + 1] = b/a;
						if (jt != null) {
							jt[npar + 0][ny + 0] =  1;
							jt[npar + 0][ny + 1] = -b/a/a;
///							jt[npar + 1][ny + 0] =  0;
							jt[npar + 1][ny + 1] =  1/a;
						}
					}
				}
			};
		}
		ImageDtt.startAndJoin(threads);
		return fx;
	}
}
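/*
 * Typical call sequence (a sketch mirroring buildEqualize() above; the CLTParameters and
 * OrthoMapsCollection instances, the scene indices and the numeric values are assumed to
 * come from the calling pipeline and its defaults, not prescribed here):
 *
 *   OrthoEqualizeLMA oel = new OrthoEqualizeLMA();
 *   oel.prepareLMA(clt_parameters, maps_collection, indices,
 *           false,  // ignore_equalization
 *           500.0,  // scale_weight
 *           0.001,  // pull_weight
 *           300.0,  // half_weight_sec
 *           0.01,   // min_weight_sec
 *           false,  // use_inv
 *           2.0,    // overlap_pow
 *           0);     // debugLevel
 *   if (oel.runLma(0.1, 0.5, 8.0, 100, 1e-6, 100, true, null, 0) >= 0) {
 *       oel.updateEqualize(maps_collection); // write the fitted per-scene {a, b} back
 *   }
 */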