Commit 524c2e2f authored by Andrey Filippov

started geometry correction

parent f15b9b74
......@@ -92,8 +92,11 @@ public class GPUTileProcessor {
static String GPU_RESOURCE_DIR = "kernels";
static String [] GPU_KERNEL_FILES = {"dtt8x8.cuh","TileProcessor.cuh"};
// "*" - generated defines, first index - separately compiled unit
// static String [][] GPU_SRC_FILES = {{"*","dtt8x8.h","dtt8x8.cu"},{"*","dtt8x8.h","TileProcessor.cuh"}};
static String [][] GPU_SRC_FILES = {{"*","dtt8x8.h","dtt8x8.cu","TileProcessor.cuh"}};
/* static String [][] GPU_SRC_FILES = {
{"*","dtt8x8.h","dtt8x8.cu"},
{"*","dtt8x8.h","geometry_correction.h","TileProcessor.h","TileProcessor.cuh"}};
*/
static String [][] GPU_SRC_FILES = {{"*","dtt8x8.h","dtt8x8.cu","geometry_correction.h","TileProcessor.h","TileProcessor.cuh"}};
// static String [][] GPU_SRC_FILES = {{"*","dtt8x8.cuh","TileProcessor.cuh"}};
static String GPU_CONVERT_CORRECT_TILES_NAME = "convert_correct_tiles"; // name in C code
static String GPU_IMCLT_RBG_NAME = "imclt_rbg"; // name in C code
......
package com.elphel.imagej.tileprocessor;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.util.ArrayList;
import java.util.Properties;
......@@ -103,12 +110,111 @@ public class GeometryCorrection {
public CorrVector extrinsic_corr;
public RigOffset rigOffset = null;
public int [] woi_tops; // used to calculate scanline timing
public float [] toFloatArray() { // for GPU comparison
return new float[] {
(float) focalLength, // =FOCAL_LENGTH;
(float) pixelSize, // = PIXEL_SIZE; //um
(float) distortionRadius, // = DISTORTION_RADIUS; // mm - half width of the sensor
(float) distortionA8, //r^8 (normalized to focal length or to sensor half width?)
(float) distortionA7, //r^7 (normalized to focal length or to sensor half width?)
(float) distortionA6, //r^6 (normalized to focal length or to sensor half width?)
(float) distortionA5, //r^5 (normalized to focal length or to sensor half width?)
(float) distortionA, // r^4 (normalized to focal length or to sensor half width?)
(float) distortionB, // r^3
(float) distortionC, // r^2
// parameters, common for all sensors
(float) elevation, // degrees, up - positive;
(float) heading, // degrees, CW (from top) - positive
(float) forward[0], (float) forward[1], (float) forward[2], (float) forward[3], // [NUM_CAMS];
(float) right[0], (float) right[1], (float) right[2], (float) right[3], // [NUM_CAMS];
(float) height[0], (float) height[1], (float) height[2], (float) height[3], // [NUM_CAMS];
(float) roll[0], (float) roll[1], (float) roll[2], (float) roll[3], // [NUM_CAMS]; // degrees, CW (to target) - positive
(float) common_right, // mm right, camera center
(float) common_forward, // mm forward (to target), camera center
(float) common_height, // mm up, camera center
(float) common_roll, // degrees CW (to target) camera as a whole
// (float) [][] XYZ_he; // all camera coordinates transformed to eliminate heading and elevation (rolls preserved)
// (float) [][] XYZ_her = null; // XYZ of the lenses in a corrected CCS (adjusted to elevation, heading, common_roll)
(float) rXY[0][0], (float) rXY[0][1], // [NUM_CAMS][2]; // XY pairs of the camera centers in a normal plane, relative to disparityRadius
(float) rXY[1][0], (float) rXY[1][1],
(float) rXY[2][0], (float) rXY[2][1],
(float) rXY[3][0], (float) rXY[3][1],
// (float) [][] rXY_ideal = {{-0.5, -0.5}, {0.5,-0.5}, {-0.5, 0.5}, {0.5,0.5}};
// only used for the multi-quad systems
(float) cameraRadius, // average distance from the "mass center" of the sensors to the sensors
(float) disparityRadius //=150.0; // distance between cameras to normalize disparity units to. sqrt(2)*disparityRadius for quad
};
}
public int [] getWOITops() {// not used in lwir
return woi_tops;
}
public double [] getRByRDist() {
return this.rByRDist;
}
public double getStepR() {
return this.stepR;
}
// save files for GPU comparison
public void saveFloatsGPU(String file_prefix) throws IOException {
// Save GeometryCorrection global data
int sizeof_float = 4;
{
String gc_path = file_prefix+".geometry_correction";
FileOutputStream fos = new FileOutputStream(gc_path);
DataOutputStream dos = new DataOutputStream(fos);
WritableByteChannel channel = Channels.newChannel(dos);
float [] fgc = toFloatArray();
ByteBuffer bb = ByteBuffer.allocate(fgc.length * sizeof_float);
bb.order(ByteOrder.LITTLE_ENDIAN);
bb.clear();
for (int i = 0; i < fgc.length; i++) {
bb.putFloat(fgc[i]);
}
bb.flip();
channel.write(bb);
dos.close();
}
{
String gc_path = file_prefix+".correction_vector";
FileOutputStream fos = new FileOutputStream(gc_path);
DataOutputStream dos = new DataOutputStream(fos);
WritableByteChannel channel = Channels.newChannel(dos);
float [] fcv = getCorrVector().toFloatArray();
ByteBuffer bb = ByteBuffer.allocate(fcv.length * sizeof_float);
bb.order(ByteOrder.LITTLE_ENDIAN);
bb.clear();
for (int i = 0; i < fcv.length; i++) {
bb.putFloat(fcv[i]);
}
bb.flip();
channel.write(bb);
dos.close();
}
//double [] getRByRDist()
{
String gc_path = file_prefix+".rbyrdist";
FileOutputStream fos = new FileOutputStream(gc_path);
DataOutputStream dos = new DataOutputStream(fos);
WritableByteChannel channel = Channels.newChannel(dos);
double [] rByRDist = getRByRDist();
ByteBuffer bb = ByteBuffer.allocate(rByRDist.length * sizeof_float);
bb.order(ByteOrder.LITTLE_ENDIAN);
bb.clear();
for (int i = 0; i < rByRDist.length; i++) {
bb.putFloat((float) rByRDist[i]);
}
bb.flip();
channel.write(bb);
dos.close();
}
}
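The three files written above (".geometry_correction", ".correction_vector", ".rbyrdist") are headerless streams of little-endian 32-bit floats intended for GPU-side comparison (the rByRDist table is cast from double to float before writing). As a minimal sketch of the consumer side, not part of this commit, they could be read back in C host code like this; the helper name and the assumption of a little-endian host are mine:

/* Hypothetical reader for the raw float dumps produced by saveFloatsGPU().
 * Assumes a little-endian host; returns NULL on any I/O error. */
#include <stdio.h>
#include <stdlib.h>

static float * read_float_dump(const char * path, size_t * num_floats)
{
    FILE * f = fopen(path, "rb");
    if (f == NULL) return NULL;
    fseek(f, 0, SEEK_END);
    long nbytes = ftell(f);
    fseek(f, 0, SEEK_SET);
    *num_floats = (size_t) nbytes / sizeof(float);
    float * data = (float *) malloc(*num_floats * sizeof(float));
    if ((data == NULL) || (fread(data, sizeof(float), *num_floats, f) != *num_floats)) {
        free(data);
        data = NULL;
    }
    fclose(f);
    return data;
}

For the layout produced by toFloatArray() above, the ".geometry_correction" dump should hold 42 floats: 10 focal/pixel/distortion scalars, elevation, heading, 4 each of forward/right/height/roll, 4 common_* values, 4 rXY pairs, cameraRadius and disparityRadius.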
public int [] getSensorWH() {
int [] wh = {this.pixelCorrectionWidth, this.pixelCorrectionHeight};
......@@ -1333,6 +1439,17 @@ public class GeometryCorrection {
return new CorrVector(vector);
}
public float [] toFloatArray() {
if (vector == null) {
return null;
}
float [] fvector = new float [vector.length];
for (int i = 0; i < vector.length; i++) {
fvector[i] = (float) vector[i];
}
return fvector;
}
public double [] toArray() // USED in lwir
{
return vector;
......@@ -2721,12 +2838,9 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
double pYci = rvi.get(1, 0) * norm_z;
// debug
double norm_z_dbg = fl_pix/vi.get(2, 0);
double pXci_dbg = vi.get(0, 0) * norm_z_dbg;
double pYci_dbg = vi.get(1, 0) * norm_z_dbg;
// Re-apply distortion
double rNDi = Math.sqrt(pXci*pXci + pYci*pYci); // in pixels
......@@ -2734,8 +2848,8 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
double ri = rNDi* ri_scale; // relative to distortion radius
// double rD2rND = (1.0 - distortionA8 - distortionA7 - distortionA6 - distortionA5 - distortionA - distortionB - distortionC);
double rNDi_dbg = Math.sqrt(pXci_dbg*pXci_dbg + pYci_dbg*pYci_dbg); // in pixels
double ri_dbg = rNDi_dbg* ri_scale; // relative to distortion radius
double rD2rND = 1.0;
......@@ -2745,13 +2859,14 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
rD2rND += rad_coeff[j]*(rri - 1.0); // Fixed
}
/*
double rD2rND_dbg = 1.0;
double rri_dbg = 1.0;
for (int j = 0; j < rad_coeff.length; j++){
rri_dbg *= ri_dbg;
rD2rND_dbg += rad_coeff[j]*(rri_dbg - 1.0); // Fixed
}
*/
// Get port pixel coordinates by scaling the 2d vector with the Rdistorted/Rnondistorted coefficient
......@@ -2842,7 +2957,6 @@ matrix([[-0.125, -0.125, 0.125, 0.125, -0.125, 0.125, -0. , -0. , -0.
double ers_Yci = delta_t* (dpYci_dtilt * imu[0] + dpYci_dazimuth * imu[1] + dpYci_droll * imu[2]);
if (xyz != null) {
double k = SCENE_UNITS_SCALE * this.disparityRadius;
// double wdisparity = -(k * this.focalLength / (0.001*this.pixelSize)) / xyz[2];
double wdisparity = disparity;
double dwdisp_dz = (k * this.focalLength / (0.001*this.pixelSize)) / (xyz[2] * xyz[2]);
dpXci_pYci_imu_lin[0][0] = -wdisparity / k; // dpx/ dworld_X
......
......@@ -2548,40 +2548,6 @@ public class ImageDtt {
{ 0.5, 0.5}};
final int transform_len = transform_size * transform_size;
/*
final double [] filter_direct= new double[transform_len];
if (corr_sigma == 0) {
filter_direct[0] = 1.0;
for (int i= 1; i<filter_direct.length;i++) filter_direct[i] =0;
} else {
for (int i = 0; i < transform_size; i++){
for (int j = 0; j < transform_size; j++){
filter_direct[i*transform_size+j] = Math.exp(-(i*i+j*j)/(2*corr_sigma)); // FIXME: should be sigma*sigma !
}
}
}
// normalize
double sum = 0;
for (int i = 0; i < transform_size; i++){
for (int j = 0; j < transform_size; j++){
double d = filter_direct[i*transform_size+j];
d*=Math.cos(Math.PI*i/(2*transform_size))*Math.cos(Math.PI*j/(2*transform_size));
if (i > 0) d*= 2.0;
if (j > 0) d*= 2.0;
sum +=d;
}
}
for (int i = 0; i<filter_direct.length; i++){
filter_direct[i] /= sum;
}
DttRad2 dtt = new DttRad2(transform_size);
final double [] filter= dtt.dttt_iiie(filter_direct);
for (int i=0; i < filter.length;i++) filter[i] *= 2*transform_size;
*/
final double [] filter = doubleGetCltLpfFd(corr_sigma);
// prepare disparity maps and weights
......
......@@ -1502,10 +1502,16 @@ public class TwoQuadCLT {
true);
} catch (IOException e) {
System.out.println("Failed to save flattened kernels tp "+kernel_dir);
// TODO Auto-generated catch block
e.printStackTrace();
} // boolean transpose);
try {
quadCLT_main.getGeometryCorrection().saveFloatsGPU(kernel_dir +"main");
} catch (IOException e) {
System.out.println("Failed to save geometry correction data to "+kernel_dir);
e.printStackTrace();
}
if (debugLevel < -1000) {
return null;
}
......
......@@ -41,6 +41,8 @@
#ifndef JCUDA
#include "tp_defines.h"
#include "dtt8x8.h"
#include "geometry_correction.h"
#include "TileProcessor.h"
#endif // #ifndef JCUDA
#define TASK_TEXTURE_BITS ((1 << TASK_TEXTURE_N_BIT) | (1 << TASK_TEXTURE_E_BIT) | (1 << TASK_TEXTURE_S_BIT) | (1 << TASK_TEXTURE_W_BIT))
......@@ -106,11 +108,12 @@
#define DBG_TILE_Y 111 // 66
#define DBG_TILE (DBG_TILE_Y * 324 + DBG_TILE_X)
#undef DBG_MARK_DBG_TILE 1
#undef DBG_MARK_DBG_TILE
//56494
// struct tp_task
//#define TASK_SIZE 12
#if 0
struct tp_task {
int task;
union {
......@@ -119,6 +122,7 @@ struct tp_task {
};
float xy[NUM_CAMS][2];
};
#endif
struct CltExtra{
float data_x; // kernel data is relative to this displacement X (0.5 pixel increments)
float data_y; // kernel data is relative to this displacement Y (0.5 pixel increments)
......@@ -826,6 +830,7 @@ __device__ void imclt_plane( // not implemented, not used
float * gpu_rbg, // WIDTH, HEIGHT
const size_t dstride); // in floats (pixels)
#if 0
extern "C"
__global__ void clear_texture_list(
int * gpu_texture_indices,// packed tile + bits (now only (1 << 7)
......@@ -892,6 +897,7 @@ __global__ void imclt_rbg(
int h_offset,
const size_t dstride); // in floats (pixels)
//===========================
#endif
extern "C"
__global__ void correlate2D(
......
/**
**
** TileProcessor.h
**
** Copyright (C) 2020 Elphel, Inc.
**
** -----------------------------------------------------------------------------**
**
** TileProcessor.h is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program. If not, see <http://www.gnu.org/licenses/>.
**
** Additional permission under GNU GPL version 3 section 7
**
** If you modify this Program, or any covered work, by linking or
** combining it with NVIDIA Corporation's CUDA libraries from the
** NVIDIA CUDA Toolkit (or a modified version of those libraries),
** containing parts covered by the terms of NVIDIA CUDA Toolkit
** EULA, the licensors of this Program grant you additional
** permission to convey the resulting work.
** -----------------------------------------------------------------------------**
*/
/**
**************************************************************************
* \file TileProcessor.h
* \brief header file for the Tile Processor for frequency domain
*/
#pragma once
#ifndef NUM_CAMS
#include "tp_defines.h"
#endif
extern "C" __global__ void clear_texture_list(
int * gpu_texture_indices,// packed tile + bits (now only (1 << 7)
int width, // <= TILESX, use for faster processing of LWIR images
int height); // <= TILESY, use for faster processing of LWIR images
extern "C" __global__ void mark_texture_tiles(
struct tp_task * gpu_tasks,
int num_tiles, // number of tiles in task list
int * gpu_texture_indices); // packed tile + bits (now only (1 << 7)
extern "C" __global__ void mark_texture_neighbor_tiles(
struct tp_task * gpu_tasks,
int num_tiles, // number of tiles in task list
int * gpu_texture_indices, // packed tile + bits (now only (1 << 7)
int * woi); // x,y,width,height of the woi
extern "C" __global__ void gen_texture_list(
struct tp_task * gpu_tasks,
int num_tiles, // number of tiles in task list
int * gpu_texture_indices, // packed tile + bits (now only (1 << 7)
int * num_texture_tiles, // number of texture tiles to process
int * woi); // x,y,width,height of the woi
extern "C" __global__ void clear_texture_rbga(
int texture_width,
int texture_slice_height,
const size_t texture_rbga_stride, // in floats 8*stride
float * gpu_texture_tiles); // (number of colors +1 + ?)*16*16 rgba texture tiles
extern "C" __global__ void textures_accumulate(
// int border_tile, // if 1 - watch for border
int * woi, // x, y, width,height
float ** gpu_clt, // [NUM_CAMS] ->[TILESY][TILESX][NUM_COLORS][DTT_SIZE*DTT_SIZE]
size_t num_texture_tiles, // number of texture tiles to process
int * gpu_texture_indices,// packed tile + bits (now only (1 << 7)
float * gpu_port_offsets, // relative ports x,y offsets - just to scale differences, may be approximate
int colors, // number of colors (3/1)
int is_lwir, // do not perform shot correction
float min_shot, // 10.0
float scale_shot, // 3.0
float diff_sigma, // pixel value/pixel change
float diff_threshold, // pixel value/pixel change
float min_agree, // minimal number of channels to agree on a point (real number to work with fuzzy averages)
float weight0, // scale for R
float weight1, // scale for B
float weight2, // scale for G
int dust_remove, // Do not reduce average weight when only one image differs much from the average
int keep_weights, // return channel weights after A in RGBA (was removed) (should be 0 if gpu_texture_rbg)?
// combining both non-overlap and overlap (each calculated if pointer is not null )
size_t texture_rbg_stride, // in floats
float * gpu_texture_rbg, // (number of colors +1 + ?)*16*16 rgba texture tiles
size_t texture_stride, // in floats (now 256*4 = 1024)
float * gpu_texture_tiles); // (number of colors +1 + ?)*16*16 rgba texture tiles
extern "C" __global__ void imclt_rbg(
float * gpu_clt, // [TILESY][TILESX][NUM_COLORS][DTT_SIZE*DTT_SIZE]
float * gpu_rbg, // WIDTH, 3 * HEIGHT
int apply_lpf,
int mono, // defines lpf filter
int color, // defines location of clt data
int v_offset,
int h_offset,
const size_t dstride); // in floats (pixels)
/**
**
** geometry_correction.h
**
** Copyright (C) 2020 Elphel, Inc.
**
** -----------------------------------------------------------------------------**
**
** geometry_correction.h is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program. If not, see <http://www.gnu.org/licenses/>.
**
** Additional permission under GNU GPL version 3 section 7
**
** If you modify this Program, or any covered work, by linking or
** combining it with NVIDIA Corporation's CUDA libraries from the
** NVIDIA CUDA Toolkit (or a modified version of those libraries),
** containing parts covered by the terms of NVIDIA CUDA Toolkit
** EULA, the licensors of this Program grant you additional
** permission to convey the resulting work.
** -----------------------------------------------------------------------------**
*/
/**
**************************************************************************
* \file geometry_correction.h
* \brief header file for geometry correction - per-tile/per camera calculation of the tile offset
*/
#pragma once
#ifndef NUM_CAMS
#include "tp_defines.h"
#endif
struct tp_task {
int task;
union {
int txy;
unsigned short sxy[2];
};
float xy[NUM_CAMS][2];
};
struct corr_vector{
float tilt [NUM_CAMS-1]; // 0..2
float azimuth [NUM_CAMS-1]; // 3..5
float roll [NUM_CAMS]; // 6..9
float zoom [NUM_CAMS-1]; // 10..12
// for ERS correction:
float imu_rot [3]; // d_tilt/dt (rad/s), d_az/dt, d_roll/dt 13..15
float imu_move[3]; // dx/dt, dy/dt, dz/dt 16..18
};
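With NUM_CAMS equal to 4 (the quad-camera configuration this code targets), corr_vector packs 3 + 3 + 4 + 3 + 3 + 3 = 19 floats, which is expected to match the array length written by CorrVector.toFloatArray() in GeometryCorrection.java. A compile-time guard along these lines (my addition, not in the commit) would catch accidental layout drift:

/* Hypothetical layout guard: fails to compile if corr_vector is not exactly
 * 19 packed floats (assumes NUM_CAMS == 4 and no padding inserted by the compiler). */
typedef char corr_vector_size_check[(sizeof(struct corr_vector) == 19 * sizeof(float)) ? 1 : -1];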
struct gc {
float focalLength; // =FOCAL_LENGTH;
float pixelSize; // = PIXEL_SIZE; //um
float distortionRadius; // = DISTORTION_RADIUS; // mm - half width of the sensor
float distortionA8; //r^8 (normalized to focal length or to sensor half width?)
float distortionA7; //r^7 (normalized to focal length or to sensor half width?)
float distortionA6; //r^6 (normalized to focal length or to sensor half width?)
float distortionA5; //r^5 (normalized to focal length or to sensor half width?)
float distortionA; // r^4 (normalized to focal length or to sensor half width?)
float distortionB; // r^3
float distortionC; // r^2
// parameters, common for all sensors
float elevation; // degrees, up - positive;
float heading; // degrees, CW (from top) - positive
float forward [NUM_CAMS];
float right [NUM_CAMS];
float height [NUM_CAMS];
float roll [NUM_CAMS]; // degrees, CW (to target) - positive
float common_right; // mm right, camera center
float common_forward; // mm forward (to target), camera center
float common_height; // mm up, camera center
float common_roll; // degrees CW (to target) camera as a whole
// float [][] XYZ_he; // all camera coordinates transformed to eliminate heading and elevation (rolls preserved)
// float [][] XYZ_her = null; // XYZ of the lenses in a corrected CCS (adjusted to elevation, heading, common_roll)
float rXY [NUM_CAMS][2]; // XY pairs of the camera centers in a normal plane, relative to disparityRadius (matches the [NUM_CAMS][2] layout written by GeometryCorrection.toFloatArray())
// float [][] rXY_ideal = {{-0.5, -0.5}, {0.5,-0.5}, {-0.5, 0.5}, {0.5,0.5}};
// only used for the multi-quad systems
float cameraRadius; // =0; // average distance from the "mass center" of the sensors to the sensors
float disparityRadius; // =150.0; // distance between cameras to normalize disparity units to. sqrt(2)*disparityRadius for quad
};
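The gc structure mirrors, field for field, the 42-float array produced by GeometryCorrection.toFloatArray() on the Java side (with NUM_CAMS == 4). As a hedged sketch of how these two structures might eventually be pushed to the device for the geometry correction kernels this commit starts; all variable and function names below are placeholders, not APIs from this code base:

// Hypothetical CUDA host code: upload geometry correction data for later kernels.
#include <cuda_runtime.h>

struct gc host_gc;                       // filled from the ".geometry_correction" dump
struct corr_vector host_cv;              // filled from the ".correction_vector" dump
struct gc * gpu_geometry_correction = NULL;
struct corr_vector * gpu_correction_vector = NULL;

void upload_geometry_correction(void)
{
    cudaMalloc((void **) &gpu_geometry_correction, sizeof(struct gc));
    cudaMalloc((void **) &gpu_correction_vector,   sizeof(struct corr_vector));
    cudaMemcpy(gpu_geometry_correction, &host_gc, sizeof(struct gc),          cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_correction_vector,   &host_cv, sizeof(struct corr_vector), cudaMemcpyHostToDevice);
}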