Commit a91aab50 authored by Andrey Filippov

Tested TensorFlow model on the data inside this program

parent 6e5aa3ce
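Note: the diff below wires a new TensorflowInferModel class into the "TF TEST" button handler and replaces the old TensorflowExamplePlugin test code. A minimal usage sketch of that calling pattern (the sizes are the values hardcoded in the handler; the surrounding ImageJ plugin environment and an open 2D-correlation image are assumed):

    // Sketch only - mirrors the "TF TEST" handler added in this commit.
    // Assumes a 2D-correlation image stack is the current ImageJ image.
    TensorflowInferModel model = new TensorflowInferModel(
            324, // tilesX: tile grid width
            242, // tilesY: tile grid height
            9,   // corr_side: each correlation tile is 9x9 pixels
            4);  // num_layers: correlation slices, not counting the "other" metadata slice
    model.test_tensorflow(true); // keep_empty = true also displays tiles without a target disparity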
@@ -387,6 +387,7 @@ private Panel panel1,
     public PixelMapping.InterSensor.DisparityTiles DISPARITY_TILES=null;
     public ImagePlus DBG_IMP = null;
     public ImagePlus CORRELATE_IMP = null;
+    public TensorflowInferModel TENSORFLOW_INFER_MODEL = null;
     public class SyncCommand{
         public boolean isRunning= false;
@@ -4754,18 +4755,21 @@ private Panel panel1,
             return;
 /* ======================================================================== */
         } else if (label.equals("TF TEST")) {
-            // link 1 (general): https://www.tensorflow.org/api_docs/java/reference/org/tensorflow/package-summary
-            // link 2 (example of an TF & IJ plugin): https://github.com/google/microscopeimagequality/blob/main/microscopeimagequality/data/imagej/src/main/java/MicroscopeImageFocusQualityClassifier.java
-            TensorflowExamplePlugin t = new TensorflowExamplePlugin();
-            t.run();
+            DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
+            boolean keep_empty = true;
+            if (TENSORFLOW_INFER_MODEL == null) {
+                TENSORFLOW_INFER_MODEL = new TensorflowInferModel(
+                        324, // int tilesX,
+                        242, // int tilesY,
+                        9, // int corr_side,
+                        4 //int num_layers
+                        );
+            }
+            TENSORFLOW_INFER_MODEL.test_tensorflow(keep_empty);
             return;
 //JTabbedTest
 // End of buttons code
         }
-        DEBUG_LEVEL=MASTER_DEBUG_LEVEL;
     }
     public String getSaveCongigPath() {
@@ -3,20 +3,17 @@
  * SPDX-License-Identifier: GPL-3.0-or-later
  */
-import org.tensorflow.Graph;
-import org.tensorflow.Session;
-import org.tensorflow.Tensor;
-import org.tensorflow.Tensors;
-import org.tensorflow.TensorFlow;
+import java.nio.FloatBuffer;
+import java.nio.IntBuffer;
+import java.util.ArrayList;
 import org.tensorflow.SavedModelBundle;
-import org.tensorflow.OperationBuilder;
-import org.tensorflow.Shape;
-import org.tensorflow.Output;
-import org.tensorflow.Operation;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
+import org.tensorflow.Tensor;
+import ij.IJ;
+import ij.ImagePlus;
+import ij.ImageStack;
+import ij.WindowManager;
 /**
@@ -48,222 +45,296 @@ Sessions and Tensors
  */
-public class TensorflowExamplePlugin
+public class TensorflowInferModel
 {
-    public final static String EXPORTDIR = "/home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_5x5_main_13_heur/exportdir";
-    // tf.saved_model.tag_constants.SERVING = "serve"
-    public final static String SERVING = "serve";
-    public static void run()
+    final static String TRAINED_MODEL = "trained_model"; // /home/oleg/GIT/python3-imagej-tiff/data_sets/tf_data_5x5_main_13_heur/exportdir";
+    final static String SERVING = "serve";
+    final int tilesX, tilesY, num_tiles, num_layers;
+    final int corr_side, corr_side2;
+    // final long [] shape_corr2d;
+    // final long [] shape_target_disparity;
+    // final long [] shape_tiles_stage1;
+    // final long [] shape_tiles_stage2;
+    final FloatBuffer fb_corr2d;
+    final FloatBuffer fb_target_disparity;
+    final IntBuffer fb_tiles_stage1;
+    final IntBuffer fb_tiles_stage2;
+    final FloatBuffer fb_predicted;
+    /*
+    long[] shape = new long[] {batch, imageSize};
+    */
+    // public
+    final SavedModelBundle bundle;
+    public TensorflowInferModel(int tilesX, int tilesY, int corr_side, int num_layers)
     {
-        System.out.println("TensorflowExamplePlugin run");
-        try {
-            main();
-        } catch (Exception e) {
-            // TODO Auto-generated catch block
-            e.printStackTrace();
-        }
+        this.tilesX = tilesX;
+        this.tilesY = tilesY;
+        this.num_tiles = tilesX*tilesY;
+        this.num_layers = num_layers;
+        this.corr_side = corr_side;
+        this.corr_side2 = corr_side * corr_side;
+        // allocate buffers to be used for tensors
+        this.fb_corr2d = FloatBuffer.allocate(num_tiles * corr_side2*num_layers);
+        this.fb_target_disparity = FloatBuffer.allocate(num_tiles );
+        this.fb_tiles_stage1 = IntBuffer.allocate(num_tiles );
+        this.fb_tiles_stage2 = IntBuffer.allocate(num_tiles );
+        this.fb_predicted = FloatBuffer.allocate(num_tiles );
+        String abs_model_path = getClass().getClassLoader().getResource(TRAINED_MODEL).getFile();
+        System.out.println("TensorflowInferModel model path: "+abs_model_path);
+        // this will load graph/data and open a session that does not need to be closed until the program is closed
+        //// bundle = null;
+        bundle = SavedModelBundle.load(abs_model_path,SERVING);
+        // Operation opr = bundle.graph().operation("rv_stage1_out");
     }
-    /**
-     * From https://github.com/DIVSIO/tensorflow_java_cli_example/blob/master/src/main/java/divisio/example/tensorflow/cli/RunRegression.java
-     */
-    /**
-     * wraps a single float in a tensor
-     * @param f the float to wrap
-     * @return a tensor containing the float
-     */
-    private static Tensor<Float> toTensor(final float f, final Collection<Tensor<?>> tensorsToClose) {
-        final Tensor<Float> t = Tensors.create(f);
-        if (tensorsToClose != null) {
-            tensorsToClose.add(t);
-        }
-        return t;
-    }
-    private static Tensor<Float> toTensor2DFloat(final float [][] f, final Collection<Tensor<?>> tensorsToClose) {
-        final Tensor<Float> t = Tensors.create(f);
-        if (tensorsToClose != null) {
-            tensorsToClose.add(t);
-        }
-        return t;
+    public void run_stage1(
+            FloatBuffer fb_corr2d,
+            FloatBuffer fb_target_disparity,
+            IntBuffer fb_tiles_stage1)
+    {
+        int ntiles = fb_tiles_stage1.limit(); // actual number of entries
+        long [] shape_corr2d = new long [] {ntiles, corr_side2*num_layers};
+        long [] shape_target_disparity = new long [] {ntiles, 1};
+        long [] shape_tiles_stage1 = new long [] {ntiles};
+        final Tensor<Float> t_corr2d = Tensor.create(shape_corr2d, fb_corr2d);
+        final Tensor<Float> target_disparity = Tensor.create(shape_target_disparity, fb_target_disparity);
+        final Tensor<Integer> t_tiles_stage1 = Tensor.create(shape_tiles_stage1,fb_tiles_stage1);
+        // this run does not output any data, but maybe it should still be captured and disposed of?
+        final Tensor<?> t_result_stage1 = bundle.session().runner()
+                .feed("ph_corr2d", t_corr2d)
+                .feed("ph_target_disparity", target_disparity)
+                .feed("ph_ntile", t_tiles_stage1)
+                .fetch("Disparity_net/stage1done:0")
+                .run()
+                .get(0);
+        t_result_stage1. close();
+        t_corr2d. close();
+        target_disparity. close();
+        t_tiles_stage1. close();
     }
-    private static Tensor<Integer> toTensor1DInt(final int [] f, final Collection<Tensor<?>> tensorsToClose) {
-        final Tensor<Integer> t = Tensors.create(f);
-        if (tensorsToClose != null) {
-            tensorsToClose.add(t);
-        }
-        return t;
+    public void run_stage2(
+            IntBuffer fb_tiles_stage2,
+            FloatBuffer fb_disparity) {
+        int ntiles = fb_tiles_stage2.limit(); // actual number of entries
+        long [] shape_tiles_stage2 = new long [] {ntiles};
+        final Tensor<Integer> t_tiles_stage2 = Tensor.create(shape_tiles_stage2, fb_tiles_stage2);
+        final Tensor<?> t_result_disparity = bundle.session().runner()
+                .feed("ph_ntile_out",t_tiles_stage2)
+                .fetch("Disparity_net/stage2_out_sparse:0")
+                .run()
+                .get(0);
+        fb_disparity.rewind();
+        t_result_disparity.writeTo(fb_disparity);
+        t_result_disparity. close();
+        t_tiles_stage2. close();
     }
-    private static void closeTensors(final Collection<Tensor<?>> ts) {
-        for (final Tensor<?> t : ts) {
-            try {
-                t.close();
-            } catch (final Exception e) {
-                System.err.println("Error closing Tensor.");
-                e.printStackTrace();
+    //**************************
+    // helper class to prepare TileProcessor task
+    class TfInTile{
+        float [] corr2d;
+        float target_disparity;
+        float gt_disparity;
+        float gt_strength;
+        int tile;
+        public void setFromCorr2d(
+                float [][] data,
+                int tileX,
+                int tileY,
+                int corr_side,
+                int tilesX
+                ) {
+            int corr_side2 = corr_side*corr_side;
+            int layers = data.length -1;
+            this.corr2d = new float [layers * corr_side2];
+            float [] other = new float [corr_side2];
+            int width = tilesX * corr_side;
+            int tl = tileY * corr_side * width + tileX * corr_side; // index to the
+            for (int nl = 0; nl <= layers; nl++) {
+                if (nl < layers) {
+                    for (int row = 0; row < corr_side; row++) {
+                        System.arraycopy(data[nl], tl+width*row, corr2d, nl*corr_side2 + row*corr_side, corr_side);
+                    }
+                } else {
+                    for (int row = 0; row < corr_side; row++) {
+                        System.arraycopy(data[layers], tl+width*row, other, row*corr_side, corr_side);
+                    }
+                    this.target_disparity = other[ImageDtt.ML_OTHER_TARGET];
+                    this.gt_disparity = other[ImageDtt.ML_OTHER_GTRUTH];
+                    this.gt_strength = other[ImageDtt.ML_OTHER_GTRUTH_STRENGTH];
+                }
+            }
+            if (Float.isNaN(this.target_disparity)) {
+                corr2d = new float[corr2d.length]; // zero them all
             }
+            this.tile = tileY*tilesX + tileX;
         }
-        ts.clear();
+        //System.arraycopy(sym_conv, 0, tile_in, 0, n2*n2)
     }
-    public static void main() throws Exception {
-        final Graph smpb;
-        // init for variable?
-        float [][] rv_stage1_out = new float[78408][32];
-        // from infer_qcds_01.py
-        float [][] img_corr2d = new float[78408][324];
-        float [][] img_target = new float[78408][ 1];
-        int [] img_ntile = new int[78408];
-        // init ntile for testing?
-        for(int i=0;i<img_ntile.length;i++){
-            img_ntile[i] = i;
-        }
-        final SavedModelBundle bundle = SavedModelBundle.load(EXPORTDIR,SERVING);
-        final List<Tensor<?>> tensorsToClose = new ArrayList<Tensor<?>>(5);
-        System.out.println("OK");
-        try {
-            System.out.println("S0:");
-            // read Variable info test
-            Operation opr = bundle.graph().operation("rv_stage1_out");
-            System.out.println(opr.toString());
-            System.out.println("S1:");
-            //opr = bundle.graph().operation("rv_stageY_out");
-            //System.out.println(opr.toString());
-            // init variable via constant?
-            //Tensor<Float> tsr = toTensor2DFloat(rv_stage1_out, tensorsToClose);
-            /*
-            Output builder_init = bundle.graph()
-                    .opBuilder("Const", "rv_stage1_out_init")
-                    .setAttr ("dtype", tsr.dataType())
-                    .setAttr ("value", tsr)
-                    .build()
-                    .output(0);
-            */
-            //System.out.println(builder_init);
-            // variable
-            //OperationBuilder builder2 = bundle.graph().opBuilder("Variable", "rv_stage1_out_extra_variable");
-            //.addInput(builder_init);
-            //builder2.
-            //bundle.graph().opBuilder("Assign", "Assign/" + builder2.op().name()).addInput(variable).addInput(value).build().output(0);
-            //Tensor<Float> tensorVal = tsr;
-            //Output oValue = bundle.graph().opBuilder("Const", "rv_stage1_out_2").setAttr("dtype", tensorVal.dataType()).setAttr("value", tensorVal).build().output(0);
-            //System.out.println(oValue);
-            //Output oValue = bundle.graph().opBuilder("Variable", "rv_stage1_out").setAttr("value", tensorVal).build().output(0);
-            //bundle.graph().opBuilder("Assign", "Assign/rv_stage1_out").setAttr("value", tsr).build();
-            System.out.println("Stage 0.1");
-            //bundle.session().runner().fetch("rv_stageY_out").run();
-            System.out.println("Stage 0.2");
-            bundle.session().runner().fetch("rv_stage1_out").run();
-            System.out.println("Stage 1");
-            // stage 1
-            bundle.session().runner()
-                    .feed("ph_corr2d",toTensor2DFloat(img_corr2d, tensorsToClose))
-                    .feed("ph_target_disparity",toTensor2DFloat(img_target, tensorsToClose))
-                    .feed("ph_ntile",toTensor1DInt(img_ntile, tensorsToClose))
-                    .fetch("Disparity_net/stage1done:0")
-                    .run()
-                    .get(0);
-            System.out.println("Stage 1 DONE");
-            System.out.println("Stage 2");
-            // stage 2
-            final Tensor<?> result = bundle.session().runner()
-                    .feed("ph_ntile_out",toTensor1DInt(img_ntile, tensorsToClose))
-                    .fetch("Disparity_net/stage2_out_sparse:0")
-                    .run()
-                    .get(0);
-            System.out.println("Stage 2 DONE: "+result.shape());
-            //
-            tensorsToClose.add(result);
-            System.out.println("Copy result to variable");
-            float [][] resultValues = (float[][]) result.copyTo(new float[78408][1]);
-            System.out.println("DONE");
-            //} catch (final IllegalStateException ise) {
-            //	System.out.println("Very Bad Error (VBE): "+ise);
-            //	closeTensors(tensorsToClose);
-        } catch (final NumberFormatException nfe) {
-            //just skip unparsable lines ?!
-        } finally {
-            closeTensors(tensorsToClose);
-        }
-        //try (){
-        //smpb = b.graph();
-        //Session sess = b.session();
-        //System.out.println(b.metaGraphDef());
-        //final List<String> labels = tensorFlowService.loadLabels(source,
-        //		MODEL_NAME, "imagenet_comp_graph_label_strings.txt");
-        //System.out.println("Loaded graph and " + labels.size() + " labels");
-        //output = sess.runner().feed(o, t).fetch().run().get(0).copyTo()
-        /*
-        try (
-                final Session s = new Session(g);
-                @SuppressWarnings("unchecked")
-                final Tensor<Float> result = (Tensor<Float>) s.runner().feed("input", image)//
-                .fetch("output").run().get(0)
-                ){
-            ...
-        }
-        */
-        //}
-        try (Graph g = new Graph()) {
-            final String value = "Hello from " + TensorFlow.version();
-            // Construct the computation graph with a single operation, a constant
-            // named "MyConst" with a value "value".
-            try (Tensor t = Tensor.create(value.getBytes("UTF-8"))) {
-                // The Java API doesn't yet include convenience functions for adding operations.
-                g.opBuilder("Const", "MyConst").setAttr("dtype", t.dataType()).setAttr("value", t).build();
-            }
-            // Execute the "MyConst" operation in a Session.
-            try (Session s = new Session(g);
-                // Generally, there may be multiple output tensors, all of them must be closed to prevent resource leaks.
-                Tensor output = s.runner().fetch("MyConst").run().get(0)) {
-                System.out.println(new String(output.bytesValue(), "UTF-8"));
-                s.close();
-                output.close();
-            }
-        }
-    }
+    public int test_tensorflow(
+            boolean keep_empty) {
+        int dbgX = 162;
+        int dbgY = 121;
+        // int dbgT = tilesX ^ dbgY + dbgX;
+        int corr_side = 9;
+        String [] slices = {"hor-pairs","vert-pairs","diagm-pair","vert-pairs","other"};
+        ImagePlus imp_src = WindowManager.getCurrentImage();
+        if (imp_src==null){
+            IJ.showMessage("Error","2D Correlation image stack required");
+            return -1;
+        }
+        ImageStack corr_stack = imp_src.getStack();
+        String [] labels = corr_stack.getSliceLabels();
+        // for (int ii = 0; ii < labels.length; ii++) {
+        // System.out.println(ii+": "+labels[ii]);
+        // }
+        int tilesX= corr_stack.getWidth() / corr_side;
+        int tilesY= corr_stack.getHeight() / corr_side;
+        float [][] corr_data = new float [slices.length][];
+        for (int nslice = 0; nslice < slices.length; nslice++ ) {
+            int ns = -1;
+            for (int i = 0; i < labels.length; i++) {
+                if (slices[nslice].equals(labels[i])) {
+                    ns = i;
+                    break;
+                }
+            }
+            if (ns < 0) {
+                System.out.println("Slice "+slices[nslice]+" is not found in the image");
+                return -1;
+            } else {
+                corr_data[nslice] = (float[]) corr_stack.getPixels(ns + 1) ;
+            }
+        }
+        ArrayList<TfInTile> tf_tiles = new ArrayList<TfInTile>();
+        for (int tileY = 0; tileY < tilesY; tileY++) {
+            for (int tileX = 0; tileX < tilesX; tileX++) {
+                if ((tileY==dbgY) && (tileX==dbgX)) {
+                    System.out.println("tileY = "+tileY+", tileX = "+tileX+" tile = "+(tileY*tilesX + tileX));
+                }
+                TfInTile tf_tile = new TfInTile();
+                tf_tile.setFromCorr2d(
+                        corr_data, // float [][] data,
+                        tileX, // int tileX,
+                        tileY, // int tileY,
+                        corr_side, // int corr_side,
+                        tilesX); // int tilesX
+                if (keep_empty || !Float.isNaN(tf_tile.target_disparity)) {
+                    // if (Float.isNaN(tf_tile.target_disparity)) {
+                    // tf_tile.target_disparity = 0.0f;
+                    // tf_tile.gt_strength = 0.0f;
+                    // }
+                    tf_tiles.add(tf_tile);
+                }
+            }
+        }
+        // sets the limit to the capacity and the position to zero.
+        fb_corr2d.clear();
+        fb_target_disparity.clear();
+        fb_tiles_stage1.clear();
+        fb_tiles_stage2.clear();
+        for (int i = 0; i < tf_tiles.size(); i++) {
+            TfInTile tf_tile = tf_tiles.get(i);
+            fb_corr2d.put (tf_tile.corr2d);
+            float td = tf_tile.target_disparity;
+            if (Float.isNaN(td)) td = 0.0f;
+            fb_target_disparity.put(i, td);
+            fb_tiles_stage1.put(tf_tile.tile);
+            fb_tiles_stage2.put(tf_tile.tile);
+        }
+        // fb_target_disparity.limit(tf_tiles.size()); // put float absolute does not movew the pointer
+        fb_target_disparity.position(tf_tiles.size()); // put float absolute does not movew the pointer
+        fb_tiles_stage1. position(tf_tiles.size()); // put float absolute does not movew the pointer
+        fb_tiles_stage2. position(tf_tiles.size()); // put float absolute does not movew the pointer
+        //sets the limit to the current position and then sets the position to zero.
+        fb_corr2d. flip();
+        fb_target_disparity.flip();
+        fb_tiles_stage1. flip();
+        fb_tiles_stage2. flip();
+        String [] titles = {"predicted", "target", "gt_disparity", "gt_strength", "nn_out", "nn_error","abs_err","abs_heur","clean_nn"};
+        fb_predicted.rewind(); // not needed for absolute get();
+        float [][] result = new float [titles.length][tilesX*tilesY];
+        /*
+        for (int i = 0; i < tf_tiles.size(); i++) {
+            TfInTile tf_tile = tf_tiles.get(i);
+            result[0][i] = tf_tile.target_disparity + fb_predicted.get(i);
+            result[1][i] = tf_tile.target_disparity;
+            result[2][i] = tf_tile.gt_disparity;
+            result[3][i] = tf_tile.gt_strength;
+        }
+        (new showDoubleFloatArrays()).showArrays(
+                result,
+                tilesX,
+                tilesY,
+                true,
+                "NN_disparity-pre",
+                titles);
+        */
+        //if (corr_side > 0) return 0; // always
+        run_stage1(
+                fb_corr2d, // FloatBuffer fb_corr2d,
+                fb_target_disparity, // FloatBuffer fb_target_disparity,
+                fb_tiles_stage1); // IntBuffer fb_tiles_stage1);
+        run_stage2(
+                fb_tiles_stage2, // IntBuffer fb_tiles_stage2,
+                fb_predicted); // FloatBuffer fb_disparity)
+        if (!keep_empty) {
+            for (int i =0; i < result[0].length; i++) {
+                result[0][i]=Float.NaN;
+                result[1][i]=Float.NaN;
+                result[2][i]=Float.NaN;
+            }
+        }
+        fb_predicted.rewind(); // not needed for absolute get();
+        for (int i = 0; i < tf_tiles.size(); i++) {
+            TfInTile tf_tile = tf_tiles.get(i);
+            result[0][i] = tf_tile.target_disparity + fb_predicted.get(i);
+            result[1][i] = tf_tile.target_disparity;
+            result[2][i] = tf_tile.gt_disparity;
+            result[3][i] = tf_tile.gt_strength;
+            result[4][i] = fb_predicted.get(i);
+            result[5][i] = (result[3][i] > 0)?(result[0][i]-result[2][i]):Float.NaN;
+            result[6][i] = (result[3][i] > 0)?Math.abs(result[5][i]):Float.NaN;
+            result[7][i] = (result[3][i] > 0)?Math.abs(result[1][i] - result[2][i]):Float.NaN;
+            result[8][i] = (result[3][i] > 0)?result[0][i]:Float.NaN;
+        }
+        (new showDoubleFloatArrays()).showArrays(
+                result,
+                tilesX,
+                tilesY,
+                true,
+                "NN_disparity",
+                titles);
+        return 0;
+    }
 }
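A note on the java.nio buffer handling in test_tensorflow() above: absolute put(index, value) does not advance a Buffer's position, which is why the code calls position() explicitly before flip() (flip() sets the limit to the number of valid entries and resets the position to zero). A small self-contained illustration of those semantics (plain java.nio, not part of this commit):

    import java.nio.FloatBuffer;

    public class BufferFlipDemo {
        public static void main(String[] args) {
            FloatBuffer fb = FloatBuffer.allocate(8); // position 0, limit 8
            fb.put(0, 3.5f);        // absolute put: position stays at 0
            fb.put(1, 7.0f);        // position still 0
            fb.position(2);         // advance by hand, as the commit does for fb_target_disparity
            fb.flip();              // limit = 2 (entries written), position = 0
            System.out.println("limit after flip: " + fb.limit()); // 2
            System.out.println("first value: " + fb.get(0));       // 3.5
        }
    }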