Elphel / ir-tp-net / Commits / 21896135
Commit 21896135, authored Nov 02, 2022 by Clement Vachet

Update configuration filenames

parent fc5c34cb
Pipeline #2712 failed with stages
Showing 3 changed files with 9 additions and 89 deletions (+9 -89):

- AI_Inference_CSV.py (+4 -33)
- AI_Inference_Direct.py (+3 -54)
- README.md (+2 -2)
AI_Inference_CSV.py (view file @ 21896135)
@@ -162,29 +162,18 @@ def main(args=None):

```python
        padding_mode=padding_mode,
    )
    len_grid_sampler = len(grid_sampler)
    #print('length grid_sampler', len(grid_sampler))

    patch_loader = torch.utils.data.DataLoader(grid_sampler, batch_size=bs)
    aggregator = tio.inference.GridAggregator(grid_sampler, overlap_mode='average')

    with torch.no_grad():
        for patch_idx, patches_batch in enumerate(patch_loader):
            # print('\n\t\t patch_idx: ', patch_idx)
            #print('\t\t Preparing data...')
            inputs = patches_batch['Combined'][tio.DATA]
            # print('\t\t inputs shape: ', inputs.shape)
            input1_tiles, input2_tiles_real, GroundTruth_real = dataset.prepare_data_withfiltering(inputs, nb_image_layers, nb_corr_layers, tile_size, adjacent_tiles_dim)
            #print('\t\t Preparing data - done -')
            input1_tiles = input1_tiles.to(device)
            input2_tiles_real = input2_tiles_real.to(device)
            #GroundTruth_real = GroundTruth_real.to(self.device)
            # Reducing last dimension to compute loss
            #GroundTruth_real = torch.squeeze(GroundTruth_real, dim=2)
            # print('\t\t input1_tiles shape: ', input1_tiles.shape)
            # print('\t\t input2_tiles_real shape:', input2_tiles_real.shape)

            if mc_dropout:
                # Perform multiple inference (mc_passes)
```
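For readers unfamiliar with the pattern used above: TorchIO's grid sampler cuts a volume into patches, a standard DataLoader batches them, and a grid aggregator stitches per-patch outputs back into a full-size tensor using the patch locations. The snippet below is a minimal, self-contained sketch of that pattern only; the dummy volume, patch size, and identity stand-in for the model are illustrative assumptions, not taken from this repository.

```python
import torch
import torchio as tio

# Minimal sketch of TorchIO grid-sampled inference (illustrative only).
subject = tio.Subject(Combined=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 8)))

grid_sampler = tio.data.GridSampler(subject, patch_size=(16, 16, 8), patch_overlap=0)
patch_loader = torch.utils.data.DataLoader(grid_sampler, batch_size=4)
aggregator = tio.inference.GridAggregator(grid_sampler, overlap_mode='average')

with torch.no_grad():
    for patches_batch in patch_loader:
        inputs = patches_batch['Combined'][tio.DATA]   # [B, C, pH, pW, pD] patch batch
        locations = patches_batch[tio.LOCATION]        # patch positions within the volume
        outputs = inputs                               # identity stands in for the model here
        aggregator.add_batch(outputs, locations)

output_tensor = aggregator.get_output_tensor()         # reassembled full-size volume
print(output_tensor.shape)
```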
@@ -193,47 +182,37 @@ def main(args=None):

```python
                    outputs = model(input1_tiles, input2_tiles_real)
                    outputs_all[i] = torch.squeeze(outputs)

                # Compute mean, median, std, CV (coefficient of variation)
                outputs_mean = torch.mean(outputs_all, 0)
                outputs_median = torch.median(outputs_all, 0)[0]
                outputs_std = torch.std(outputs_all, 0)
                outputs_cv = torch.div(outputs_std, torch.abs(outputs_mean))
                # outputs_se = torch.div(outputs_std, math.sqrt(mc_passes))
                outputs_combined = torch.stack((outputs_mean, outputs_median, outputs_cv), dim=1)
            else:
                outputs_combined = model(input1_tiles, input2_tiles_real)
            # print('\t\t outputs_combined shape: ', outputs_combined.shape)
            # print('outputs_combined device', outputs_combined.device)

            # Reshape outputs
            outputs_combined_reshape = torch.reshape(outputs_combined, [outputs_combined.shape[0], outputs_combined.shape[1], 1, 1, 1])
            print('\t\t outputs_combined_reshape shape: ', outputs_combined_reshape.shape)

            input_location = patches_batch[tio.LOCATION]
            # print('\t\t input_location shape: ', input_location.shape)
            # print('\t\t input_location: ', input_location)

            # Reshape input_location to prediction_location, to fit output image size (78,62,1)
            pred_location = dataset.prediction_patch_location(input_location, tile_size, adjacent_tiles_dim)
            # print('\t\t pred_location shape: ', pred_location.shape)
            # print('\t\t pred_location: ', pred_location)

            # Add batch with location to TorchIO aggregator
            aggregator.add_batch(outputs_combined_reshape, pred_location)

    # output_tensor shape [3, 1170, 930, 122]
    output_tensor_combined = aggregator.get_output_tensor()
    # print('output_tensor_combined type: ', output_tensor_combined.dtype)
    # print('output_tensor_combined device', output_tensor_combined.device)
    # print('output_tensor_combined shape: ', output_tensor_combined.shape)

    # Extract real information of interest [3,78,62]
    output_tensor_combined_real = output_tensor_combined[:, :NbTiles_H, :NbTiles_W, 0]
    # print('output_tensor_combined_real shape: ', output_tensor_combined_real.shape)

    output_combined_np = output_tensor_combined_real.numpy().squeeze()
    # print('output_combined_np type', output_combined_np.dtype)
    # print('output_combined_np shape', output_combined_np.shape)

    if mc_dropout:
        output_mean_np = output_combined_np[0, ...]
```
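In the `mc_dropout` branch above, the model is run `mc_passes` times with dropout active and the passes are summarized per tile (mean, median, standard deviation, coefficient of variation). The snippet below is a self-contained illustration of just that summarization step; the random stand-in for the model outputs and the sizes are assumptions, not this repository's code.

```python
import torch

# Illustrative only: summarize repeated stochastic forward passes (MC dropout).
mc_passes, n_tiles = 20, 8
outputs_all = torch.empty(mc_passes, n_tiles)

for i in range(mc_passes):
    # Stand-in for model(input1_tiles, input2_tiles_real) with dropout enabled.
    outputs_all[i] = torch.randn(n_tiles)

outputs_mean = torch.mean(outputs_all, 0)
outputs_median = torch.median(outputs_all, 0)[0]               # values only
outputs_std = torch.std(outputs_all, 0)
outputs_cv = torch.div(outputs_std, torch.abs(outputs_mean))   # coefficient of variation

# Stacked [n_tiles, 3] summary, mirroring torch.stack((mean, median, cv), dim=1)
outputs_combined = torch.stack((outputs_mean, outputs_median, outputs_cv), dim=1)
print(outputs_combined.shape)   # torch.Size([8, 3])
```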
@@ -243,26 +222,18 @@ def main(args=None):

```python
        imageio_output_mean = np.moveaxis(output_mean_np, 0, 1)
        imageio_output_median = np.moveaxis(output_median_np, 0, 1)
        imageio_output_cv = np.moveaxis(output_cv_np, 0, 1)
        # print('imageio_output_mean shape', imageio_output_mean.shape)
        # print('imageio_output_median shape', imageio_output_median.shape)
        # print('imageio_output_cv shape', imageio_output_cv.shape)
    else:
        output_np = output_combined_np
        imageio_output = np.moveaxis(output_np, 0, 1)
        # print('imageio_output shape', imageio_output.shape)

    time_elapsed2 = time.time() - since2
    time_inference_list.append(time_elapsed2)

    if mc_dropout:
        # print('Writing output mean image via imageio...')
        imageio.imwrite(PredictionFile_mean, imageio_output_mean)
        # print('Writing output median image via imageio...')
        imageio.imwrite(PredictionFile_median, imageio_output_median)
        # print('Writing output CV image via imageio...')
        imageio.imwrite(PredictionFile_cv, imageio_output_cv)
    else:
        # print('Writing output image via imageio...')
        imageio.imwrite(PredictionFile, imageio_output)

    print('\t\t Inference in {:.2f}s---'.format(time_elapsed2))
```
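The per-tile maps are transposed with `np.moveaxis` and written with `imageio.imwrite`. A minimal sketch of that last step, assuming a float32 array and a TIFF output path (the array contents and filename are illustrative; the real `PredictionFile*` paths come from the configuration):

```python
import numpy as np
import imageio

# Illustrative only: write a float32 prediction map to disk.
output_np = np.random.rand(78, 62).astype(np.float32)    # assumed tile-grid resolution
imageio_output = np.moveaxis(output_np, 0, 1)             # swap the first two axes before writing
imageio.imwrite('prediction_mean_example.tif', imageio_output)   # hypothetical filename
```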
AI_Inference_Direct.py (view file @ 21896135)

@@ -92,19 +92,11 @@ def main(args=None):

```python
    NbImageLayers = InputFile_Shape[3]
    NbCorrLayers = NbImageLayers - 4
    InputDepth = NbCorrLayers
    print('InputFile_Shape: ', InputFile_Shape)
    print('NbTiles_H: ', NbTiles_H)
    print('NbTiles_W: ', NbTiles_W)
    print('NbImageLayers: ', NbImageLayers)
    print('InputDepth: ', InputDepth)

    # GridSampler
    print('\nGenerating Grid Sampler...')
    patch_size, patch_overlap, padding_mode = dataset.initialize_gridsampler_variables(NbImageLayers, TileSize, AdjacentTilesDim, padding_mode=None)
    print('patch_size: ', patch_size)
    print('patch_overlap: ', patch_overlap)
    print('padding_mode: ', padding_mode)

    grid_sampler = tio.data.GridSampler(
```
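For context, the hunk above derives the correlation depth directly from the input TIFF shape. A small, purely illustrative check of that arithmetic, assuming a 124-layer input as the later shape comment in this file suggests (the tuple layout and values are hypothetical):

```python
# Illustrative check of the layer bookkeeping; the shape values are assumptions.
InputFile_Shape = (1, 1170, 930, 124)    # hypothetical shape with 124 layers in the last axis
NbImageLayers = InputFile_Shape[3]       # 124
NbCorrLayers = NbImageLayers - 4         # 120 (four layers are excluded from the correlation stack)
InputDepth = NbCorrLayers
print(NbImageLayers, NbCorrLayers, InputDepth)   # -> 124 120 120
```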
@@ -114,7 +106,6 @@ def main(args=None):

```python
        padding_mode=padding_mode,
    )
    len_grid_sampler = len(grid_sampler)
    print('length grid_sampler', len(grid_sampler))

    patch_loader = torch.utils.data.DataLoader(grid_sampler, batch_size=bs)
    aggregator = tio.data.GridAggregator(grid_sampler, overlap_mode='average')
```
@@ -134,22 +125,12 @@ def main(args=None):

```python
    #model = nn.Identity().eval()
    with torch.no_grad():
        for patch_idx, patches_batch in enumerate(patch_loader):
            print('\t patch_idx: ', patch_idx)
            #print('\t\t Preparing data...')
            inputs = patches_batch['Combined'][tio.DATA]
            print('\t\t inputs shape: ', inputs.shape)
            input1_tiles, input2_tiles_real, GroundTruth_real = dataset.prepare_data_withfiltering(inputs, NbImageLayers, NbCorrLayers, TileSize, AdjacentTilesDim)
            #print('\t\t Preparing data - done -')
            input1_tiles = input1_tiles.to(device)
            input2_tiles_real = input2_tiles_real.to(device)
            #GroundTruth_real = GroundTruth_real.to(device)
            # Reducing last dimension to compute loss
            #GroundTruth_real = torch.squeeze(GroundTruth_real, dim=2)
            print('\t\t input1_tiles shape: ', input1_tiles.shape)
            print('\t\t input2_tiles_real shape:', input2_tiles_real.shape)

            if mc_dropout:
                # Perform multiple inference (mc_passes)
```
@@ -158,62 +139,34 @@ def main(args=None):

```python
                    outputs = model(input1_tiles, input2_tiles_real)
                    outputs_all[i] = torch.squeeze(outputs)

                # Compute mean, std, CV (coefficient of variation)
                outputs_mean = torch.mean(outputs_all, 0)
                outputs_median = torch.median(outputs_all, 0)[0]
                outputs_std = torch.std(outputs_all, 0)
                outputs_cv = torch.div(outputs_std, torch.abs(outputs_mean))
                # outputs_se = torch.div(outputs_std, math.sqrt(mc_passes))
                outputs_combined = torch.stack((outputs_mean, outputs_median, outputs_cv), dim=1)
                print('\t\t outputs shape: ', outputs.shape)
                print('\t\t outputs device', outputs.device)
                print('\t\t outputs_all shape: ', outputs_all.shape)
                print('\t\t outputs_all device', outputs_all.device)
                print('\t\t outputs_mean shape: ', outputs_mean.shape)
                print('\t\t outputs_median shape: ', outputs_median.shape)
                print('\t\t outputs_median type: ', outputs_median.type())
                print('\t\t outputs_combined shape: ', outputs_combined.shape)
                print('\t\t outputs_mean[:20]', outputs_mean[:20])
                print('\t\t outputs_median[:20]', outputs_median[:20])
                print('\t\t outputs_std[:20]', outputs_std[:20])
                print('\t\t outputs_cv[:20]', outputs_cv[:20])
            else:
                outputs_combined = model(input1_tiles, input2_tiles_real)

            print('\t\t outputs_combined device', outputs_combined.device)
            print('\t\t outputs_combined shape: ', outputs_combined.shape)

            # Reshape outputs to match location dimensions
            outputs_combined_reshape = torch.reshape(outputs_combined, [outputs_combined.shape[0], outputs_combined.shape[1], 1, 1, 1])
            print('\t\t outputs_combined_reshape shape: ', outputs_combined_reshape.shape)

            input_location = patches_batch[tio.LOCATION]
            print('\t\t input_location shape: ', input_location.shape)
            print('\t\t input_location type: ', input_location.dtype)
            print('\t\t input_location[:20]: ', input_location[:20])

            # Reshape input_location to prediction_location, to fit output image size (78,62,1)
            pred_location = dataset.prediction_patch_location(input_location, TileSize, AdjacentTilesDim)
            print('\t\t pred_location shape: ', pred_location.shape)
            print('\t\t pred_location[:20]: ', pred_location[:20])

            # Add batch with location to TorchIO aggregator
            aggregator.add_batch(outputs_combined_reshape, pred_location)

    # output_tensor shape [3, 1170, 930, 124]
    output_tensor_combined = aggregator.get_output_tensor()
    print('output_tensor_combined type: ', output_tensor_combined.dtype)
    print('output_tensor_combined shape: ', output_tensor_combined.shape)

    # Extract real information of interest [3, 78,62]
    output_tensor_combined_real = output_tensor_combined[:, :NbTiles_H, :NbTiles_W, 0]
    print('output_tensor_combined_real shape: ', output_tensor_combined_real.shape)

    output_combined_np = output_tensor_combined_real.numpy().squeeze()
    print('output_combined_np type', output_combined_np.dtype)
    print('output_combined_np shape', output_combined_np.shape)

    if mc_dropout:
        output_mean_np = output_combined_np[0, ...]
```
@@ -223,14 +176,10 @@ def main(args=None):

```python
        imageio_output_mean = np.moveaxis(output_mean_np, 0, 1)
        imageio_output_median = np.moveaxis(output_median_np, 0, 1)
        imageio_output_cv = np.moveaxis(output_cv_np, 0, 1)
        print('imageio_output_mean shape', imageio_output_mean.shape)
        print('imageio_output_median shape', imageio_output_median.shape)
        print('imageio_output_cv shape', imageio_output_cv.shape)
    else:
        output_np = output_combined_np
        imageio_output = np.moveaxis(output_np, 0, 1)
        print('imageio_output shape', imageio_output.shape)

    time_elapsed2 = time.time() - since2
```
README.md (view file @ 21896135)

@@ -29,12 +29,12 @@ Update your CSV files to point to your training and validation datasets.
Command line:

-python3 AI_Training.py --config ./Config_Files/AI_Training_Config.yaml
+python3 AI_Training.py --config ./Config_Files/AI_Training_Config_Tiles3x3.yaml

### 3.2 DNN inference using configuration file
Command line:

-python3 AI_Inference_CSV.py --config ./Config_Files/AI_Inference_Config.yaml --verbose
+python3 AI_Inference_CSV.py --config ./Config_Files/AI_Inference_Config_Tiles3x3.yaml --verbose
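Both scripts take a `--config` YAML path (the `_Tiles3x3` suffix in the renamed files presumably refers to a 3x3 adjacent-tile setting such as `AdjacentTilesDim`). As a generic, hypothetical sketch of how such a file is typically loaded, with invented keys purely for illustration (the real schema is defined by the scripts, not by this diff):

```python
import argparse
import yaml  # PyYAML

# Illustrative only: parse --config/--verbose and load the YAML into a dict.
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True, help='Path to a YAML configuration file')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()

with open(args.config) as f:
    config = yaml.safe_load(f)   # hypothetical keys, e.g. TileSize, AdjacentTilesDim, ModelName

if args.verbose:
    print('Loaded configuration keys:', sorted(config))
```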