Commit e165d3e4 authored by Clement Vachet

First commit

#!/usr/bin/env python3
import imageio
import numpy as np
import sys
import argparse
import time
def arg_parser():
    parser = argparse.ArgumentParser(description='Concatenate 2D layer(s) to a 3D volume')
required = parser.add_argument_group('Required')
required.add_argument('--input', type=str, required=True,
help='3D TIFF file (multi-layer)')
required.add_argument('--layer1', type=str, required=True,
help='2D TIFF file (single layer)')
required.add_argument('--output', type=str, required=True,
help='Combined TIFF file (multi-layer)')
    options = parser.add_argument_group('Options')
    options.add_argument('--layer2', type=str,
                         help='Optional second 2D TIFF file (single layer)')
    options.add_argument('--repeat', type=int, default=15,
                         help='Repeat tile number (default 15)')
return parser
#MAIN
def main(args=None):
args = arg_parser().parse_args(args)
Time1 = time.time()
#print(bcolors.BOLDWHITE+"Time1: "+str(Time1)+bcolors.ENDC)
imgInput_name = args.input
imgLayer1_name = args.layer1
imgLayer2_name = args.layer2
output_name = args.output
repeatNb = args.repeat
# Read Combined image - all layers
print("\nReading input file - 3D volume...")
imgInput = imageio.mimread(imgInput_name,memtest=False)
imgInput = np.array(imgInput)
# print('\nimgInput type: ', imgInput.dtype)
print('\t imgInput shape: ', imgInput.shape)
# Read Layer1 image
print("Reading Layer1 file...")
imgLayer1 = imageio.imread(imgLayer1_name)
# print('imgLayer1 type: ', imgLayer1.dtype)
print('\t imgLayer1 shape: ', imgLayer1.shape)
# Resample layer1 (repeating values, to match input size)
imgLayer1_repeat0 = np.repeat(imgLayer1, repeatNb, axis=0)
imgLayer1_repeat = np.repeat(imgLayer1_repeat0, repeatNb, axis=1)
imgLayer1_repeat = np.expand_dims(imgLayer1_repeat, axis=0)
print('\t imgLayer1_repeat shape: ', imgLayer1_repeat.shape)
# Stack layer to imgInput, to generate one 3D volume
print("Adding 2D layer to 3D volume...")
imgAll = np.concatenate((imgInput,imgLayer1_repeat), axis = 0)
if args.layer2:
print("Reading Layer2 file...")
imgLayer2 = imageio.imread(imgLayer2_name)
# print('imgLayer2 type: ', imgLayer2.dtype)
print('\t imgLayer2 shape: ', imgLayer2.shape)
imgLayer2_repeat0 = np.repeat(imgLayer2, repeatNb, axis=0)
imgLayer2_repeat = np.repeat(imgLayer2_repeat0, repeatNb, axis=1)
imgLayer2_repeat = np.expand_dims(imgLayer2_repeat, axis=0)
print('\t imgLayer2_repeat shape: ', imgLayer2_repeat.shape)
# Stack layer to imgInput, to generate one 3D volume
print("Adding 2D layer to 3D volume...")
imgAll = np.concatenate((imgAll,imgLayer2_repeat), axis = 0)
print("Saving output volume...")
print('\t imgAll shape: ', imgAll.shape)
imageio.mimwrite(output_name,imgAll)
Time2 = time.time()
TimeDiff = Time2 - Time1
print("Execution Time: "+str(TimeDiff))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
#!/usr/bin/env python3
import imageio
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
import sys
import argparse
import time
import os
def arg_parser():
parser = argparse.ArgumentParser(description='Data analysis - density computation')
required = parser.add_argument_group('Required')
required.add_argument('--pred', type=str, required=True,
help='Prediction TIFF file (single layer)')
required.add_argument('--groundtruth', type=str, required=True,
help='Ground Truth TIFF file (single layer)')
required.add_argument('--adjtilesdim', type=int, required=True,
help='Adjacent tiles dimensions (e.g. 1, 3 or 5) to exclude NaN border')
required.add_argument('--output', type=str, required=True,
help='CSV file')
options = parser.add_argument_group('Options')
    options.add_argument('--inclusionmask', type=str,
                         help='save inclusion mask as output (abs diff < threshold)')
    options.add_argument('--exclusionmask', type=str,
                         help='save exclusion mask as output (abs diff >= threshold)')
options.add_argument('--threshold', type=float, default = 2.0,
help='threshold for inclusion / exclusion mask')
options.add_argument('--verbose', action="store_true",
help='verbose mode')
return parser
#MAIN
def main(args=None):
args = arg_parser().parse_args(args)
# print(args)
Time1 = time.time()
#print(bcolors.BOLDWHITE+"Time1: "+str(Time1)+bcolors.ENDC)
imgPred_name = args.pred
imgPred_basename = os.path.basename(imgPred_name)
AdjacentTilesDim = args.adjtilesdim
imgGT_name = args.groundtruth
output_name = args.output
inclusion_mask_name = args.inclusionmask
exclusion_mask_name = args.exclusionmask
threshold = args.threshold
# Read Pred and GroundTruth images
#print("Reading Pred file...")
imgPred = imageio.imread(imgPred_name)
#print("Reading GroundTruth file...")
imgGT = imageio.imread(imgGT_name)
# Remove NaN border when needed
imgPred_Crop = imgPred
imgGT_Crop = imgGT
if (AdjacentTilesDim == 3):
Border = 1
imgPred_Crop = imgPred[Border:-Border,Border:-Border]
imgGT_Crop = imgGT[Border:-Border,Border:-Border]
elif (AdjacentTilesDim == 5):
Border = 2
imgPred_Crop = imgPred[Border:-Border,Border:-Border]
imgGT_Crop = imgGT[Border:-Border,Border:-Border]
# Quality control - NaN
TestNaN_imgPred_Crop = np.any(np.isnan(imgPred_Crop))
TestNaN_imgGT_Crop = np.any(np.isnan(imgGT_Crop))
# # List indices with Nan values
# ListNaN_imgPred_Crop = np.argwhere(np.isnan(imgPred_Crop))
# print('ListNaN_imgPred_Crop: ', ListNaN_imgPred_Crop)
# Verbose mode
if args.verbose:
print('imgPred type: ', imgPred.dtype)
print('imgPred shape: ', imgPred.shape)
print('imgGT type: ', imgGT.dtype)
print('imgGT shape: ', imgGT.shape)
print('imgPred_Crop shape: ', imgPred_Crop.shape)
print('imgGT_Crop shape: ', imgGT_Crop.shape)
print('TestNaN_imgPred_Crop: ', TestNaN_imgPred_Crop)
print('TestNaN_imgGT_Crop: ', TestNaN_imgGT_Crop)
# Compute Image difference
np_diff = np.abs(imgPred_Crop - imgGT_Crop)
print('np_diff type: ', np_diff.dtype)
print('np_diff shape: ', np_diff.shape)
# Generate output mask
np_exclusionmask = np.uint8(np.where(np_diff >= threshold, 1, 0))
np_inclusionmask = np.uint8(np.where(np_diff < threshold, 1, 0))
print('np_inclusionmask type: ', np_inclusionmask.dtype)
print('np_inclusionmask shape: ', np_inclusionmask.shape)
# Compute density
density = np.sum(np_inclusionmask) / (np_inclusionmask.shape[0] * np_inclusionmask.shape[1])
print('density: ',density)
QC_data = np.array([[imgPred_basename,density]])
Columns = ['FileName','Density']
df = pd.DataFrame(QC_data,columns=Columns)
print(df.head())
df.to_csv(output_name, index=False)
if inclusion_mask_name is not None:
print('\t\t Writing output inclusion mask - imageio...')
imageio.imwrite(inclusion_mask_name, np_inclusionmask)
    if exclusion_mask_name is not None:
print('\t\t Writing output exclusion mask - imageio...')
imageio.imwrite(exclusion_mask_name, np_exclusionmask)
Time2 = time.time()
#print(bcolors.BOLDWHITE+"Time2: "+str(Time2)+bcolors.ENDC)
TimeDiff = Time2 - Time1
#print("Computing Time: "+str(TimeDiff))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
#!/usr/bin/env python3
import imageio
import numpy as np
import pandas as pd
import sys
import argparse
import time
import os
def arg_parser():
parser = argparse.ArgumentParser(description='Data analysis - NaN computation')
required = parser.add_argument_group('Required')
    required.add_argument('--disp_lma', type=str, required=True,
                          help='LMA disparity TIFF file (single layer)')
required.add_argument('--output', type=str, required=True,
help='Output CSV file')
options = parser.add_argument_group('Options')
options.add_argument('--verbose', action="store_true",
help='verbose mode')
    options.add_argument('--mask', type=str,
                         help='output mask file (TIFF) marking non-NaN pixels')
return parser
#MAIN
def main(args=None):
args = arg_parser().parse_args(args)
Time1 = time.time()
#print(bcolors.BOLDWHITE+"Time1: "+str(Time1)+bcolors.ENDC)
DispLMA_name = args.disp_lma
output_name = args.output
# Read Pred and GroundTruth images
#print("Reading Pred file...")
imgDispLMA = imageio.imread(DispLMA_name)
# Verbose mode
if args.verbose:
print('imgDispLMA type: ', imgDispLMA.dtype)
print('imgDispLMA shape: ', imgDispLMA.shape)
# print('imgGT type: ', imgGT.dtype)
# Compute NaN
Bool_NaN = np.isnan(imgDispLMA)
Nb_NaN = np.count_nonzero(Bool_NaN)
Nb_NotNaN = np.count_nonzero(~Bool_NaN)
Nb_Total = imgDispLMA.shape[0] * imgDispLMA.shape[1]
NaN_Percent = Nb_NaN / Nb_Total
if args.verbose:
print('Nb_NaN: ', Nb_NaN)
print('Nb_NotNaN: ', Nb_NotNaN)
print('Nb_Total: ', Nb_Total)
print('NaN_Percent: ', NaN_Percent)
# Mask
DispLMA_Mask = np.uint8(np.reshape(~Bool_NaN, imgDispLMA.shape))
if args.verbose:
print('DispLMA_Mask type: ', DispLMA_Mask.dtype)
print('DispLMA_Mask shape: ', DispLMA_Mask.shape)
QC_data = np.array([[DispLMA_name, Nb_NaN, Nb_NotNaN, Nb_Total, NaN_Percent]])
Columns = ['FileName','Nb_NaN', 'Nb_NotNaN', 'Nb_Total', 'NaN_Percent']
df = pd.DataFrame(QC_data,columns=Columns)
print(df.head())
df.to_csv(output_name, index=False)
if (args.mask is not None):
Mask_FileName = args.mask
imageio.imwrite(Mask_FileName, DispLMA_Mask * 255)
Time2 = time.time()
#print(bcolors.BOLDWHITE+"Time2: "+str(Time2)+bcolors.ENDC)
TimeDiff = Time2 - Time1
#print("Computing Time: "+str(TimeDiff))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
#!/usr/bin/env python3
import imageio
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
import sys
import argparse
import time
import os
def arg_parser():
parser = argparse.ArgumentParser(description='Data analysis - RMSE computation')
required = parser.add_argument_group('Required')
required.add_argument('--pred', type=str, required=True,
help='Prediction TIFF file (single layer)')
required.add_argument('--groundtruth', type=str, required=True,
help='Ground Truth TIFF file (single layer)')
required.add_argument('--adjtilesdim', type=int, required=True,
help='Adjacent tiles dimensions (e.g. 1, 3 or 5) to exclude NaN border')
required.add_argument('--output', type=str, required=True,
help='CSV file')
options = parser.add_argument_group('Options')
options.add_argument('--verbose', action="store_true",
help='verbose mode')
return parser
#MAIN
def main(args=None):
args = arg_parser().parse_args(args)
Time1 = time.time()
#print(bcolors.BOLDWHITE+"Time1: "+str(Time1)+bcolors.ENDC)
imgPred_name = args.pred
imgPred_basename = os.path.basename(imgPred_name)
AdjacentTilesDim = args.adjtilesdim
imgGT_name = args.groundtruth
output_name = args.output
# Read Pred and GroundTruth images
#print("Reading Pred file...")
imgPred = imageio.imread(imgPred_name)
#print("Reading GroundTruth file...")
imgGT = imageio.imread(imgGT_name)
# Remove NaN border when needed
imgPred_Crop = imgPred
imgGT_Crop = imgGT
if (AdjacentTilesDim == 3):
Border = 1
imgPred_Crop = imgPred[Border:-Border,Border:-Border]
imgGT_Crop = imgGT[Border:-Border,Border:-Border]
elif (AdjacentTilesDim == 5):
Border = 2
imgPred_Crop = imgPred[Border:-Border,Border:-Border]
imgGT_Crop = imgGT[Border:-Border,Border:-Border]
# Quality control - NaN
TestNaN_imgPred_Crop = np.any(np.isnan(imgPred_Crop))
TestNaN_imgGT_Crop = np.any(np.isnan(imgGT_Crop))
# # List indices with Nan values
# ListNaN_imgPred_Crop = np.argwhere(np.isnan(imgPred_Crop))
# print('ListNaN_imgPred_Crop: ', ListNaN_imgPred_Crop)
# Verbose mode
if args.verbose:
# print('imgPred type: ', imgPred.dtype)
print('imgPred shape: ', imgPred.shape)
# print('imgGT type: ', imgGT.dtype)
print('imgGT shape: ', imgGT.shape)
print('imgPred_Crop shape: ', imgPred_Crop.shape)
print('imgGT_Crop shape: ', imgGT_Crop.shape)
print('TestNaN_imgPred_Crop: ', TestNaN_imgPred_Crop)
print('TestNaN_imgGT_Crop: ', TestNaN_imgGT_Crop)
# Compute RMSE
rmse = mean_squared_error(imgGT_Crop, imgPred_Crop, squared=False)
print('rmse: ',rmse)
QC_data = np.array([[imgPred_basename,rmse]])
Columns = ['FileName','RMSE']
df = pd.DataFrame(QC_data,columns=Columns)
print(df.head())
df.to_csv(output_name, index=False)
Time2 = time.time()
#print(bcolors.BOLDWHITE+"Time2: "+str(Time2)+bcolors.ENDC)
TimeDiff = Time2 - Time1
#print("Computing Time: "+str(TimeDiff))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
#!/usr/bin/env python3
import imageio
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
import sys
import argparse
import time
import os
def arg_parser():
parser = argparse.ArgumentParser(description='Data analysis - RMSE computation')
required = parser.add_argument_group('Required')
required.add_argument('--pred', type=str, required=True,
help='Prediction TIFF file (single layer)')
required.add_argument('--groundtruth', type=str, required=True,
help='Ground Truth TIFF file (single layer)')
required.add_argument('--confidence', type=str, required=True,
help='Confidence TIFF file (single layer)')
required.add_argument('--disp_lma', type=str, required=True,
help='LMA disparity TIFF file (single layer)')
required.add_argument('--adjtilesdim', type=int, required=True,
help='Adjacent tiles dimensions (e.g. 1, 3 or 5) to exclude NaN border')
required.add_argument('--output', type=str, required=True,
help='CSV file')
options = parser.add_argument_group('Options')
options.add_argument('--threshold', type=float, default = 0.15,
help='threshold on confidence map')
options.add_argument('--output_mask', type=str,
help='output mask image (TIFF file)')
options.add_argument('--verbose', action="store_true",
help='verbose mode')
return parser
# Remove NaN border
def cropping(img, border):
if border == 0:
return img
else:
img_cropped = img[border:-border,border:-border]
return img_cropped
#MAIN
def main(args=None):
args = arg_parser().parse_args(args)
Time1 = time.time()
#print(bcolors.BOLDWHITE+"Time1: "+str(Time1)+bcolors.ENDC)
imgPred_name = args.pred
imgPred_basename = os.path.basename(imgPred_name)
AdjacentTilesDim = args.adjtilesdim
imgGT_name = args.groundtruth
imgDispLMA_name = args.disp_lma
imgConfidence_name = args.confidence
output_name = args.output
outputmask_name = args.output_mask
threshold = args.threshold
# Read images
#print("Reading Pred file...")
imgPred = imageio.imread(imgPred_name)
#print("Reading GroundTruth file...")
imgGT = imageio.imread(imgGT_name)
#print("Reading Confidence file...")
imgConfidence = imageio.imread(imgConfidence_name)
#print("Reading DispLMA file...")
imgDispLMA = imageio.imread(imgDispLMA_name)
# Remove NaN border when needed
if (AdjacentTilesDim == 3):
Border = 1
elif (AdjacentTilesDim == 5):
Border = 2
else:
Border = 0
imgPred_Crop = cropping(imgPred, Border)
imgGT_Crop = cropping(imgGT, Border)
imgConfidence_Crop = cropping(imgConfidence, Border)
imgDispLMA_Crop = cropping(imgDispLMA, Border)
# Quality control - NaN
TestNaN_imgPred_Crop = np.any(np.isnan(imgPred_Crop))
TestNaN_imgGT_Crop = np.any(np.isnan(imgGT_Crop))
# # List indices with Nan values
# ListNaN_imgPred_Crop = np.argwhere(np.isnan(imgPred_Crop))
# print('ListNaN_imgPred_Crop: ', ListNaN_imgPred_Crop)
# Verbose mode
if args.verbose:
# print('imgPred type: ', imgPred.dtype)
print('imgPred shape: ', imgPred.shape)
# print('imgGT type: ', imgGT.dtype)
print('imgGT shape: ', imgGT.shape)
print('imgPred_Crop shape: ', imgPred_Crop.shape)
print('imgGT_Crop shape: ', imgGT_Crop.shape)
print('TestNaN_imgPred_Crop: ', TestNaN_imgPred_Crop)
print('TestNaN_imgGT_Crop: ', TestNaN_imgGT_Crop)
# Define sample_weight using Confidence and DispLMA maps
imgDispLMAMask_Crop = np.uint8(np.reshape(~np.isnan(imgDispLMA_Crop), imgDispLMA_Crop.shape))
imgConfidenceMask_Crop = np.uint8(np.where(imgConfidence_Crop >= threshold, 1, 0))
imgSampleWeight_Crop = np.uint8(np.logical_and(imgDispLMAMask_Crop, imgConfidenceMask_Crop))
if outputmask_name is not None:
imageio.imwrite(outputmask_name, imgSampleWeight_Crop * 255)
# Compute RMSE
rmse = mean_squared_error(imgGT_Crop, imgPred_Crop, sample_weight=imgSampleWeight_Crop, squared=False)
print('rmse: ',rmse)
QC_data = np.array([[imgPred_basename,rmse]])
Columns = ['FileName','RMSE']
df = pd.DataFrame(QC_data,columns=Columns)
#print(df.head())
print('Saving CSV file...')
df.to_csv(output_name, index=False)
Time2 = time.time()
#print(bcolors.BOLDWHITE+"Time2: "+str(Time2)+bcolors.ENDC)
TimeDiff = Time2 - Time1
#print("Computing Time: "+str(TimeDiff))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
#!/usr/bin/env python3
import imageio
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
import sys
import argparse
import time
import os
def arg_parser():
parser = argparse.ArgumentParser(description='Data analysis - RMSE computation')
required = parser.add_argument_group('Required')
required.add_argument('--pred', type=str, required=True,
help='Prediction TIFF file (single layer)')
required.add_argument('--groundtruth', type=str, required=True,
help='Ground Truth TIFF file (single layer)')
required.add_argument('--adjtilesdim', type=int, required=True,
help='Adjacent tiles dimensions (e.g. 1, 3 or 5) to exclude NaN border')
required.add_argument('--output', type=str, required=True,
help='CSV file')
options = parser.add_argument_group('Options')
options.add_argument('--threshold', type=float, default = 2.0,
help='threshold on image difference')
options.add_argument('--output_mask', type=str,
help='output mask image (TIFF file)')
options.add_argument('--verbose', action="store_true",
help='verbose mode')
return parser
# Remove NaN border
def cropping(img, border):
if border == 0:
return img
else:
img_cropped = img[border:-border,border:-border]
return img_cropped
#MAIN
def main(args=None):
args = arg_parser().parse_args(args)
Time1 = time.time()
#print(bcolors.BOLDWHITE+"Time1: "+str(Time1)+bcolors.ENDC)
imgPred_name = args.pred
imgPred_basename = os.path.basename(imgPred_name)
AdjacentTilesDim = args.adjtilesdim
imgGT_name = args.groundtruth
output_name = args.output
outputmask_name = args.output_mask
threshold = args.threshold
# Read images
#print("Reading Pred file...")
imgPred = imageio.imread(imgPred_name)
#print("Reading GroundTruth file...")
imgGT = imageio.imread(imgGT_name)
# Remove NaN border when needed
if (AdjacentTilesDim == 3):
Border = 1
elif (AdjacentTilesDim == 5):
Border = 2
else:
Border = 0
imgPred_Crop = cropping(imgPred, Border)
imgGT_Crop = cropping(imgGT, Border)
# Quality control - NaN
TestNaN_imgPred_Crop = np.any(np.isnan(imgPred_Crop))
TestNaN_imgGT_Crop = np.any(np.isnan(imgGT_Crop))
# # List indices with Nan values
# ListNaN_imgPred_Crop = np.argwhere(np.isnan(imgPred_Crop))
# print('ListNaN_imgPred_Crop: ', ListNaN_imgPred_Crop)
# Verbose mode
if args.verbose:
# print('imgPred type: ', imgPred.dtype)
print('imgPred shape: ', imgPred.shape)
# print('imgGT type: ', imgGT.dtype)
print('imgGT shape: ', imgGT.shape)
print('imgPred_Crop shape: ', imgPred_Crop.shape)
print('imgGT_Crop shape: ', imgGT_Crop.shape)
print('TestNaN_imgPred_Crop: ', TestNaN_imgPred_Crop)
print('TestNaN_imgGT_Crop: ', TestNaN_imgGT_Crop)
# Define sample_weight, where absolute difference is below threshold (2 pixels), to remove outliers
imgAbsDiff_Crop = np.absolute(imgPred_Crop - imgGT_Crop)
imgSampleWeight_Crop = np.uint8(np.where(imgAbsDiff_Crop < threshold, 1, 0))
if outputmask_name is not None:
imageio.imwrite(outputmask_name, imgSampleWeight_Crop * 255)
# Compute RMSE
rmse = mean_squared_error(imgGT_Crop, imgPred_Crop, sample_weight=imgSampleWeight_Crop, squared=False)
print('rmse: ',rmse)
QC_data = np.array([[imgPred_basename,rmse]])
Columns = ['FileName','RMSE_WithThreshold']
df = pd.DataFrame(QC_data,columns=Columns)
#print(df.head())
print('Saving CSV file...')
df.to_csv(output_name, index=False)
Time2 = time.time()
#print(bcolors.BOLDWHITE+"Time2: "+str(Time2)+bcolors.ENDC)
TimeDiff = Time2 - Time1
#print("Computing Time: "+str(TimeDiff))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
#!/usr/bin/env python3
import imageio
import numpy as np
import sys
import argparse
import time
def arg_parser():
parser = argparse.ArgumentParser(description='Combine images to generate 3D volume')
required = parser.add_argument_group('Required')
required.add_argument('--corr', type=str, required=True,
help='2D Corr TIFF file (multi-layer)')
required.add_argument('--targetdisp', type=str, required=True,
help='Target Disparity TIFF file (single layer)')
required.add_argument('--groundtruth', type=str, required=True,
help='Ground Truth TIFF file (single layer)')
required.add_argument('--confidence', type=str, required=True,
help='Confidence TIFF file (single layer)')
required.add_argument('--disp_lma', type=str, required=True,
help='LMA Disparity TIFF file (single layer)')
required.add_argument('--output', type=str, required=True,
help='Combined TIFF file (multi-layer)')
options = parser.add_argument_group('Options')
options.add_argument('--repeat', type=int, default=15,
help='Repeat tile number (default 15)')
return parser
#MAIN
def main(args=None):
args = arg_parser().parse_args(args)
Time1 = time.time()
#print(bcolors.BOLDWHITE+"Time1: "+str(Time1)+bcolors.ENDC)
img2DCorr_name = args.corr
imgTargetDisp_name = args.targetdisp
imgGT_name = args.groundtruth
imgConfidence_name = args.confidence
imgDispLMA_name = args.disp_lma
output_name = args.output
repeatNb = args.repeat
# Read 2dcorr image - all layers
print("Reading 2dcorr file...")
img2DCorr = imageio.mimread(img2DCorr_name,memtest=False)
img2DCorr = np.array(img2DCorr)
# print('\nimg2DCorr type: ', img2DCorr.dtype)
# print('img2DCorr shape: ', img2DCorr.shape)
# Read TargetDisp and GroundTruth images
print("Reading TargetDisp file...")
imgTargetDisp = imageio.imread(imgTargetDisp_name)
# print('imgTargetDisp type: ', imgTargetDisp.dtype)
# print('imgTargetDisp shape: ', imgTargetDisp.shape)
print("Reading GroundTruth file...")
imgGT = imageio.imread(imgGT_name)
# print('imgGT type: ', imgGT.dtype)
# print('imgGT shape: ', imgGT.shape)
print("Reading Confidence file...")
imgConfidence = imageio.imread(imgConfidence_name)
# print('imgConfidence type: ', imgConfidence.dtype)
# print('imgConfidence shape: ', imgConfidence.shape)
print("Reading DispLMA file...")
imgDispLMA = imageio.imread(imgDispLMA_name)
# print('imgDispLMA type: ', imgDispLMA.dtype)
# print('imgDispLMA shape: ', imgDispLMA.shape)
# - - - - - - - - - - -
print("Generating combined image...")
    # Resample TargetDisp and imgGT (repeating values, to match img2DCorr size)
imgTargetDisp_repeat0 = np.repeat(imgTargetDisp, repeatNb, axis=0)
imgTargetDisp_repeat = np.repeat(imgTargetDisp_repeat0, repeatNb, axis=1)
imgTargetDisp_repeat = np.expand_dims(imgTargetDisp_repeat, axis=0)
imgGT_repeat0 = np.repeat(imgGT, repeatNb, axis=0)
imgGT_repeat = np.repeat(imgGT_repeat0, repeatNb, axis=1)
imgGT_repeat = np.expand_dims(imgGT_repeat, axis=0)
# print('imgTargetDisp_repeat shape: ', imgTargetDisp_repeat.shape)
# print('imgGT_repeat shape: ', imgGT_repeat.shape)
imgConfidence_repeat0 = np.repeat(imgConfidence, repeatNb, axis=0)
imgConfidence_repeat = np.repeat(imgConfidence_repeat0, repeatNb, axis=1)
imgConfidence_repeat = np.expand_dims(imgConfidence_repeat, axis=0)
print('\t imgConfidence_repeat shape: ', imgConfidence_repeat.shape)
imgDispLMA_repeat0 = np.repeat(imgDispLMA, repeatNb, axis=0)
imgDispLMA_repeat = np.repeat(imgDispLMA_repeat0, repeatNb, axis=1)
imgDispLMA_repeat = np.expand_dims(imgDispLMA_repeat, axis=0)
print('\t imgDispLMA_repeat shape: ', imgDispLMA_repeat.shape)
    # Stack layers onto img2DCorr, to generate one 3D volume
imgAll = np.concatenate((img2DCorr,imgTargetDisp_repeat), axis = 0)
imgAll = np.concatenate((imgAll,imgGT_repeat), axis = 0)
imgAll = np.concatenate((imgAll,imgConfidence_repeat), axis = 0)
imgAll = np.concatenate((imgAll,imgDispLMA_repeat), axis = 0)
imageio.mimwrite(output_name,imgAll)
Time2 = time.time()
#print(bcolors.BOLDWHITE+"Time2: "+str(Time2)+bcolors.ENDC)
TimeDiff = Time2 - Time1
print("Reading Time: "+str(TimeDiff))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
#!/usr/bin/env python3
import imageio
import numpy as np
import pandas as pd
import sys
import argparse
import time
import os
def arg_parser():
parser = argparse.ArgumentParser(description='Quality control')
required = parser.add_argument_group('Required')
required.add_argument('--confidence', type=str, required=True,
help='2D confidence TIFF file (single layer)')
required.add_argument('--output', type=str, required=True,
help='CSV file')
return parser
#MAIN
def main(args=None):
args = arg_parser().parse_args(args)
Time1 = time.time()
#print(bcolors.BOLDWHITE+"Time1: "+str(Time1)+bcolors.ENDC)
imgConfidence_name = args.confidence
imgConfidence_basename = os.path.basename(imgConfidence_name)
output_name = args.output
# Read confidence image - all layers
#print("Reading confidence file...")
imgConfidence = imageio.imread(imgConfidence_name)
print('\nimgConfidence type: ', imgConfidence.dtype)
print('imgConfidence shape: ', imgConfidence.shape)
Bool_NaN = np.isnan(imgConfidence)
Nb_NaN = np.count_nonzero(Bool_NaN)
imgConfidence_NotNaN = imgConfidence[~np.isnan(imgConfidence)]
print('\nNb_NaN: ', Nb_NaN)
print('\nimgConfidence_NotNaN type: ', imgConfidence_NotNaN.dtype)
print('imgConfidence_NotNaN shape: ', imgConfidence_NotNaN.shape)
# Compute Image info: mean,min,max values
imgConfidence_mean = np.mean(imgConfidence_NotNaN)
imgConfidence_min = np.min(imgConfidence_NotNaN)
imgConfidence_max = np.max(imgConfidence_NotNaN)
print('\nimgConfidence_mean : ', imgConfidence_mean)
print('imgConfidence_min : ', imgConfidence_min)
print('imgConfidence_max : ', imgConfidence_max)
# Save information to CSV file
QC_data = np.array([[imgConfidence_basename,imgConfidence.shape[0],imgConfidence.shape[1], \
Nb_NaN, imgConfidence_mean, imgConfidence_min, imgConfidence_max]
])
Columns = ['FileName','imgConfidence_Shape0','imgConfidence_Shape1', \
'Nb_NaN','imgConfidence_mean','imgConfidence_min','imgConfidence_max']
df = pd.DataFrame(QC_data,columns=Columns)
print(df.head())
df.to_csv(output_name, index=False)
Time2 = time.time()
#print(bcolors.BOLDWHITE+"Time2: "+str(Time2)+bcolors.ENDC)
TimeDiff = Time2 - Time1
#print("Computing Time: "+str(TimeDiff))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
#!/usr/bin/env python3
import imageio
import numpy as np
import pandas as pd
import sys
import argparse
import time
import os
def arg_parser():
parser = argparse.ArgumentParser(description='Quality control')
required = parser.add_argument_group('Required')
required.add_argument('--corr', type=str, required=True,
help='2D Corr TIFF file (multi-layer)')
required.add_argument('--targetdisp', type=str, required=True,
help='Target Disparity TIFF file (single layer)')
required.add_argument('--groundtruth', type=str, required=True,
help='Ground Truth TIFF file (single layer)')
required.add_argument('--output', type=str, required=True,
help='CSV file')
return parser
#MAIN
def main(args=None):
args = arg_parser().parse_args(args)
Time1 = time.time()
#print(bcolors.BOLDWHITE+"Time1: "+str(Time1)+bcolors.ENDC)
img2DCorr_name = args.corr
img2DCorr_basename = os.path.basename(img2DCorr_name)
imgTargetDisp_name = args.targetdisp
imgGT_name = args.groundtruth
output_name = args.output
# Read 2dcorr image - all layers
#print("Reading 2dcorr file...")
img2DCorr = imageio.mimread(img2DCorr_name,memtest=False)
img2DCorr = np.array(img2DCorr)
# print('\nimg2DCorr type: ', img2DCorr.dtype)
#print('img2DCorr shape: ', img2DCorr.shape)
# Read TargetDisp and GroundTruth images
#print("Reading TargetDisp file...")
imgTargetDisp = imageio.imread(imgTargetDisp_name)
# print('imgTargetDisp type: ', imgTargetDisp.dtype)
#print('imgTargetDisp shape: ', imgTargetDisp.shape)
#print("Reading GroundTruth file...")
imgGT = imageio.imread(imgGT_name)
# print('imgGT type: ', imgGT.dtype)
#print('imgGT shape: ', imgGT.shape)
# Compute Image info: mean,min,max values
img2DCorr_mean = np.mean(img2DCorr)
img2DCorr_min = np.min(img2DCorr)
img2DCorr_max = np.max(img2DCorr)
imgTargetDisp_mean = np.mean(imgTargetDisp)
imgTargetDisp_min = np.min(imgTargetDisp)
imgTargetDisp_max = np.max(imgTargetDisp)
imgGT_mean = np.mean(imgGT)
imgGT_min = np.min(imgGT)
imgGT_max = np.max(imgGT)
# print('\n img2DCorr pixel info:')
# print('\t img2DCorr_mean: ', img2DCorr_mean)
# print('\t img2DCorr_min: ', img2DCorr_min)
# print('\t img2DCorr_max: ', img2DCorr_max)
# Compute image difference between TargetDisp and GroundTruth images
Diff = np.abs(imgTargetDisp - imgGT)
Diff_mean = np.mean(Diff)
Diff_min = np.min(Diff)
Diff_max = np.max(Diff)
# print('Image difference:')
# print('\t Diff_mean:',Diff_mean )
# print('\t Diff_min:', Diff_min)
# print('\t Diff_max:', Diff_max)
# Save information to CSV file
QC_data = np.array([[img2DCorr_basename,img2DCorr.shape[0],img2DCorr.shape[1], img2DCorr.shape[2], \
imgTargetDisp.shape[0], imgTargetDisp.shape[1], imgGT.shape[0], imgGT.shape[1], \
img2DCorr_mean, img2DCorr_min, img2DCorr_max, \
imgTargetDisp_mean, imgTargetDisp_min, imgTargetDisp_max, \
imgGT_mean, imgGT_min, imgGT_max, \
Diff_mean, Diff_min, Diff_max]
])
Columns = ['FileName','img2DCorr_Shape0','img2DCorr_Shape1','img2DCorr_Shape2',\
'imgTargetDisp_Shape0','imgTargetDisp_Shape1',\
'imgGT_Shape0','imgGT_Shape1',
'img2DCorr_mean','img2DCorr_min','img2DCorr_max',\
'imgTargetDisp_mean','imgTargetDisp_min','imgTargetDisp_max',\
'imgGT_mean','imgGT_min','imgGT_max',\
'Diff_mean','Diff_min','Diff_max',\
]
df = pd.DataFrame(QC_data,columns=Columns)
print(df.head())
df.to_csv(output_name, index=False)
Time2 = time.time()
#print(bcolors.BOLDWHITE+"Time2: "+str(Time2)+bcolors.ENDC)
TimeDiff = Time2 - Time1
#print("Computing Time: "+str(TimeDiff))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
This project contains various Python scripts used for disparity map analysis.
## 1. Data pre-processing stage
Note: the data pre-processing stage generates the input files for AI analysis via ir-tp-net
### 1.1. Extracting images of interest from multi-layer 3D TIFF files
Example:
```
python3 ./Preprocessing_2DPhaseCorrelation.py --crop_border --input $i --targetdisp $TargetDispImage \
    --groundtruth $GroundTruthImage --confidence $ConfidenceImage --disp_lma $DispLMAImage --corr $CorrImage --verbose
```
Notes:
- Input file is a special multi-layer TIFF file
- Output CorrImage is a 3D TIFF file with 120 layers
- Other output files are 2D images
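A minimal sketch, assuming imageio as used by the scripts in this commit and the default `--repeat` of 15, of how these outputs could be sanity-checked; the file names are placeholders:
```
import imageio
import numpy as np

# Placeholder file names, for illustration only
corr = np.array(imageio.mimread("corr.tiff", memtest=False))
target_disp = np.asarray(imageio.imread("target_disp.tiff"))

print(corr.shape)         # expected (120, H, W): 120 correlation layers at pixel resolution
print(target_disp.shape)  # expected roughly (H/15, W/15): one value per 15x15 tile
```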
### 1.2. Generating 3D TIFF files as direct input files to neural network ir-tp-net
Example:
```
python3 ./Preprocessing_CombinedImages.py --corr $CorrImage --targetdisp $TargetDispImage \
    --groundtruth $GroundTruthImage --confidence $ConfidenceImage --disp_lma $DispLMAImage --output $CombinedImage
```
Notes:
- Output file is a 3D image with 124 layers, scaled to the size of the correlation image
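The layer layout follows the concatenation order in Preprocessing_CombinedImages.py above; a minimal sketch (with a placeholder file name) for splitting the combined file back into its components:
```
import imageio
import numpy as np

# "combined.tiff" is a placeholder name; layer order follows Preprocessing_CombinedImages.py
img_all = np.array(imageio.mimread("combined.tiff", memtest=False))
assert img_all.shape[0] == 124   # 120 correlation layers + 4 appended maps

corr         = img_all[:120]     # 2D phase-correlation layers
target_disp  = img_all[120]      # target disparity (tile values repeated 15x15)
ground_truth = img_all[121]
confidence   = img_all[122]
disp_lma     = img_all[123]
```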
## 2. AI analysis stage
Please see "ir-tp-net" project for neural network training and testing to generate predicted disparity map
## 3. Data post-processing stage
Note: the data post-processing stage analyzes the outputs of the AI analysis
### 3.1. Density analysis
Example:
```
python3 ./Compute_Density.py --pred $i --groundtruth $GroundTruthImage --adjtilesdim 1 --output $Density_CSVFile \
    --inclusionmask $InclusionMask_File --exclusionmask $ExclusionMask_File --threshold 2.0 --verbose
```
Note: the input file is the predicted disparity map
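Conceptually, the density reported by Compute_Density.py is the fraction of (border-cropped) pixels whose absolute prediction error is below the threshold; a minimal NumPy sketch of the same computation:
```
import numpy as np

def density(pred, gt, threshold=2.0, border=0):
    # Crop the NaN border (border=1 for adjtilesdim 3, border=2 for adjtilesdim 5)
    if border:
        pred = pred[border:-border, border:-border]
        gt = gt[border:-border, border:-border]
    # Fraction of pixels with |pred - gt| < threshold
    inclusion = np.abs(pred - gt) < threshold
    return inclusion.sum() / inclusion.size
```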
### 3.2. RMSE analysis
Examples:
```
python3 ./Compute_RMSE_WithThreshold.py --pred $i --groundtruth $GroundTruthImage --adjtilesdim $AdjTilesDim \
    --threshold $RMSE_Threshold --output $RMSE_CSVFile
python3 ./Compute_RMSE_WithFiltering.py --pred $i --groundtruth $GroundTruthImage --confidence $ConfidenceImage \
    --disp_lma $DispLMAImage --adjtilesdim 1 --threshold $Threshold --output $RMSE_CSVFile --output_mask $RMSEFiltering_MaskFile
```
Note: the input file is the predicted disparity map
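Both RMSE scripts use sklearn's mean_squared_error with a binary sample_weight; the equivalent weighted RMSE, shown here as a plain NumPy sketch, is the square root of the weighted mean of squared errors:
```
import numpy as np

def weighted_rmse(pred, gt, weights):
    # Equivalent to mean_squared_error(gt, pred, sample_weight=weights, squared=False)
    w = weights.astype(np.float64)
    return np.sqrt(np.sum(w * (gt - pred) ** 2) / np.sum(w))

# Compute_RMSE_WithThreshold.py: weights = 1 where |pred - gt| < threshold, else 0
# Compute_RMSE_WithFiltering.py: weights = 1 where disp_lma is not NaN and confidence >= threshold, else 0
```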
## 4. Data quality control
The quality control stage generates a multi-layer 3D TIFF file.
Example:
```
python3 ./Compute_Inference_QCImage.py --pred $i --groundtruth $GroundTruthImage --targetdisp $TargetDispImage \
    --mask $DataFilteringMask --threshold 2.0 --output $InferenceQC_File --verbose
```
Notes:
- Input file is the predicted disparity map
- Output file includes the predicted disparity map, ground truth map, target disparity map and mask map
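A minimal sketch for reading that QC file back; Compute_Inference_QCImage.py is not shown in this commit view, so the layer order below is an assumption for illustration only, and "inference_qc.tiff" is a placeholder name:
```
import imageio
import numpy as np

# Assumed layer order, for illustration only
qc = np.array(imageio.mimread("inference_qc.tiff", memtest=False))
pred, ground_truth, target_disp, mask = qc   # one 2D map per layer, assuming four layers
```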
#!/usr/bin/env python3
import imageio
import numpy as np
import sys
import argparse
import time
def arg_parser():
parser = argparse.ArgumentParser(description='Generate combined image with random noise level')
required = parser.add_argument_group('Required')
required.add_argument('--template', type=str, required=True,
help='Combined TIFF file (multi-layer)')
required.add_argument('--offsetTemplate', type=float, required=True,
help='Template noise offset level')
    required.add_argument('--noiseLevel', type=float, required=True,
                          help='Maximum noise offset level (offsets outside [-noiseLevel, noiseLevel] are excluded)')
required.add_argument('--output', type=str, required=True,
help='Output combined TIFF file (multi-layer)')
options = parser.add_argument_group('Options')
options.add_argument('--tileSize', type=int, default=15,
help='Tile Size')
options.add_argument('--outputTargetDisp', type=str,
help='Output TargetDisp (2D image)')
options.add_argument('--outputGroundTruth', type=str,
help='Output GroundTruth (2D image)')
return parser
# Steps:
# - Define offset noise level (option)
# - Select the available offset images based on the noise level
# - Load all selected images as a stack
# - For each tile, pick among the available images at random
# - Save output image
#MAIN
def main(args=None):
args = arg_parser().parse_args(args)
Time1 = time.time()
#print(bcolors.BOLDWHITE+"Time1: "+str(Time1)+bcolors.ENDC)
template_name = args.template
offset_template = args.offsetTemplate
noise_level = args.noiseLevel
output_name = args.output
tile_size = args.tileSize
output_TargetDisp = args.outputTargetDisp
output_GroundTruth = args.outputGroundTruth
# Offsets
List_AvailableOffsets = np.array([-5.000, -4.003, -3.116, -2.341, -1.676, -1.122, -0.679, -0.346, -0.125, -0.014, \
0.014, 0.125, 0.346, 0.679, 1.122, 1.676, 2.341, 3.116, 4.003, 5.000])
#print('List_AvailableOffsets',List_AvailableOffsets)
# Filter offsets based on offsetLevel
List_Offsets = List_AvailableOffsets[np.logical_and(List_AvailableOffsets >= -noise_level,List_AvailableOffsets <= noise_level)]
print('\n List_Offsets',List_Offsets)
# Nb images
nb_images = len(List_Offsets)
print('nb_images',nb_images)
# Read template combined image
print("\n Reading template file...")
imgTemplate_layerList = imageio.mimread(template_name,memtest=False)
imgTemplate = np.stack(imgTemplate_layerList, axis=0)
print('\t imgTemplate type: ', imgTemplate.dtype)
print('\t imgTemplate shape: ', imgTemplate.shape)
print("\n Creating image stack...")
image_stack = np.zeros((nb_images, imgTemplate.shape[0], imgTemplate.shape[1], imgTemplate.shape[2]), dtype = imgTemplate.dtype)
# print('image_stack type: ', image_stack.dtype)
# print('image_stack shape: ', image_stack.shape)
# List_ImageNames
List_ImageNames = []
for i, offset in enumerate(List_Offsets):
print('\t Reading Image {} with offset {:.3f} ...'.format(i,offset))
offset_string = "{:.3f}".format(offset)
FileName_current = template_name.replace(str(offset_template),offset_string)
List_ImageNames.append(FileName_current)
img_current_layerList = imageio.mimread(FileName_current,memtest=False)
img_current = np.stack(img_current_layerList, axis=0)
image_stack[i,...] = img_current
#print('List_ImageNames',List_ImageNames)
# Generating image stack
# image_stack = np.stack(List_Images, axis=0)
print('\t image_stack type: ', image_stack.dtype)
print('\t image_stack shape: ', image_stack.shape)
# Output image
print("\n Generating output image by random input patch selection...")
imgOutput = np.zeros(imgTemplate.shape, dtype = imgTemplate.dtype)
print('\t imgOutput type: ', imgOutput.dtype)
print('\t imgOutput shape: ', imgOutput.shape)
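    # For each tile of size tile_size x tile_size, copy that tile (all layers) from one randomly chosen offset image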
i = j = 0
for i in range(0,imgTemplate.shape[1], tile_size):
for j in range(0,imgTemplate.shape[2], tile_size):
random_selection = np.random.randint(nb_images)
#print('i j rnd: {} {} {}'.format(i,j,random_selection))
#imgOutput[:,i:i+tile_size,j:j+tile_size] = tile_size * patchNb
imgOutput[:,i:i+tile_size,j:j+tile_size] = image_stack[random_selection,:,i:i+tile_size,j:j+tile_size]
imgOutput_TargetDisp = imgOutput[-2,:,:]
imgOutput_TargetDisp = imgOutput_TargetDisp[::tile_size,::tile_size]
print('\t imgOutput_TargetDisp shape: ', imgOutput_TargetDisp.shape)
imgOutput_GroundTruth = imgOutput[-1,:,:]
imgOutput_GroundTruth = imgOutput_GroundTruth[::tile_size,::tile_size]
print('\t imgOutput_GroundTruth shape: ', imgOutput_GroundTruth.shape)
# Write output combined image (multi-layer)
print("\n Writing output file - combined image...")
imageio.mimwrite(output_name,imgOutput)
# Write output TargetDisp image (2D)
if (output_TargetDisp is not None):
print("\nWriting output file - TargetDisp image...")
imageio.imwrite(output_TargetDisp,imgOutput_TargetDisp)
# Write output GroundTruth image (2D)
if (output_GroundTruth is not None):
print("\nWriting output file - GroundTruth image...")
imageio.imwrite(output_GroundTruth,imgOutput_GroundTruth)
Time2 = time.time()
#print(bcolors.BOLDWHITE+"Time2: "+str(Time2)+bcolors.ENDC)
TimeDiff = Time2 - Time1
print("Reading Time: "+str(TimeDiff))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))