# Diff of /scripts/test/predict.py [000000] .. [2afb35]
# (repository diff-viewer header: "Switch to unified view" / "a b/scripts/test/predict.py";
#  the full file content at revision [2afb35] follows)
1
#==============================================================================#
2
#  Author:       Dominik Müller                                                #
3
#  Copyright:    2020 IT-Infrastructure for Translational Medical Research,    #
4
#                University of Augsburg                                        #
5
#                                                                              #
6
#  This program is free software: you can redistribute it and/or modify        #
7
#  it under the terms of the GNU General Public License as published by        #
8
#  the Free Software Foundation, either version 3 of the License, or           #
9
#  (at your option) any later version.                                         #
10
#                                                                              #
11
#  This program is distributed in the hope that it will be useful,             #
12
#  but WITHOUT ANY WARRANTY; without even the implied warranty of              #
13
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
14
#  GNU General Public License for more details.                                #
15
#                                                                              #
16
#  You should have received a copy of the GNU General Public License           #
17
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.       #
18
#==============================================================================#
19
#-----------------------------------------------------#
20
#                   Library imports                   #
21
#-----------------------------------------------------#
22
import tensorflow as tf
23
from miscnn.data_loading.interfaces import NIFTI_interface
24
from miscnn import Data_IO, Preprocessor, Neural_Network
25
from miscnn.processing.subfunctions import Normalization, Clipping, Resampling
26
from miscnn.neural_network.architecture.unet.standard import Architecture
27
from miscnn.neural_network.metrics import tversky_crossentropy, dice_soft, \
28
                                          dice_crossentropy, tversky_loss
29
import argparse
30
import os
31
32
#-----------------------------------------------------#
#                      Argparser                      #
#-----------------------------------------------------#
# Parse the command line: path to the fitted model, output directory for
# predictions, and an optional GPU id for multi-GPU cluster nodes.
parser = argparse.ArgumentParser(description="Automated COVID-19 Segmentation")
parser.add_argument("--model", help="Path to model", required=True, type=str, dest="model")
parser.add_argument("--output", help="Path to the output directory",
                    required=True, type=str, dest="output")
parser.add_argument("-g", "--gpu", help="GPU ID selection for multi cluster",
                    required=False, type=int, dest="gpu", default=0)
args = parser.parse_args()
path_model = args.model
path_preds = args.output
# Pin this process to the selected GPU. args.gpu is already an int
# (argparse type=int), so the former extra int() cast was redundant.
# NOTE(review): tensorflow is imported above this point; this normally still
# works because TF initializes CUDA devices lazily, but setting the variable
# before the import would be safer — confirm against the TF version in use.
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
#-----------------------------------------------------#
#               Setup of MIScnn Pipeline              #
#-----------------------------------------------------#
# I/O interface for NIfTI volumes: a single image channel and four
# segmentation classes [background, lung_left, lung_right, covid-19]
interface = NIFTI_interface(channels=1, classes=4)

# Data IO object: reads samples from "data.testing" and writes predictions
# into the output directory given on the command line; keep the batch
# directory on disk after the run (delete_batchDir=False)
data_io = Data_IO(interface, input_path="data.testing", output_path=path_preds,
                  delete_batchDir=False)

# Collect all sample indices found in the file structure, in sorted order
sample_list = sorted(data_io.get_indiceslist())
# Preprocessing subfunctions, applied in this order:
#   1. Clip intensities to the CT lung window (-1250 .. 250)
#   2. Scale pixel values into the grayscale range 0-255
#   3. Resample volumes to a voxel spacing of 1.58 x 1.58 x 2.70
#   4. Standardize intensities via z-score normalization
sf_clipping = Clipping(min=-1250, max=250)
sf_normalize = Normalization(mode="grayscale")
sf_resample = Resampling((1.58, 1.58, 2.70))
sf_zscore = Normalization(mode="z-score")

# Ordered subfunction list handed to the Preprocessor below
sf = [sf_clipping, sf_normalize, sf_resample, sf_zscore]
73
# Create and configure the Preprocessor class
74
pp = Preprocessor(data_io, data_aug=None, batch_size=2, subfunctions=sf,
75
                  prepare_subfunctions=True, prepare_batches=False,
76
                  analysis="patchwise-crop", patch_shape=(160, 160, 80),
77
                  use_multiprocessing=True)
78
# Adjust the patch overlap for predictions
79
pp.patchwise_overlap = (80, 80, 30)
80
pp.mp_threads = 16
81
82
# Standard 3D U-Net architecture, depth 4, with batch normalization and a
# softmax activation on the output layer
unet_standard = Architecture(depth=4, batch_normalization=True,
                             activation="softmax")

# Assemble the neural network: Tversky crossentropy as loss plus several
# overlap-based metrics for monitoring.
# NOTE(review): "learninig_rate" is the (misspelled) keyword actually exposed
# by the MIScnn Neural_Network API — do not "correct" the spelling without
# verifying the installed MIScnn version.
model = Neural_Network(preprocessor=pp, architecture=unet_standard,
                       loss=tversky_crossentropy,
                       metrics=[tversky_loss, dice_soft, dice_crossentropy],
                       learninig_rate=0.001, batch_queue_size=3, workers=3)
# Restore the fitted model weights from disk
model.load(path_model)

# Run inference over every sample; with return_output=False the predictions
# are written to the output directory via the Data IO interface rather than
# returned to the caller
model.predict(sample_list, return_output=False)