Diff of /eval.py [000000] .. [f2ca4d]

import sys

sys.path.append('architectures/deeplab_3D/')
sys.path.append('architectures/unet_3D/')
sys.path.append('architectures/hrnet_3D/')
sys.path.append('architectures/experiment_nets_3D/')
sys.path.append('utils/')

import os
from os import walk
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim as optim

import numpy as np
import scipy.misc
from tqdm import *
import random
from random import randint
from docopt import docopt

import deeplab_resnet_3D
import unet_3D
import highresnet_3D
import exp_net_3D

import lossF
import PP
import augmentations as AUG
import evalF as EF
import evalFP as EFP
import evalMetrics as METRICS

docstr = """Evaluate trained 3D segmentation models on a validation set, or generate predictions in test mode.

Usage:
    eval.py [options]

Options:
    -h, --help                  Print this message
    --visualize                 View the outputs of each scan
    --evalMethod=<int>          0 to evaluate the model on whole images, 1 on patches [default: 1]
    --patchPredSize=<int>       If evaluating the model with patches, the size of each patch [default: 60]
    --evalMetric=<str>          'iou' or 'dice'; only iou is supported right now [default: iou]
    --snapPrefix=<str>          Snapshot prefix: for a_1000.pth, a_2000.pth, the prefix is a [default: HR3Dadice_1_2017-07-16-18-32_iter]
    --singleEval                Evaluate a single model
    --postFix=<str>             Postfix [default: _200x200x100orig]
    --resultsDir=<str>          Path to save evaluation results and predictions to [default: eval_results/]
    --predictionsPath=<str>     Predictions path [default: 1]
    --snapshotPath=<str>        Snapshot path [default: models/snapshots/]
    --mainFolderPath=<str>      Main folder path [default: ../Data/MS2017b/]
    --NoLabels=<int>            The number of different labels in the training data [default: 2]
    --gpu0=<int>                GPU number [default: 0]
    --useGPU=<int>              Use GPU [default: 0]
    --testMode                  Enable test mode (no evaluation, only predictions)
    --modelPath=<str>           Full path of the single model to test (used by test mode and singleEval mode) [default: None]
    --iterRange=<str>           Range of num iters [default: 1-21]
    --iterStep=<int>            Step size of iters [default: 1]
    --testAugm                  Apply test time augmentations
    --extraPatch=<int>          Extra patch size [default: 5]
"""

args = docopt(docstr, version='v0.1')
print(args)

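# Example invocation (illustrative only; the snapshot prefix and data paths are
# placeholders that depend on your own training runs):
#   python eval.py --useGPU=1 --evalMethod=1 --patchPredSize=60 \
#       --snapPrefix=HR3Dadice_1_2017-07-16-18-32_iter --iterRange=1-21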
eval_method = int(args['--evalMethod'])
patch_pred_size = int(args['--patchPredSize'])
eval_metric = args['--evalMetric']
snap_prefix = args['--snapPrefix']
results_dir = args['--resultsDir']
snapshots_path = args['--snapshotPath']
postfix = args['--postFix']
main_folder_path = args['--mainFolderPath']
num_labels = int(args['--NoLabels'])
gpu0 = int(args['--gpu0'])
useGPU = int(args['--useGPU'])
test_mode = args['--testMode']
model_path = args['--modelPath']
iter_range = args['--iterRange']
iter_step = int(args['--iterStep'])
iter_low, iter_high = (int(x) for x in iter_range.split('-'))
eval_list = main_folder_path + 'val' + postfix + '.txt'
test_augm = args['--testAugm']
single_eval = args['--singleEval']
extra_patch = int(args['--extraPatch'])
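# With the defaults, eval_list resolves to ../Data/MS2017b/val_200x200x100orig.txt,
# and --iterRange=1-21 with --iterStep=1 walks the snapshots <prefix>_1000.pth
# through <prefix>_20000.pth (the upper bound is exclusive, as in range()).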
if single_eval or test_mode:
    models_path = model_path
else:
    models_path = snap_prefix

if test_mode:
    if not os.path.exists('temp_preds/'):
        os.makedirs('temp_preds/')
else:
    if not os.path.exists(results_dir):
        print('Creating directory at:', results_dir)
        os.makedirs(results_dir)
    results_file = open(os.path.join(results_dir, os.path.splitext(os.path.basename(models_path))[0] + '_ep_' + str(extra_patch) + '.txt'), 'w+')

onlyLesions = (num_labels == 2)

num_labels2 = 209

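# The architecture is chosen from the snapshot's filename prefix (EXP3D, HR3D,
# DL3D or UNET3D); num_labels2 above is the class count of the secondary head
# that only the EXP3D nets expose.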
def modelInit():
    isPriv = False
    f_name = models_path.split('/')[-1]
    #load model
    if 'EXP3D' in f_name:
        experiment = f_name.replace('EXP3D_', '').replace('.pth', '').split('_')
        experiment = '_'.join(experiment[0:3])
        dilation_arr, isPriv, withASPP = PP.getExperimentInfo(experiment)
        model = exp_net_3D.getExpNet(num_labels, dilation_arr, isPriv, NoLabels2 = num_labels2, withASPP = withASPP)
    elif 'HR3D' in f_name:
        model = highresnet_3D.getHRNet(num_labels)
    elif 'DL3D' in f_name:
        model = deeplab_resnet_3D.Res_Deeplab(num_labels)
    elif 'UNET3D' in f_name:
        model = unet_3D.UNet3D(1, num_labels)
    else:
        print('No model available for this .pth')
        sys.exit()

    model.eval()

    if useGPU:
        model.cuda(gpu0)

    return model, isPriv

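# evalModel: in test mode the script only writes predictions to temp_preds/;
# otherwise it scores every snapshot in the iteration range against the
# validation list and writes the averaged metrics to results_file.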
def evalModel(model):
    img_list = open(eval_list).readlines()
    if test_mode:
        if models_path == 'None':
            print('Insert model path if you are testing this model')
            sys.exit()
        model = loadSnapshot(model, models_path)

        for img_str in img_list:
            img_str = img_str.rstrip()
            img, gt, out, affine = EF.predict(os.path.join(main_folder_path, img_str),
                                                        model, num_labels, postfix, main_folder_path, eval_method,
                                                        gpu0, useGPU, patch_size = patch_pred_size, test_augm = test_augm, extra_patch = extra_patch)

            #save prediction
            save_path = os.path.join('temp_preds', 'pred_' + img_str.split('/')[-3] + '_s' + str(gt.shape[0]) + '.nii.gz')
            PP.saveScan(out, affine, save_path)
    else:
        if single_eval:
            r = range(1)
        else:
            r = range(iter_low, iter_high, iter_step)
        for it in r:
            counter = 0
            if single_eval:
                model = loadSnapshot(model, models_path)
            else:
                model = loadSnapshot(model, os.path.join(snapshots_path, models_path + '_' + str(it*1000) + '.pth'))
            r_list_iou = []
            r_list_dice = []
            r_list_recall = []
            r_list_precision = []
            for img_str in img_list:
                img_str = img_str.rstrip()
                img, gt, out, _ = EF.predict(os.path.join(main_folder_path, img_str),
                                                                model, num_labels, postfix, main_folder_path,
                                                                eval_method, gpu0, useGPU, patch_size = patch_pred_size, test_augm = test_augm, extra_patch = extra_patch)

                result_iou = METRICS.metricEval('iou', out, gt, num_labels)
                result_dice = METRICS.metricEval('dice', out, gt, num_labels)
                result_recall = METRICS.metricEval('recall', out, gt, num_labels)
                result_precision = METRICS.metricEval('precision', out, gt, num_labels)

                r_list_iou.append(result_iou)
                r_list_dice.append(result_dice)
                r_list_recall.append(result_recall)
                r_list_precision.append(result_precision)
                counter += 1
                sys.stdout.write("Model Iter {:5d} Progress: {:4d}/{:4d} iou {:1.4f} dice {:1.4f} recall {:1.4f} precision {:1.4f}  \r".format(it * 1000, counter, len(img_list), result_iou, result_dice, result_recall, result_precision))
                sys.stdout.flush()
            avg_iou = np.mean(r_list_iou)
            avg_dice = np.mean(r_list_dice)
            avg_recall = np.mean(r_list_recall)
            avg_precision = np.mean(r_list_precision)
            results_file.write('Iterations: {:5d} iou: {:1.4f} dice: {:1.4f} recall: {:1.4f} precision: {:1.4f} \n'.format(it*1000, avg_iou, avg_dice, avg_recall, avg_precision))
            print('Done!')
        results_file.close()

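# evalModelPriv: same flow as evalModel, but for nets trained with a secondary
# (privileged) head; EFP.predict additionally returns an (out1, gt1) pair that
# is scored as iou over num_labels2 classes.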
def evalModelPriv(model):
    img_list = open(eval_list).readlines()
    if test_mode:
        if models_path == 'None':
            print('Insert model path if you are testing this model')
            sys.exit()
        model = loadSnapshot(model, models_path)

        for img_str in img_list:
            img_str = img_str.rstrip()
            img, gt1, out1, gt2, out2, affine = EFP.predict(os.path.join(main_folder_path, img_str),
                                                        model, num_labels, num_labels2, postfix, main_folder_path, eval_method,
                                                        gpu0, useGPU, patch_size = patch_pred_size, test_augm = test_augm, extra_patch = extra_patch)
            #save prediction
            save_path = os.path.join('temp_preds', 'pred_' + img_str.split('/')[-3] + '_s' + str(gt2.shape[0]) + '.nii.gz')
            PP.saveScan(out2, affine, save_path)
    else:
        if single_eval:
            r = range(1)
        else:
            r = range(iter_low, iter_high, iter_step)
        for it in r:
            if single_eval:
                model = loadSnapshot(model, models_path)
            else:
                model = loadSnapshot(model, os.path.join(snapshots_path, models_path + '_' + str(it*1000) + '.pth'))
            counter = 0

            r_list_iou_main = []
            r_list_dice_main = []
            r_list_recall_main = []
            r_list_precision_main = []

            r_list_iou_sec = []

            for img_str in img_list:
                img_str = img_str.rstrip()
                img, gt1, out1, gt2, out2, _ = EFP.predict(os.path.join(main_folder_path, img_str),
                                                                model, num_labels, num_labels2, postfix, main_folder_path,
                                                                eval_method, gpu0, useGPU, patch_size = patch_pred_size, test_augm = test_augm, extra_patch = extra_patch)

                result_iou_main = METRICS.metricEval('iou', out2, gt2, num_labels)
                result_dice_main = METRICS.metricEval('dice', out2, gt2, num_labels)
                result_recall_main = METRICS.metricEval('recall', out2, gt2, num_labels)
                result_precision_main = METRICS.metricEval('precision', out2, gt2, num_labels)
                result_iou_sec = METRICS.metricEval('iou', out1, gt1, num_labels2)

                r_list_iou_main.append(result_iou_main)
                r_list_dice_main.append(result_dice_main)
                r_list_recall_main.append(result_recall_main)
                r_list_precision_main.append(result_precision_main)
                r_list_iou_sec.append(result_iou_sec)
                counter += 1
                sys.stdout.write("Model Iter | {:5d} | Progress: | {:4d}/{:4d} | Last result {:1.4f}    \r".format(it * 1000, counter, len(img_list), result_iou_main))
                sys.stdout.flush()
            avg_iou = np.mean(r_list_iou_main)
            avg_dice = np.mean(r_list_dice_main)
            avg_recall = np.mean(r_list_recall_main)
            avg_precision = np.mean(r_list_precision_main)
            avg_iou_sec = np.mean(r_list_iou_sec)
            results_file.write('Iterations: {:5d} iou: {:1.4f} dice: {:1.4f} recall: {:1.4f} precision: {:1.4f} iou_secondary: {:1.4f} \n'.format(it*1000, avg_iou, avg_dice, avg_recall, avg_precision, avg_iou_sec))
        print('Done!')
        results_file.close()

def loadSnapshot(model, path):
    if useGPU:
        #loading on GPU when model was saved on GPU
        saved_state_dict = torch.load(path)
    else:
        #loading on CPU when model was saved on GPU
        saved_state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    model.load_state_dict(saved_state_dict)
    return model

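# Minimal usage sketch (note that modelInit() infers the architecture from the
# same models_path filename that the snapshot is then loaded from):
#   model, with_priv = modelInit()
#   model = loadSnapshot(model, models_path)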
if __name__ == "__main__":
    model, with_priv = modelInit()
    if with_priv:
        evalModelPriv(model)
    else:
        evalModel(model)