Diff of /utils/evalFP.py [000000] .. [f2ca4d]


a b/utils/evalFP.py
import sys
import os

import evalMetrics as METRICS
import PP
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable

import augmentations as AUG

#---------------------------------------------
#Evaluation functions for PrivCNNs
#---------------------------------------------

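#evalModelX: runs predict() on every scan listed in 'val<postfix>.txt', averages the chosen
#metric over the list and returns (mean metric for the privileged parcellation output,
#mean metric for the main segmentation output)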
def evalModelX(model, num_labels, num_labels2, postfix, main_folder_path, eval_method, gpu0, useGPU,
                    patch_size = 70, eval_metric = 'iou', test_augm = False, extra_patch = 30, priv_eval = True):
    eval_list = main_folder_path + 'val' + postfix + '.txt'
    img_list = open(eval_list).readlines()
    v = 0
    v_priv = 0
    for img_str in img_list:
        img_str = img_str.rstrip()
        _, gt1, out1, gt2, out2, _ = predict(os.path.join(main_folder_path, img_str), model, num_labels, num_labels2,
                                                    postfix, main_folder_path, eval_method, gpu0, useGPU, patch_size=patch_size,
                                                    test_augm = test_augm, extra_patch = extra_patch, priv_eval = priv_eval)
        v += METRICS.metricEval(eval_metric, out2, gt2, num_labels)
        v_priv += METRICS.metricEval(eval_metric, out1, gt1, num_labels2)
    return v_priv / len(img_list), v / len(img_list)


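#testPredict: predicts the main segmentation output for a single (1 x C x dim1 x dim2 x dim3)
#volume; eval_method 0 runs one full-volume forward pass, eval_method 1 uses the sliding-window
#scheme in predictByPatches; get_soft = True returns the raw per-class scores instead of the
#argmax labels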
def testPredict(img, model, num_labels, num_labels2, eval_method, gpu0, useGPU, stride = 50, patch_size = 70, test_augm = True, extra_patch = 30, get_soft = False):
    if eval_method == 0:
        if useGPU:
            _, out = model(Variable(torch.from_numpy(img).float(),volatile = True).cuda(gpu0))
        else:
            _, out = model(Variable(torch.from_numpy(img).float(),volatile = True))
        out = out.data[0].cpu().numpy()
    elif eval_method == 1:
        _, out = predictByPatches(img, model, num_labels, num_labels2, useGPU, gpu0,
                stride = stride, patch_size = patch_size,
                test_augm = test_augm, extra_patch = extra_patch, priv_eval = False)
    out = out.squeeze()
    if get_soft:
        return out
    #take argmax to get predictions
    out = np.argmax(out, axis = 0)
    #remove batch and label dimension
    out = out.squeeze()
    return out

#given a model and an input path, returns the image as numpy, the parcellation (gif) ground
#truth, its prediction, the wmh ground truth, its prediction and the affine transformation
#read when loading the ground-truth scan
def predict(img_path, model, num_labels, num_labels2, postfix, main_folder_path, eval_method, gpu0, useGPU,
                        stride = 50, patch_size = 70, test_augm = True, extra_patch = 30, priv_eval = True):
    #read image
    img = PP.numpyFromScan(img_path)
    #read wmh ground truth
    gt_path = img_path.replace('slices', 'gt_slices').replace('FLAIR', 'wmh').replace('/pre','')
    gt, affine = PP.numpyFromScan(gt_path, get_affine = True, makebin = (num_labels == 2))

    #read parcellation (gif) ground truth
    gif_path = img_path.replace('scans', 'gifs').replace('FLAIR', 'parcellation').replace('/pre','')
    gif = PP.numpyFromScan(gif_path)

    #move the channel axis to the front and add a batch dimension to the input
    img = img.transpose((3,0,1,2))
    img = img[np.newaxis, :]
    gt = gt.transpose((3,0,1,2))
    gif = gif.transpose((3,0,1,2))

    if eval_method == 0:
        if useGPU:
            out1_v, out2_v = model(Variable(torch.from_numpy(img).float(),volatile=True).cuda(gpu0))
        else:
            out1_v, out2_v = model(Variable(torch.from_numpy(img).float(),volatile=True))
        out1 = out1_v.data[0].cpu().numpy()
        out2 = out2_v.data[0].cpu().numpy()
        del out1_v, out2_v
    elif eval_method == 1:
        out1, out2 = predictByPatches(img, model, num_labels, num_labels2, useGPU, gpu0,
                stride = stride, test_augm = test_augm, patch_size = patch_size,
                extra_patch = extra_patch, priv_eval = priv_eval)
    out1 = out1.squeeze()
    out1 = np.argmax(out1, axis = 0)
    out1 = out1.squeeze()

    out2 = out2.squeeze()
    out2 = np.argmax(out2, axis = 0)
    out2 = out2.squeeze()

    #remove batch and channel dimensions from the input image
    img = img.squeeze()

    return img, gif, out1, gt, out2, affine

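#predictByPatches: pads the volume so that (dim - patch_size) is a multiple of stride, slides a
#patch_size window over it with the given stride, sums the per-patch soft predictions and divides
#by how often each voxel was covered; extra_patch adds context around each patch that is cropped
#off again before accumulation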
def predictByPatches(img, model, num_labels, num_labels2, useGPU, gpu0, patch_size = 70, test_augm = False, stride = 50, extra_pad = 0, extra_patch = 30, priv_eval = True):
    batch_num, num_channels, dim1, dim2, dim3 = img.shape
    p_size = patch_size
    #add padding to each dim s.t. (dim - p_size) % stride == 0
    dim1_pad = stride - ((dim1-p_size) % stride)
    dim2_pad = stride - ((dim2-p_size) % stride)
    dim3_pad = stride - ((dim3-p_size) % stride)

    #split the padding between both sides of each axis (integer arithmetic so the halves always sum to the full pad)
    x_1_off, x_2_off = dim1_pad - dim1_pad//2, dim1_pad//2
    y_1_off, y_2_off = dim2_pad - dim2_pad//2, dim2_pad//2
    z_1_off, z_2_off = dim3_pad - dim3_pad//2, dim3_pad//2

    img = np.lib.pad(img, ((0,0),(0,0), (x_1_off, x_2_off), (y_1_off, y_2_off), (z_1_off, z_2_off)), mode='minimum')
    _, _, padded_dim1, padded_dim2, padded_dim3 = img.shape

    out2_shape = (img.shape[0], num_labels, img.shape[2], img.shape[3], img.shape[4])
    out1_shape = (img.shape[0], num_labels2, img.shape[2], img.shape[3], img.shape[4])

    #accumulators for the summed soft predictions and for how many patches covered each voxel
    out1_total = np.zeros(out1_shape, dtype=np.float16)
    out1_counter = np.zeros(out1_shape, dtype=np.int8)
    out2_total = np.zeros(out2_shape)
    out2_counter = np.zeros(out2_shape)

    extra_p = extra_patch // 2

    for i in range(0, padded_dim1 - p_size + 1, stride):
        for j in range(0, padded_dim2 - p_size + 1, stride):
            for k in range(0, padded_dim3 - p_size + 1, stride):
                if extra_p != 0:
                    #take extra context around the patch where the padded volume allows it
                    i_l, i_r = getExtraPatchOffsets(i, 0, padded_dim1 - p_size, extra_p)
                    j_l, j_r = getExtraPatchOffsets(j, 0, padded_dim2 - p_size, extra_p)
                    k_l, k_r = getExtraPatchOffsets(k, 0, padded_dim3 - p_size, extra_p)

                    img_patch = img[:,:, (i-i_l):(i+p_size+i_r),(j-j_l):(j+p_size+j_r),(k-k_l):(k+p_size+k_r)]

                    out1_np, out2_np = getPatchPrediction(img_patch, model, useGPU, gpu0, extra_pad = extra_pad, test_augm = test_augm)
                    #crop the extra context off again so the prediction matches the p_size patch
                    out1_np = removePatchOffset(out1_np, i_l, i_r, j_l, j_r, k_l, k_r)
                    out2_np = removePatchOffset(out2_np, i_l, i_r, j_l, j_r, k_l, k_r)

                    if priv_eval:
                        out1_total[:,:, i:i+p_size,j:j+p_size,k:k+p_size] += out1_np
                        out1_counter[:, :, i:i+p_size, j:j+p_size, k:k+p_size] += 1
                    out2_total[:,:, i:i+p_size,j:j+p_size,k:k+p_size] += out2_np
                    out2_counter[:, :, i:i+p_size, j:j+p_size, k:k+p_size] += 1

                else:
                    img_patch = img[:, :, i:i+p_size, j:j+p_size, k:k+p_size]
                    #make a prediction on this image patch, adding extra padding during prediction and augmenting
                    #the result is of the same shape and size as the original img patch
                    out1_np, out2_np = getPatchPrediction(img_patch, model, useGPU, gpu0, extra_pad = extra_pad, test_augm = test_augm)
                    #the privileged output is kept in float16 since it is too memory intensive otherwise
                    if priv_eval:
                        out1_total[:, :, i:i+p_size, j:j+p_size, k:k+p_size] += out1_np.astype(np.float16)
                        out1_counter[:, :, i:i+p_size, j:j+p_size, k:k+p_size] += 1
                    out2_total[:, :, i:i+p_size, j:j+p_size, k:k+p_size] += out2_np
                    out2_counter[:, :, i:i+p_size, j:j+p_size, k:k+p_size] += 1
    #average the overlapping patch predictions
    if priv_eval:
        out1_total = out1_total / out1_counter
    out2_total = out2_total / out2_counter
    #remove padding from predictions (explicit end indices so a zero offset keeps the full axis)
    out1_total = out1_total[:, :, x_1_off:padded_dim1 - x_2_off, y_1_off:padded_dim2 - y_2_off, z_1_off:padded_dim3 - z_2_off]
    out2_total = out2_total[:, :, x_1_off:padded_dim1 - x_2_off, y_1_off:padded_dim2 - y_2_off, z_1_off:padded_dim3 - z_2_off]
    return out1_total, out2_total

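#returns how much extra context (up to extra_p) can be taken on each side of position v
#without stepping outside [low_bound, upper_bound]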
def getExtraPatchOffsets(v, low_bound, upper_bound, extra_p):
    v_left = 0
    v_right = 0
    if v - extra_p > low_bound:
        v_left = extra_p
    if v + extra_p < upper_bound:
        v_right = extra_p
    return v_left, v_right

#crops the extra context offsets (i_l, i_r), (j_l, j_r), (k_l, k_r) off a patch prediction again
def removePatchOffset(np_arr, i_l, i_r, j_l, j_r, k_l, k_r):
    bn, c, s_i, s_j, s_k = np_arr.shape
    return np_arr[:,:,(i_l):(s_i-i_r), (j_l):(s_j-j_r), (k_l):(s_k-k_r)]


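#getPatchPrediction: runs the model on one patch, optionally with symmetric zero padding
#(extra_pad) that is cropped from the outputs again, and optionally with test-time augmentation:
#two extra randomly rotated/scaled copies whose main-output predictions are mapped back and
#averaged after a softmax; the privileged output comes from the unaugmented pass only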
def getPatchPrediction(img_patch, model, useGPU, gpu0, extra_pad = 10, test_augm = False):
    pd = extra_pad // 2
    padding = ((0,0), (0,0), (pd, pd), (pd, pd), (pd,pd))
    img_patch = np.pad(img_patch, padding, 'constant')

    num_augm = 1
    if test_augm:
        num_augm = 3

    out1_np_total = None
    out2_np_total = None
    for i in range(num_augm):
        img_patch_cp = np.copy(img_patch)
        if test_augm and i != 0:
            #apply a random rotation and scaling augmentation
            rot_x, rot_y, rot_z = AUG.getRotationVal([10,10,10])
            zoom_val = AUG.getScalingVal(0.9, 1.1)

            img_patch_cp = AUG.applyScale([img_patch_cp], zoom_val, [3])[0]
            img_patch_cp = AUG.applyRotation([img_patch_cp], [rot_x, rot_y, rot_z], [3])[0]
        if useGPU:
            out1, out2 = model(Variable(torch.from_numpy(img_patch_cp).float(),volatile=True).cuda(gpu0))
        else:
            out1, out2 = model(Variable(torch.from_numpy(img_patch_cp).float(),volatile=True))

        out1_np = out1.data[0].cpu().numpy()
        out2_np = out2.data[0].cpu().numpy()
        del out1, out2
        #re-add the batch dimension: output is (1 x num_labels x dim1 x dim2 x dim3)
        out1_np = out1_np[np.newaxis,:]
        out2_np = out2_np[np.newaxis,:]

        if test_augm and i != 0:
            #undo the augmentation on the main output, channel by channel
            temp2 = np.copy(out2_np)
            out2_np = None
            rev_zoom_i = float(img_patch.shape[2]) / img_patch_cp.shape[2]
            rev_zoom_j = float(img_patch.shape[3]) / img_patch_cp.shape[3]
            rev_zoom_k = float(img_patch.shape[4]) / img_patch_cp.shape[4]

            for j in range(temp2.shape[1]):
                r2 = AUG.applyRotation([temp2[:,j:j+1,:,:,:]], [-rot_x, -rot_y, -rot_z], [3])[0]
                r2 = AUG.applyScale([r2], [rev_zoom_i,rev_zoom_j,rev_zoom_k], [3])[0]
                if not isinstance(out2_np, np.ndarray):
                    out2_np = np.zeros([1, temp2.shape[1], r2.shape[2], r2.shape[3], r2.shape[4]])
                out2_np[:, j,:,:,:] = r2

        out2_np = numpySoftmax(out2_np, 1)

        nb, c, n_i, n_j, n_k = out2_np.shape

        #strip the extra padding; only the main output is accumulated over augmentations,
        #the privileged output keeps the prediction from the first (unaugmented) pass
        if not isinstance(out1_np_total, np.ndarray):
            out1_np_total = out1_np[:,:,(pd):(n_i-pd),(pd):(n_j-pd),(pd):(n_k-pd)]
            out2_np_total = out2_np[:,:,(pd):(n_i-pd),(pd):(n_j-pd),(pd):(n_k-pd)]
        else:
            out2_np_total += out2_np[:,:,(pd):(n_i-pd),(pd):(n_j-pd),(pd):(n_k-pd)]
    return (out1_np_total), (out2_np_total / num_augm)

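#numpySoftmax: softmax over the given axis of a numpy array, with the global max subtracted for
#numerical stability and a small epsilon in the denominator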
def numpySoftmax(x, axis_):
    e_x = np.exp(x - np.max(x))
    #keepdims so the sum broadcasts over the class axis for any batch size
    return e_x / (e_x.sum(axis=axis_, keepdims=True) + 0.00001)
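
#---------------------------------------------
#Usage sketch (illustrative only): the model loader, folder layout and label counts below are
#assumptions and not part of this module.
#
#   model = load_trained_model()                 #hypothetical helper returning the two-headed network
#   model.eval()
#   priv_iou, wmh_iou = evalModelX(model, num_labels = 2, num_labels2 = 160,
#                                  postfix = '', main_folder_path = '/data/wmh/',
#                                  eval_method = 1, gpu0 = 0, useGPU = True,
#                                  patch_size = 70, eval_metric = 'iou',
#                                  test_augm = False, extra_patch = 30)
#---------------------------------------------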