code/test-mask.py

"""
Purpose: test a trained U-Net segmenter that segments out the nodules on a given 2D patient CT scan slice

Note:
- the companion training script trains from scratch, with no preloaded weights, and saves its weights to unet.hdf5 in the specified output folder
- this script rebuilds the same architecture, loads trained weights (unet2.hdf5), and visualizes the predicted masks against the ground truth
"""

from __future__ import print_function

import numpy as np
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
import matplotlib.pyplot as plt

# trainImages.npy / trainMasks.npy are expected under this path as channel-first
# stacks of slices, shape (num_slices, 1, IMG_ROWS, IMG_COLS)
WORKING_PATH = "/home/marshallee/Documents/lung/subset0/"
IMG_ROWS = 512
IMG_COLS = 512

SMOOTH = 1.  # smoothing term that keeps the Dice ratio defined for empty masks

K.set_image_dim_ordering('th')  # Theano dimension ordering (channels first) in this code

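
# Data-layout sketch (an assumption, not part of the original file): given the 'th'
# dim ordering above and the Input((1, IMG_ROWS, IMG_COLS)) used in get_unet() below,
# the .npy files are expected to be channel-first stacks of slices. A quick check:
def _check_data_shapes():
    imgs = np.load(WORKING_PATH + "trainImages.npy")
    masks = np.load(WORKING_PATH + "trainMasks.npy")
    assert imgs.shape[1:] == (1, IMG_ROWS, IMG_COLS), imgs.shape
    assert imgs.shape == masks.shape, (imgs.shape, masks.shape)
    print('images: ', imgs.shape, ' masks: ', masks.shape)
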
def dice_coef(y_true, y_pred):
    # Dice coefficient on Keras tensors: 2*|A.B| / (|A| + |B|), smoothed
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + SMOOTH) / (K.sum(y_true_f) + K.sum(y_pred_f) + SMOOTH)


def dice_coef_loss(y_true, y_pred):
    # negated Dice coefficient, so that minimizing the loss maximizes overlap
    return -dice_coef(y_true, y_pred)


def dice_coef_np(y_true, y_pred):
    # numpy version of the Dice coefficient, for evaluating predictions offline
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + SMOOTH) / (np.sum(y_true_f) + np.sum(y_pred_f) + SMOOTH)

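
# Sanity-check sketch (not part of the original file): the Dice score is
# 2*|A.B| / (|A| + |B|), with SMOOTH keeping the ratio defined when both masks are
# empty. For the toy masks below the intersection is 1 pixel and each mask has 2
# positive pixels, so the smoothed score is (2*1 + 1) / (2 + 2 + 1) = 0.6.
def _dice_sanity_check():
    toy_true = np.array([[1., 1.], [0., 0.]])
    toy_pred = np.array([[1., 0.], [0., 1.]])
    print('toy dice: ', dice_coef_np(toy_true, toy_pred))  # prints 0.6
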
def get_unet():
    """
    U-net architecture
    """
    inputs = Input((1, IMG_ROWS, IMG_COLS))

    # contracting path: repeated 3x3 convolutions followed by 2x2 max pooling
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)

    # expanding path: upsample and concatenate with the matching contracting feature maps
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)

    # 1x1 convolution with a sigmoid gives the per-pixel nodule probability map
    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)

    model = Model(input=inputs, output=conv10)

    model.compile(optimizer=Adam(lr=1.0e-5), loss=dice_coef_loss, metrics=[dice_coef])

    return model

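
# Training sketch (an assumption, not the original training script): ModelCheckpoint
# and LearningRateScheduler are imported above but unused in this file, presumably
# because the companion trainer wires a checkpoint into model.fit roughly as below,
# saving weights to unet.hdf5 as the docstring notes. Batch size and epoch count
# here are illustrative only.
def _train_sketch(imgs_train, masks_train):
    model = get_unet()
    checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', save_best_only=True)
    model.fit(imgs_train, masks_train, batch_size=2, nb_epoch=20,
              verbose=1, shuffle=True, callbacks=[checkpoint])
    return model
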
def test():
    imgs_test = np.load(WORKING_PATH + "trainImages.npy").astype(np.float32)
    imgs_mask_test_true = np.load(WORKING_PATH + "trainMasks.npy").astype(np.float32)
    num = len(imgs_test)

    mean_test = np.mean(imgs_test)  # mean for data centering
    std_test = np.std(imgs_test)  # std for data normalization
    imgs_test -= mean_test  # images should already be standardized, but just in case
    imgs_test /= std_test

    model = get_unet()
    model.load_weights('unet2.hdf5')
    predMask = model.predict(imgs_test)
    print('pred shape: ', predMask.shape)

    # mean Dice score of the predicted masks against the ground-truth masks
    mean_dice = 0.0
    for i in range(num):
        mean_dice += dice_coef_np(imgs_mask_test_true[i][0], predMask[i][0])
    mean_dice /= num
    print('mean dice coefficient: ', mean_dice)

    # visualize the first slice: input, predicted mask, masked input, and true mask
    i = 0
    fig, ax = plt.subplots(2, 2, figsize=[8, 8])
    ax[0, 0].imshow(imgs_test[i][0], cmap='gray')
    ax[0, 1].imshow(predMask[i][0], cmap='gray')
    ax[1, 0].imshow(imgs_test[i][0] * predMask[i][0], cmap='gray')
    ax[1, 1].imshow(imgs_mask_test_true[i][0], cmap='gray')
    plt.show()


if __name__ == '__main__':
    test()