|
a |
|
b/ConvNet.py |
|
|
1 |
# -*- coding: utf-8 -*- |
|
|
2 |
""" |
|
|
3 |
Created on Wed Nov 02 11:36:23 2016 |
|
|
4 |
|
|
|
5 |
@author: seeker105 |
|
|
6 |
""" |
|
|
7 |
|
|
|
8 |
''' First CNN training and testing ''' |
|
|
9 |
|
|
|
10 |
from keras.models import Sequential |
|
|
11 |
from keras.models import Model |
|
|
12 |
from keras.layers.advanced_activations import LeakyReLU |
|
|
13 |
from keras.layers import Input, merge, Convolution2D, MaxPooling2D |
|
|
14 |
from keras.layers import UpSampling2D, Activation, Dropout |
|
|
15 |
from keras.layers import Dense, Flatten, Reshape |
|
|
16 |
from keras.optimizers import Adam, SGD |
|
|
17 |
from keras.layers.normalization import BatchNormalization |
|
|
18 |
from keras.initializers import constant |
|
|
19 |
from keras.regularizers import l1_l2 |
|
|
20 |
from keras import backend as K |
|
|
21 |
from keras.engine.topology import Layer |
|
|
22 |
from theano.tensor.nnet.abstract_conv import bilinear_upsampling |
|
|
23 |
|
|
|
24 |
# bilinear upsampling layer
class Deconv2D(Layer):
    """Custom Keras layer that upsamples feature maps by a fixed integer
    ratio using Theano's bilinear interpolation.

    Assumes channels-first input, i.e. (batch, channels, rows, cols) —
    consistent with the ``set_image_dim_ordering('th')`` calls elsewhere
    in this file.
    """

    def __init__(self, ratio, **kwargs):
        # ratio: integer upsampling factor applied to both spatial dims
        self.ratio = ratio
        super(Deconv2D, self).__init__(**kwargs)

    def build(self, input_shape):
        # No trainable weights to create; just defer to the base class.
        super(Deconv2D,self).build(input_shape)

    def call(self, x, mask=None):
        # Theano-only op; performs the actual bilinear upsampling.
        return bilinear_upsampling(x, ratio=self.ratio)

    def get_output_shape_for(self, input_shape):
        # Batch and channel dims are unchanged; both spatial dims grow
        # by `ratio`.
        # NOTE(review): this is the Keras 1 hook name — Keras 2 expects
        # compute_output_shape; confirm the Keras version in use.
        return (input_shape[0], input_shape[1],
                input_shape[2] * self.ratio, input_shape[3] * self.ratio)
|
36 |
|
|
|
37 |
|
|
|
38 |
class LeNet:
    """Collection of CNN architecture builders (Keras 1.x style, Theano
    channels-first ordering) for patch classification and segmentation.
    """

    @staticmethod
    def _conv_triplet(model, filters, alp, input_shape=None):
        """Append three CONV => LeakyReLU pairs with `filters` 3x3 kernels.

        `input_shape` is passed only to the first layer, and only when this
        triplet opens the model (Keras Sequential needs it on the first
        layer alone).
        """
        for i in range(3):
            extra = {}
            if input_shape is not None and i == 0:
                extra['input_shape'] = input_shape
            model.add(Convolution2D(filters, kernel_size=(3, 3), padding="same",
                                    data_format='channels_first',
                                    kernel_initializer='glorot_normal',
                                    bias_initializer=constant(0.1), **extra))
            model.add(LeakyReLU(alpha=alp))

    @staticmethod
    def _fc_head(model, units, classes, alp, dropout):
        """Append the fully connected head: Flatten, two Dense+LeakyReLU
        blocks of `units` neurons with dropout, then a softmax over
        `classes` outputs.
        """
        model.add(Flatten())
        model.add(Dropout(dropout))
        model.add(Dense(units, kernel_initializer='glorot_normal',
                        bias_initializer=constant(0.1)))
        model.add(LeakyReLU(alp))
        model.add(Dropout(dropout))
        model.add(Dense(units, kernel_initializer='glorot_normal',
                        bias_initializer=constant(0.1)))
        model.add(LeakyReLU(alp))
        model.add(Dropout(dropout))
        # FC => SOFTMAX
        model.add(Dense(classes, kernel_initializer='glorot_normal',
                        bias_initializer=constant(0.1)))
        model.add(Activation("softmax"))

    @staticmethod
    def build_Pereira(w, h, d, classes, weightsPath = None, alp = 0.333, dropout = 0.1):
        '''INPUT:
            w, h, d     : input width, height, depth (channels)
            classes     : number of output classes
            weightsPath : optional path to pre-trained weights to load
            alp         : alpha parameter for LeakyReLU
            dropout     : dropout probability for the FC layers
        OUTPUT:
            CNN architecture (uncompiled Keras Sequential model)
        '''
        K.set_image_dim_ordering('th')
        model = Sequential()

        # first set of CONV => CONV => CONV => LReLU => MAXPOOL
        LeNet._conv_triplet(model, 64, alp, input_shape=(d, h, w))
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

        # second set of CONV => CONV => CONV => LReLU => MAXPOOL
        LeNet._conv_triplet(model, 128, alp)
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

        # Fully connected layers: FC => LReLU => FC => LReLU => softmax.
        # BUG FIX: the `dropout` parameter was accepted but Dropout(0.1)
        # was hard-coded; it is now honoured (default unchanged).
        LeNet._fc_head(model, 256, classes, alp, dropout)

        # if a pre-trained model is applied, load the weights
        if weightsPath is not None:
            model.load_weights(weightsPath)

        return model

    @staticmethod
    def build_Nikki(w, h, d, classes, l1=0.01, l2=0.01):
        '''Build a CONV/BatchNorm/MaxPool CNN with heavy dropout and L1+L2
        weight regularization, compiled with SGD + categorical crossentropy.

        INPUT: width, height, depth, number of output classes, and the
        L1/L2 regularization factors.
        OUTPUT: compiled Keras Sequential model.
        '''
        K.set_image_dim_ordering('th')
        model = Sequential()

        # BUG FIX: `l1` and `l2` were accepted but never forwarded to the
        # regularizer (l1_l2() was called with its own defaults). The
        # defaults match (0.01, 0.01), so default behavior is unchanged.
        def _reg():
            return l1_l2(l1=l1, l2=l2)

        model.add(Convolution2D(64, (7, 7), activation='relu',
                                kernel_regularizer=_reg(), input_shape=(d, h, w)))
        model.add(BatchNormalization(mode=0, axis=1))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
        model.add(Dropout(0.5))
        model.add(Convolution2D(128, (5, 5), activation='relu',
                                kernel_regularizer=_reg()))
        model.add(BatchNormalization(mode=0, axis=1))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
        model.add(Dropout(0.5))
        model.add(Convolution2D(128, (5, 5), activation='relu',
                                kernel_regularizer=_reg()))
        model.add(BatchNormalization(mode=0, axis=1))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
        model.add(Dropout(0.5))
        model.add(Convolution2D(64, (3, 3), activation='relu',
                                kernel_regularizer=_reg()))
        model.add(Dropout(0.25))
        model.add(Flatten())
        # BUG FIX: output width was hard-coded to 5, silently ignoring the
        # `classes` argument.
        model.add(Dense(classes))
        model.add(Activation('softmax'))
        sgd = SGD(lr=0.001, decay=0.01, momentum=0.9)
        model.compile(loss='categorical_crossentropy', optimizer=sgd)
        return model

    @staticmethod
    def unet(w, h, d, classes=5):
        '''Build a U-Net style encoder/decoder for dense (per-pixel)
        classification, compiled with Adam + categorical crossentropy.

        INPUT: width, height, depth (channels), and number of output
        classes (default 5, the original hard-coded value, so existing
        callers are unaffected).
        OUTPUT: compiled Keras Model producing a softmax over `classes`
        for each of the h*w pixels.
        '''
        K.set_image_dim_ordering('th')

        inputs = Input((d, h, w))
        # --- contracting path ---
        conv1 = Convolution2D(32, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(inputs)
        conv1 = Convolution2D(32, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = Convolution2D(64, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(pool1)
        conv2 = Convolution2D(64, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Convolution2D(128, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(pool2)
        conv3 = Convolution2D(128, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Convolution2D(256, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(pool3)
        conv4 = Convolution2D(256, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        # --- bottleneck ---
        conv5 = Convolution2D(512, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(pool4)
        conv5 = Convolution2D(512, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(conv5)

        # --- expanding path with skip connections (Keras 1 merge API) ---
        up6 = merge([Convolution2D(256, kernel_size=(2, 2), activation='relu', padding='same', data_format='channels_first')(UpSampling2D(size=(2, 2), data_format='channels_first')(conv5)), conv4], mode='concat', concat_axis=1)
        conv6 = Convolution2D(256, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(up6)
        conv6 = Convolution2D(256, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(conv6)

        up7 = merge([Convolution2D(128, kernel_size=(2, 2), activation='relu', padding='same', data_format='channels_first')(UpSampling2D(size=(2, 2), data_format='channels_first')(conv6)), conv3], mode='concat', concat_axis=1)
        conv7 = Convolution2D(128, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(up7)
        conv7 = Convolution2D(128, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(conv7)

        up8 = merge([Convolution2D(64, kernel_size=(2, 2), activation='relu', padding='same', data_format='channels_first')(UpSampling2D(size=(2, 2), data_format='channels_first')(conv7)), conv2], mode='concat', concat_axis=1)
        conv8 = Convolution2D(64, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(up8)
        conv8 = Convolution2D(64, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(conv8)

        up9 = merge([Convolution2D(32, kernel_size=(2, 2), activation='relu', padding='same', data_format='channels_first')(UpSampling2D(size=(2, 2), data_format='channels_first')(conv8)), conv1], mode='concat', concat_axis=1)
        conv9 = Convolution2D(32, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(up9)
        conv9 = Convolution2D(32, kernel_size=(3, 3), activation='relu', padding='same', data_format='channels_first')(conv9)

        # 1x1 conv to `classes` maps, flatten spatial dims, per-pixel softmax.
        # GENERALIZED: the class count was hard-coded to 5.
        conv10 = Convolution2D(classes, kernel_size=(1, 1), padding='same', data_format='channels_first')(conv9)
        flat = Reshape((classes, h * w))(conv10)
        out = Activation('softmax')(flat)
        model = Model(input=inputs, output=out)
        model.compile(optimizer=Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])

        return model

    # let's see how a Conv Net without any pooling layers fares, as pooling
    # layers are known to reduce data, but stop overfitting
    @staticmethod
    def build_Pereira_mod(w, h, d, classes, weightsPath = None, alp = 0.333, dropout = 0.1):
        '''Variant of build_Pereira: no pooling after the first conv stack,
        a gentler (2x2, stride 1) pool after the second, and 128-unit FC
        layers.

        INPUT:
            w, h, d     : input width, height, depth (channels)
            classes     : number of output classes
            weightsPath : optional path to pre-trained weights to load
            alp         : alpha parameter for LeakyReLU
            dropout     : dropout probability for the FC layers
        OUTPUT:
            CNN architecture (uncompiled Keras Sequential model)
        '''
        K.set_image_dim_ordering('th')
        model = Sequential()

        # first set of CONV => CONV => CONV => LReLU (no pooling here)
        LeNet._conv_triplet(model, 64, alp, input_shape=(d, h, w))

        # second set of CONV => CONV => CONV => LReLU => MAXPOOL
        LeNet._conv_triplet(model, 128, alp)
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))

        # Fully connected head.
        # BUG FIX: the `dropout` parameter was accepted but Dropout(0.1)
        # was hard-coded; it is now honoured (default unchanged).
        LeNet._fc_head(model, 128, classes, alp, dropout)

        # if a pre-trained model is applied, load the weights
        if weightsPath is not None:
            model.load_weights(weightsPath)

        return model

    @staticmethod
    def build_Pereira_no_pooling(w, h, d, classes, weightsPath = None, alp = 0.333, dropout = 0.1):
        '''Variant of build_Pereira with strided convolutions in place of
        max pooling for downsampling.

        INPUT:
            w, h, d     : input width, height, depth (channels)
            classes     : number of output classes
            weightsPath : optional path to pre-trained weights to load
            alp         : alpha parameter for LeakyReLU
            dropout     : dropout probability for the FC layers
        OUTPUT:
            CNN architecture (uncompiled Keras Sequential model)
        '''
        K.set_image_dim_ordering('th')
        model = Sequential()

        # first set of CONV => CONV => CONV => LReLU, then a strided conv
        # as the downsampling step
        LeNet._conv_triplet(model, 64, alp, input_shape=(d, h, w))
        model.add(Convolution2D(128, kernel_size=(3, 3), strides=(2, 2),
                                padding="valid", data_format='channels_first',
                                kernel_initializer='glorot_normal',
                                bias_initializer=constant(0.1)))
        model.add(LeakyReLU(alpha=alp))

        # second set of CONV => CONV => CONV => LReLU, then a strided conv
        LeNet._conv_triplet(model, 128, alp)
        model.add(Convolution2D(256, kernel_size=(3, 3), strides=(2, 2),
                                padding="valid", data_format='channels_first',
                                kernel_initializer='glorot_normal',
                                bias_initializer=constant(0.1)))
        model.add(LeakyReLU(alpha=alp))

        # Fully connected head.
        # BUG FIX: the `dropout` parameter was accepted but Dropout(0.1)
        # was hard-coded; it is now honoured (default unchanged).
        LeNet._fc_head(model, 128, classes, alp, dropout)

        # if a pre-trained model is applied, load the weights
        if weightsPath is not None:
            model.load_weights(weightsPath)

        return model