|
# src/model_defs/vgg16.py
|
|
############## Model Definition goes here ##############

# Input geometry (DICOM slices with 2 channels) and number of output classes.
num_chan_in = 2
height = 224
width = 224
num_classes = 6

inputs = K.layers.Input([height, width, num_chan_in], name="DICOM")

# Settings shared by every convolution: 3x3 kernels, ReLU activation,
# "same" padding (so only the pooling layers reduce spatial size),
# and He-uniform weight initialization.
params = dict(kernel_size=(3, 3), activation="relu",
              padding="same",
              kernel_initializer="he_uniform")

# VGG16-style stacks: two 3x3 convolutions followed by 2x2 max-pooling.
# Conv2D stride defaults to (1, 1); MaxPooling2D stride defaults to pool_size.
convA = K.layers.Conv2D(name="convAa", filters=64, **params)(inputs)
convA = K.layers.Conv2D(name="convAb", filters=64, **params)(convA)
poolA = K.layers.MaxPooling2D(name="poolA", pool_size=(2, 2))(convA)

convB = K.layers.Conv2D(name="convBa", filters=128, **params)(poolA)
convB = K.layers.Conv2D(name="convBb", filters=128, **params)(convB)
poolB = K.layers.MaxPooling2D(name="poolB", pool_size=(2, 2))(convB)

convC = K.layers.Conv2D(name="convCa", filters=256, **params)(poolB)
convC = K.layers.Conv2D(name="convCb", filters=256, **params)(convC)
poolC = K.layers.MaxPooling2D(name="poolC", pool_size=(2, 2))(convC)

convD = K.layers.Conv2D(name="convDa", filters=512, **params)(poolC)
convD = K.layers.Conv2D(name="convDb", filters=512, **params)(convD)
poolD = K.layers.MaxPooling2D(name="poolD", pool_size=(2, 2))(convD)

convE = K.layers.Conv2D(name="convEa", filters=512, **params)(poolD)
convE = K.layers.Conv2D(name="convEb", filters=512, **params)(convE)
poolE = K.layers.MaxPooling2D(name="poolE", pool_size=(2, 2))(convE)

# BUG FIX: the head previously flattened poolC, which left blocks D and E
# (and their two extra pooling stages) disconnected from the graph.  Flatten
# the output of the FINAL pooling layer, as the original comment intended.
# After 5 MaxPooling(2, 2) steps: 224 // 2**5 = 7, so 7 * 7 * 512 = 25088
# units feed the first dense layer (the old "16*16*512 = 131072" figure was
# wrong for a 224x224 input).
flat = K.layers.Flatten()(poolE)

# VGG-paper-style classifier head: two 4096-wide fully-connected layers.
dense1 = K.layers.Dense(4096, activation="relu")(flat)
dense2 = K.layers.Dense(4096, activation="relu")(dense1)
# NOTE(review): "sigmoid" output paired with categorical_crossentropy below is
# a mismatch — for one-hot multi-class labels use "softmax"; for independent
# multi-label targets use binary cross-entropy.  Left unchanged here to avoid
# silently altering training behavior; confirm against the label format.
dense3 = K.layers.Dense(num_classes, activation="sigmoid")(dense2)

model = K.models.Model(inputs=[inputs], outputs=[dense3])
opt = K.optimizers.Adam(learning_rate=0.01)
model.compile(loss=K.losses.categorical_crossentropy,
              optimizer=opt,
              metrics=['accuracy'])