b/model.py

from keras.layers import Dense, Flatten
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import ImageDataGenerator

from glob import glob
import matplotlib.pyplot as plt

# re-size all the images to this
IMAGE_SIZE = [224, 224]

train_path = 'dataset/TRAIN'
valid_path = 'dataset/TEST'

# add preprocessing layer to the front of VGG
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# don't train existing weights
for layer in vgg.layers:
    layer.trainable = False

# useful for getting the number of classes
# (this script keeps a single sigmoid output for two classes, so folders is informational only)
folders = glob('dataset/TRAIN/*')

# our layers - you can add more if you want
x = Flatten()(vgg.output)

# add the sigmoid as the activation function
prediction = Dense(1, activation='sigmoid')(x)

# create a model object
model = Model(inputs=vgg.input, outputs=prediction)

# view the structure of the model
model.summary()

# tell the model what cost and optimization method to use
model.compile(
    loss='binary_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

# flow_from_directory expects one subdirectory per class,
# e.g. dataset/TRAIN/<class_0>/ and dataset/TRAIN/<class_1>/
training_set = train_datagen.flow_from_directory('dataset/TRAIN',
                                                 target_size=(224, 224),
                                                 batch_size=64,
                                                 class_mode='binary')

test_set = test_datagen.flow_from_directory('dataset/TEST',
                                            target_size=(224, 224),
                                            batch_size=64,
                                            class_mode='binary')

# see which class represents 1 and which represents 0
print(training_set.class_indices)

# fit the model
# (in newer Keras/TensorFlow releases model.fit accepts generators directly and fit_generator is deprecated)
r = model.fit_generator(
    training_set,
    validation_data=test_set,
    epochs=4,
    steps_per_epoch=len(training_set),
    validation_steps=len(test_set)
)

# loss plots (save the figure before plt.show(), otherwise an empty figure is written to disk)
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('LossVal_loss')
plt.show()

# accuracy plots
plt.figure()  # start a new figure so the accuracy curves are not drawn over the loss curves
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('AccVal_acc')
plt.show()

# save our model in order to use it in web development
# model.save('Esophageal_model.h5')
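
Below is a minimal inference sketch for the web-development use mentioned in the final comment. It assumes the model was saved via the (currently commented-out) model.save('Esophageal_model.h5') call, and 'example.jpg' is only a placeholder for whatever image the application receives; neither path is part of the original training script. The preprocessing mirrors the generators above: 224x224 RGB input rescaled by 1./255, with the sigmoid probability interpreted via training_set.class_indices.

import numpy as np
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array

model = load_model('Esophageal_model.h5')               # file name assumed from the save call above

img = load_img('example.jpg', target_size=(224, 224))   # placeholder input image
x = img_to_array(img) / 255.0                           # same 1./255 rescaling as the generators
x = np.expand_dims(x, axis=0)                           # add a batch dimension -> (1, 224, 224, 3)

prob = float(model.predict(x)[0][0])                    # sigmoid output in [0, 1]
label = 1 if prob >= 0.5 else 0                         # map back using training_set.class_indices
print(prob, label)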