Diff of /eval.py [000000] .. [38391a]


b/eval.py
from __future__ import absolute_import, division, print_function

from os import environ, getcwd
from os.path import join

import keras
import numpy as np
import pandas as pd
import sklearn as skl
import tensorflow as tf
from keras.applications.vgg19 import VGG19
from keras.applications import DenseNet169, InceptionResNetV2, DenseNet201
from keras.applications import NASNetMobile
from keras.layers import Dense, GlobalAveragePooling2D
from keras.metrics import binary_accuracy, binary_crossentropy
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator

from mura import Mura

pd.set_option('display.max_rows', 20)
pd.set_option('display.precision', 4)
np.set_printoptions(precision=4)

environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Shut up tensorflow!
print("tf      : {}".format(tf.__version__))
print("keras   : {}".format(keras.__version__))
print("numpy   : {}".format(np.__version__))
print("pandas  : {}".format(pd.__version__))
print("sklearn : {}".format(skl.__version__))

# Hyper-parameters / Globals
BATCH_SIZE = 8  # tweak to your GPU's capacity
IMG_HEIGHT = 420  # InceptionResNetV2 & Xception like 299, ResNet50/VGG/Inception 224, NASNetMobile 331
IMG_WIDTH = IMG_HEIGHT
CHANNELS = 3
DIMS = (IMG_HEIGHT, IMG_WIDTH, CHANNELS)  # blame theano
MODEL_TO_EVAL1 = './models/DenseNet169_221_NEW_HIST.hdf5'
MODEL_TO_EVAL2 = './models/DenseNet169_320_NEW_HIST.hdf5'
MODEL_TO_EVAL3 = './models/DenseNet169_420_NEW_HIST.hdf5'
MODEL_TO_EVAL4 = './models/DenseNet169_520_NEW_HIST.hdf5'
MODEL_TO_EVAL5 = './models/DenseNet169_620_NEW_HIST.hdf5'
# MODEL_TO_EVAL6 = './models/DenseNet169_420_SHOULDER_NEW_HIST.hdf5'
# MODEL_TO_EVAL7 = './models/DenseNet169_420_HAND_NEW_HIST.hdf5'

# MODEL_TO_EVAL3 = './models/DenseNet169_420_FOREARM.hdf5'
# MODEL_TO_EVAL4 = './models/DenseNet169_420_HAND.hdf5'
# MODEL_TO_EVAL5 = './models/DenseNet169_420_HUMERUS.hdf5'
# MODEL_TO_EVAL6 = './models/DenseNet169_420_SHOULDER.hdf5'
# MODEL_TO_EVAL7 = './models/DenseNet169_420_WRIST.hdf5'

DATA_DIR = 'MURA-v1.1/'
EVAL_CSV = 'valid.csv'
EVAL_DIR = 'data/val'

# load up our csv with the validation image paths and labels
data_dir = join(getcwd(), DATA_DIR)
eval_csv = join(data_dir, EVAL_CSV)
df = pd.read_csv(eval_csv, names=['img', 'label'], header=None)
eval_imgs = df.img.values.tolist()
eval_labels = df.label.values.tolist()

eval_datagen = ImageDataGenerator(rescale=1. / 255)
eval_generator = eval_datagen.flow_from_directory(
    EVAL_DIR, class_mode='binary', shuffle=False, target_size=(IMG_HEIGHT, IMG_WIDTH), batch_size=BATCH_SIZE)
n_samples = eval_generator.samples
eval_steps = int(np.ceil(n_samples / BATCH_SIZE))  # ceil so the final partial batch is included
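# flow_from_directory infers the binary labels from the sub-folders of EVAL_DIR (one
# folder per class); the actual folder names are not shown in this file, so the mapping
# it picked can be checked directly:
print("class indices: {}".format(eval_generator.class_indices))
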
base_model = DenseNet169(input_shape=DIMS, weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)  # comment this out for ResNet
x = Dense(1, activation='sigmoid', name='predictions')(x)
model = Model(inputs=base_model.input, outputs=x)
model.load_weights(MODEL_TO_EVAL1)
model.compile(optimizer=Adam(lr=1e-3), loss=binary_crossentropy, metrics=['binary_accuracy'])
score, acc = model.evaluate_generator(eval_generator, eval_steps)
# print(model.metrics_names)
print('==> Metrics with eval')
print("loss: {:0.4f}\taccuracy: {:0.4f}".format(score, acc))
eval_generator.reset()  # restart from the first batch so predictions line up with filenames
y_pred1 = model.predict_generator(eval_generator, eval_steps)

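# The same build / load / evaluate / predict pattern repeats once per checkpoint below.
# A minimal sketch of a helper that could express it in one place (illustration only,
# not part of the original script; the name `eval_densenet_checkpoint` is hypothetical):
def eval_densenet_checkpoint(weights_path, generator, steps):
    base = DenseNet169(input_shape=DIMS, weights='imagenet', include_top=False)
    out = GlobalAveragePooling2D(name='avg_pool')(base.output)
    out = Dense(1, activation='sigmoid', name='predictions')(out)
    m = Model(inputs=base.input, outputs=out)
    m.load_weights(weights_path)
    m.compile(optimizer=Adam(lr=1e-3), loss=binary_crossentropy, metrics=['binary_accuracy'])
    loss, accuracy = m.evaluate_generator(generator, steps)
    print("loss: {:0.4f}\taccuracy: {:0.4f}".format(loss, accuracy))
    generator.reset()  # keep predictions aligned with generator.filenames
    return m.predict_generator(generator, steps)
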
# IMG_HEIGHT = 520
# IMG_WIDTH  = 520
DIMS = (IMG_HEIGHT, IMG_WIDTH, CHANNELS)
eval_generator = eval_datagen.flow_from_directory(
    EVAL_DIR, class_mode='binary', shuffle=False, target_size=(IMG_HEIGHT, IMG_WIDTH), batch_size=BATCH_SIZE)

base_model = DenseNet169(input_shape=DIMS, weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)  # comment this out for ResNet
x = Dense(1, activation='sigmoid', name='predictions')(x)
model = Model(inputs=base_model.input, outputs=x)
model.load_weights(MODEL_TO_EVAL2)
model.compile(optimizer=Adam(lr=1e-3), loss=binary_crossentropy, metrics=['binary_accuracy'])
score, acc = model.evaluate_generator(eval_generator, eval_steps)
# print(model.metrics_names)
print('==> Metrics with eval')
print("loss: {:0.4f}\taccuracy: {:0.4f}".format(score, acc))
eval_generator.reset()  # restart from the first batch so predictions line up with filenames
y_pred2 = model.predict_generator(eval_generator, eval_steps)

IMG_HEIGHT = 420
IMG_WIDTH = 420
DIMS = (IMG_HEIGHT, IMG_WIDTH, CHANNELS)
eval_generator = eval_datagen.flow_from_directory(
    EVAL_DIR, class_mode='binary', shuffle=False, target_size=(IMG_HEIGHT, IMG_WIDTH), batch_size=BATCH_SIZE)

base_model = DenseNet169(input_shape=DIMS, weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)  # comment this out for ResNet
x = Dense(1, activation='sigmoid', name='predictions')(x)
model = Model(inputs=base_model.input, outputs=x)
model.load_weights(MODEL_TO_EVAL3)
model.compile(optimizer=Adam(lr=1e-3), loss=binary_crossentropy, metrics=['binary_accuracy'])
score, acc = model.evaluate_generator(eval_generator, eval_steps)
# print(model.metrics_names)
print('==> Metrics with eval')
print("loss: {:0.4f}\taccuracy: {:0.4f}".format(score, acc))
eval_generator.reset()  # restart from the first batch so predictions line up with filenames
y_pred3 = model.predict_generator(eval_generator, eval_steps)

# IMG_HEIGHT = 520
# IMG_WIDTH  = 520
DIMS = (IMG_HEIGHT, IMG_WIDTH, CHANNELS)
eval_generator = eval_datagen.flow_from_directory(
    EVAL_DIR, class_mode='binary', shuffle=False, target_size=(IMG_HEIGHT, IMG_WIDTH), batch_size=BATCH_SIZE)

base_model = DenseNet169(input_shape=DIMS, weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)  # comment this out for ResNet
x = Dense(1, activation='sigmoid', name='predictions')(x)
model = Model(inputs=base_model.input, outputs=x)
model.load_weights(MODEL_TO_EVAL4)
model.compile(optimizer=Adam(lr=1e-3), loss=binary_crossentropy, metrics=['binary_accuracy'])
score, acc = model.evaluate_generator(eval_generator, eval_steps)
# print(model.metrics_names)
print('==> Metrics with eval')
print("loss: {:0.4f}\taccuracy: {:0.4f}".format(score, acc))
eval_generator.reset()  # restart from the first batch so predictions line up with filenames
y_pred4 = model.predict_generator(eval_generator, eval_steps)

IMG_HEIGHT = 420
IMG_WIDTH = 420
DIMS = (IMG_HEIGHT, IMG_WIDTH, CHANNELS)
eval_generator = eval_datagen.flow_from_directory(
    EVAL_DIR, class_mode='binary', shuffle=False, target_size=(IMG_HEIGHT, IMG_WIDTH), batch_size=BATCH_SIZE)

base_model = DenseNet169(input_shape=DIMS, weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)  # comment this out for ResNet
x = Dense(1, activation='sigmoid', name='predictions')(x)
model = Model(inputs=base_model.input, outputs=x)
model.load_weights(MODEL_TO_EVAL5)
model.compile(optimizer=Adam(lr=1e-3), loss=binary_crossentropy, metrics=['binary_accuracy'])
score, acc = model.evaluate_generator(eval_generator, eval_steps)
# print(model.metrics_names)
print('==> Metrics with eval')
print("loss: {:0.4f}\taccuracy: {:0.4f}".format(score, acc))
eval_generator.reset()  # restart from the first batch so predictions line up with filenames
y_pred5 = model.predict_generator(eval_generator, eval_steps)

# print(y_pred)
df_filenames = pd.Series(np.array(eval_generator.filenames), name='filenames')
df_classes   = pd.Series(np.array(eval_generator.classes), name='classes')

# print(eval_generator.filenames)
# print(eval_generator.classes)

mura = Mura(eval_generator.filenames, y_true=eval_generator.classes, y_pred1=y_pred1, y_pred2=y_pred2, y_pred3=y_pred3, y_pred4=y_pred4, y_pred5=y_pred5)
print('==> Metrics with predict')
print(mura.metrics())
print(mura.metrics_by_encounter())
print(mura.metrics_by_study_type())
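
# How the five prediction vectors are combined lives inside mura.Mura and is not shown
# in this file; assuming a plain probability average across the five checkpoints, an
# equivalent per-image ensemble could be sketched as (illustration only):
y_pred_ensemble = np.mean(np.hstack([y_pred1, y_pred2, y_pred3, y_pred4, y_pred5]), axis=1)
y_label_ensemble = (y_pred_ensemble > 0.5).astype(int)
print("ensemble accuracy (per image): {:0.4f}".format(np.mean(y_label_ensemble == eval_generator.classes)))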