# helper_functions.py

# Helper functions for future use
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


# Function to import and resize images
def load_prepare_images(filename, img_shape=224, scale=True):
    """Reads an image from filename, decodes it and resizes it to (img_shape, img_shape, 3).

    Args:
        filename (str): Path to the target image file.
        img_shape (int, optional): Height/width to resize the image to. Defaults to 224.
        scale (bool, optional): Whether to scale pixel values to [0, 1]. Defaults to True.

    Returns:
        A float tensor of shape (img_shape, img_shape, 3).
    """
    img = tf.io.read_file(filename)
    img = tf.image.decode_jpeg(img)
    img = tf.image.resize(img, [img_shape, img_shape])
    if scale:
        return img / 255.
    else:
        return img
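
# Example usage (a sketch; "steak.jpeg" is a hypothetical local JPEG file):
# img = load_prepare_images("steak.jpeg", img_shape=224, scale=True)
# img.shape  # -> TensorShape([224, 224, 3]), pixel values in [0, 1]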

# Function to plot a confusion matrix
import itertools
from sklearn.metrics import confusion_matrix


def create_confusion_matrix(y_true, y_pred, classes=None, figsize=(10, 10), text_size=15, norm=False, savefig=False):
    """Plots a labelled confusion matrix comparing predictions to ground-truth labels.

    Args:
        y_true (array): Ground-truth labels.
        y_pred (array): Predicted labels (same shape as y_true).
        classes (array, optional): Class names for the axis labels. Defaults to None (integer labels are used).
        figsize (tuple, optional): Size of the output figure. Defaults to (10, 10).
        text_size (int, optional): Font size of the cell annotations. Defaults to 15.
        norm (bool, optional): Whether to show cell values as row-normalized percentages. Defaults to False.
        savefig (bool, optional): Whether to save the figure to "confusion_matrix.png". Defaults to False.
    """
    # Compute the confusion matrix and a row-normalized version of it
    cm = confusion_matrix(y_true, y_pred)
    cm_norm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    n_classes = cm.shape[0]

    # Plot the confusion matrix
    fig, ax = plt.subplots(figsize=figsize)
    cax = ax.matshow(cm, cmap=plt.cm.Blues)
    fig.colorbar(cax)

    # Use the provided class names as labels, otherwise integer indices
    if classes is not None:
        labels = classes
    else:
        labels = np.arange(cm.shape[0])
    ax.set(title="Confusion Matrix",
           ylabel="True label",
           xlabel="Predicted label",
           xticks=np.arange(n_classes),
           yticks=np.arange(n_classes),
           xticklabels=labels,
           yticklabels=labels)
    ax.xaxis.set_label_position("bottom")
    ax.xaxis.tick_bottom()

    # Annotate each cell, switching text colour at a brightness threshold
    threshold = (cm.max() + cm.min()) / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if norm:
            plt.text(j, i, f"{cm_norm[i, j]*100:.1f}%",
                     horizontalalignment="center",
                     color="white" if cm[i, j] > threshold else "black",
                     size=text_size)
        else:
            plt.text(j, i, f"{cm[i, j]}",
                     horizontalalignment="center",
                     color="white" if cm[i, j] > threshold else "black",
                     size=text_size)

    # Save the figure if requested
    if savefig:
        fig.savefig("confusion_matrix.png")
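
# Example usage (a sketch; y_test and test_preds are assumed label arrays):
# create_confusion_matrix(y_true=y_test,
#                         y_pred=test_preds,
#                         classes=["cat", "dog"],
#                         norm=True)  # show cell values as percentages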

# Function to make a prediction on an image and plot it with the predicted class as the title
def pred_plot(model, filename, class_names):
    """Loads an image, predicts its class with model and plots it with the prediction as the title."""
    img = load_prepare_images(filename)
    pred = model.predict(tf.expand_dims(img, axis=0))
    # Multi-class models output one probability per class; binary models output a single value
    if len(pred[0]) > 1:
        pred_class = class_names[pred.argmax()]
    else:
        pred_class = class_names[int(tf.round(pred)[0][0])]
    plt.imshow(img)
    plt.title(f"Prediction: {pred_class}")
    plt.axis(False)
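
# Example usage (a sketch; assumes a trained `model` and a local "pizza.jpg"):
# pred_plot(model, "pizza.jpg", class_names=["pizza", "steak"])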

# Function to create a TensorBoard callback with a timestamped log directory
import datetime


def create_tensorboard_callback(dir_name, experiment_name):
    """Creates a TensorBoard callback that logs to dir_name/experiment_name/current_datetime."""
    log_dir = dir_name + "/" + experiment_name + "/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
    print(f"Saving TensorBoard log files to: {log_dir}")
    return tensorboard_callback
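
# Example usage (a sketch; `model`, `train_data` and `val_data` are assumed to exist):
# model.fit(train_data,
#           epochs=5,
#           validation_data=val_data,
#           callbacks=[create_tensorboard_callback("logs", "baseline_model")])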

# Function to plot training and validation loss and accuracy curves separately
def plot_loss_curves(history):
    """Plots separate loss and accuracy curves for training and validation metrics."""
    loss = history.history["loss"]
    val_loss = history.history["val_loss"]
    accuracy = history.history["accuracy"]
    val_accuracy = history.history["val_accuracy"]
    epochs = range(len(history.history["loss"]))

    # Plot loss
    plt.plot(epochs, loss, label="Training_loss")
    plt.plot(epochs, val_loss, label="Validation_loss")
    plt.title("Loss")
    plt.xlabel("Epochs")
    plt.legend()

    # Plot accuracy on a separate figure
    plt.figure()
    plt.plot(epochs, accuracy, label="Training_accuracy")
    plt.plot(epochs, val_accuracy, label="Validation_accuracy")
    plt.title("Accuracy")
    plt.xlabel("Epochs")
    plt.legend()
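
# Example usage (a sketch; `history` is the History object returned by model.fit):
# history = model.fit(train_data, epochs=5, validation_data=val_data)
# plot_loss_curves(history)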

# Function to compare two TensorFlow History objects, e.g. before and after fine-tuning
def compare_history(original_history, new_history, initial_epochs=5):
    """Plots combined accuracy and loss curves from two History objects, marking where the second run began."""
    # Combine the metrics of the original history with the new history
    acc = original_history.history["accuracy"]
    loss = original_history.history["loss"]
    val_acc = original_history.history["val_accuracy"]
    val_loss = original_history.history["val_loss"]
    total_acc = acc + new_history.history["accuracy"]
    total_loss = loss + new_history.history["loss"]
    total_val_acc = val_acc + new_history.history["val_accuracy"]
    total_val_loss = val_loss + new_history.history["val_loss"]

    plt.figure(figsize=(10, 10))

    # Accuracy subplot with a vertical line marking the start of fine-tuning
    plt.subplot(2, 1, 1)
    plt.plot(total_acc, label="Training_accuracy")
    plt.plot(total_val_acc, label="Validation_accuracy")
    plt.plot([initial_epochs - 1, initial_epochs - 1],
             plt.ylim(), label="Start Fine Tuning")
    plt.legend(loc="lower right")
    plt.title("Training and Validation Accuracy")

    # Loss subplot with the same fine-tuning marker
    plt.subplot(2, 1, 2)
    plt.plot(total_loss, label="Training_loss")
    plt.plot(total_val_loss, label="Validation_loss")
    plt.plot([initial_epochs - 1, initial_epochs - 1],
             plt.ylim(), label="Start Fine Tuning")
    plt.legend(loc="upper right")
    plt.title("Training and Validation Loss")
    plt.xlabel("epoch")
    plt.show()
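
# Example usage (a sketch; feature-extraction then fine-tuning histories are assumed):
# history_feature_extract = model.fit(train_data, epochs=5, validation_data=val_data)
# # ...unfreeze some layers and recompile the model, then continue training...
# history_fine_tune = model.fit(train_data, epochs=10, initial_epoch=5, validation_data=val_data)
# compare_history(history_feature_extract, history_fine_tune, initial_epochs=5)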

# Function to unzip a file into the current working directory
import zipfile


def unzip_data(filename):
    """Unzips filename into the current working directory."""
    zip_ref = zipfile.ZipFile(filename, "r")
    zip_ref.extractall()
    zip_ref.close()
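
# Example usage (a sketch; "pizza_steak.zip" is a hypothetical archive):
# unzip_data("pizza_steak.zip")  # extracts into the current working directory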

# Function to walk through a directory and print its contents
import os


def walk_dir(dir_path):
    """Walks through dir_path, printing the number of subdirectories and files at each level."""
    for dirpath, dirnames, filenames in os.walk(dir_path):
        print(f"There are {len(dirnames)} directories and {len(filenames)} files in '{dirpath}'.")

# Function to evaluate accuracy, precision, recall and f1 score
from sklearn.metrics import accuracy_score, precision_recall_fscore_support


def calculate_results(y_true, y_pred):
    """Calculates accuracy (as a percentage) and weighted precision, recall and f1 score.

    Args:
        y_true: Ground-truth labels.
        y_pred: Predicted labels (same shape as y_true).

    Returns:
        A dictionary with keys "accuracy", "precision", "recall" and "f1".
    """
    model_accuracy = accuracy_score(y_true, y_pred) * 100
    model_precision, model_recall, model_f1, _ = precision_recall_fscore_support(y_true, y_pred, average="weighted")
    model_results = {"accuracy": model_accuracy,
                     "precision": model_precision,
                     "recall": model_recall,
                     "f1": model_f1}
    return model_results
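
# Example usage (a sketch; y_test and test_preds are assumed label arrays):
# calculate_results(y_true=y_test, y_pred=test_preds)
# -> {"accuracy": 87.5, "precision": 0.88, "recall": 0.875, "f1": 0.87}  (illustrative values)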