Diff of /leukemia detection.ipynb [000000] .. [198e90]

{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "59654c10",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "modules loaded\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "from tensorflow.keras import backend as K\n",
    "from tensorflow.keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, BatchNormalization, Flatten\n",
    "from tensorflow.keras.optimizers import Adam, Adamax\n",
    "from tensorflow.keras.metrics import categorical_crossentropy\n",
    "from tensorflow.keras import regularizers\n",
    "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
    "from tensorflow.keras.models import Model, load_model, Sequential\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import shutil\n",
    "import time\n",
    "import cv2\n",
    "from tqdm import tqdm\n",
    "from sklearn.model_selection import train_test_split\n",
    "import matplotlib.pyplot as plt\n",
    "from matplotlib.pyplot import imshow\n",
    "import seaborn as sns\n",
    "sns.set_style('darkgrid')\n",
    "from PIL import Image\n",
    "from sklearn.metrics import confusion_matrix, classification_report\n",
    "from IPython.core.display import display, HTML\n",
    "# suppress tensorflow warning messages\n",
    "import logging\n",
    "logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n",
    "print('modules loaded')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "fa7de5f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "def show_image_samples(gen):\n",
    "    t_dict=gen.class_indices\n",
    "    classes=list(t_dict.keys())\n",
    "    images, labels=next(gen) # get a sample batch from the generator\n",
    "    plt.figure(figsize=(20, 20))\n",
    "    length=len(labels)\n",
    "    if length<25: # show a maximum of 25 images\n",
    "        r=length\n",
    "    else:\n",
    "        r=25\n",
    "    for i in range(r):\n",
    "        plt.subplot(5, 5, i + 1)\n",
    "        image=images[i]/255\n",
    "        plt.imshow(image)\n",
    "        index=np.argmax(labels[i])\n",
    "        class_name=classes[index]\n",
    "        plt.title(class_name, color='blue', fontsize=12)\n",
    "        plt.axis('off')\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3cc88fda",
   "metadata": {},
   "outputs": [],
   "source": [
    "def show_images(tdir):\n",
    "    classlist=os.listdir(tdir)\n",
    "    length=len(classlist)\n",
    "    columns=5\n",
    "    rows=int(np.ceil(length/columns))\n",
    "    plt.figure(figsize=(20, rows * 4))\n",
    "    for i, klass in enumerate(classlist):\n",
    "        classpath=os.path.join(tdir, klass)\n",
    "        imgpath=os.path.join(classpath, '1.jpg') # assumes each class directory contains a file named 1.jpg\n",
    "        img=plt.imread(imgpath)\n",
    "        plt.subplot(rows, columns, i+1)\n",
    "        plt.axis('off')\n",
    "        plt.title(klass, color='blue', fontsize=12)\n",
    "        plt.imshow(img)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "564a3b4b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def print_in_color(txt_msg, fore_tupple, back_tupple):\n",
    "    # prints txt_msg in the foreground color specified by fore_tupple on the background specified by back_tupple\n",
    "    # txt_msg is the text, fore_tupple is the foreground color tuple (r,g,b), back_tupple is the background color tuple (r,g,b)\n",
    "    rf,gf,bf=fore_tupple\n",
    "    rb,gb,bb=back_tupple\n",
    "    msg='{0}' + txt_msg\n",
    "    mat='\\33[38;2;' + str(rf) +';' + str(gf) + ';' + str(bf) + ';48;2;' + str(rb) + ';' +str(gb) + ';' + str(bb) +'m'\n",
    "    print(msg.format(mat), flush=True)\n",
    "    print('\\33[0m', flush=True) # reset the print color to the terminal default\n",
    "    return"
   ]
  },
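  {
   "cell_type": "code",
   "execution_count": null,
   "id": "print-in-color-demo",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Editor's note: illustrative cell, not part of the original notebook.\n",
    "# Quick sanity check of print_in_color: green text on the dark slate\n",
    "# background used throughout the notebook. Requires a frontend that\n",
    "# understands 24-bit ANSI color escapes.\n",
    "print_in_color('hello from print_in_color', (0, 255, 0), (55, 65, 80))"
   ]
  },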
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "9bbdb8ab",
   "metadata": {},
   "outputs": [],
   "source": [
    "class LRA(keras.callbacks.Callback):\n",
    "    def __init__(self, model, base_model, patience, stop_patience, threshold, factor, dwell, batches, initial_epoch, epochs, ask_epoch):\n",
    "        super(LRA, self).__init__()\n",
    "        self.model=model\n",
    "        self.base_model=base_model\n",
    "        self.patience=patience # how many epochs without improvement before the learning rate is adjusted\n",
    "        self.stop_patience=stop_patience # how many lr adjustments without improvement before training stops\n",
    "        self.threshold=threshold # training accuracy threshold above which lr is adjusted based on validation loss\n",
    "        self.factor=factor # factor by which to reduce the learning rate\n",
    "        self.dwell=dwell # if True, revert to the best weights whenever the monitored metric fails to improve\n",
    "        self.batches=batches # number of training batches to run per epoch\n",
    "        self.initial_epoch=initial_epoch\n",
    "        self.epochs=epochs\n",
    "        self.ask_epoch=ask_epoch\n",
    "        self.ask_epoch_initial=ask_epoch # save this value to restore if restarting training\n",
    "        # callback variables\n",
    "        self.count=0 # patience counter: epochs without improvement since the last lr adjustment\n",
    "        self.stop_count=0 # consecutive lr adjustments made without improvement\n",
    "        self.best_epoch=1 # epoch with the lowest loss\n",
    "        self.initial_lr=float(tf.keras.backend.get_value(model.optimizer.lr)) # get the initial learning rate and save it\n",
    "        self.highest_tracc=0.0 # set highest training accuracy to 0 initially\n",
    "        self.lowest_vloss=np.inf # set lowest validation loss to infinity initially\n",
    "        self.best_weights=self.model.get_weights() # set best weights to model's initial weights\n",
    "        self.initial_weights=self.model.get_weights() # save initial weights in case they have to be restored\n",
    "\n",
    "    def on_train_begin(self, logs=None):\n",
    "        if self.base_model is not None:\n",
    "            status=self.base_model.trainable\n",
    "            if status:\n",
    "                msg='initializing callback starting training with base_model trainable'\n",
    "            else:\n",
    "                msg='initializing callback starting training with base_model not trainable'\n",
    "        else:\n",
    "            msg='initializing callback and starting training'\n",
    "        print_in_color(msg, (244, 252, 3), (55,65,80))\n",
    "        msg='{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}{8:10s}{9:^8s}'.format('Epoch', 'Loss', 'Accuracy',\n",
    "                                                                                              'V_loss','V_acc', 'LR', 'Next LR', 'Monitor','% Improv', 'Duration')\n",
    "        print_in_color(msg, (244,252,3), (55,65,80))\n",
    "        self.start_time= time.time()\n",
    "\n",
    "    def on_train_end(self, logs=None):\n",
    "        stop_time=time.time()\n",
    "        tr_duration= stop_time - self.start_time\n",
    "        hours = tr_duration // 3600\n",
    "        minutes = (tr_duration - (hours * 3600)) // 60\n",
    "        seconds = tr_duration - ((hours * 3600) + (minutes * 60))\n",
    "\n",
    "        self.model.set_weights(self.best_weights) # set the weights of the model to the best weights\n",
    "        msg=f'Training is completed - model is set with weights from epoch {self.best_epoch} '\n",
    "        print_in_color(msg, (0,255,0), (55,65,80))\n",
    "        msg = f'training elapsed time was {str(hours)} hours, {minutes:4.1f} minutes, {seconds:4.2f} seconds'\n",
    "        print_in_color(msg, (0,255,0), (55,65,80))\n",
    "\n",
    "    def on_train_batch_end(self, batch, logs=None):\n",
    "        acc=logs.get('accuracy')* 100 # get training accuracy\n",
    "        loss=logs.get('loss')\n",
    "        msg='{0:20s}processing batch {1:4s} of {2:5s} accuracy= {3:8.3f}  loss: {4:8.5f}'.format(' ', str(batch), str(self.batches), acc, loss)\n",
    "        print(msg, '\\r', end='') # prints over the same line to show a running batch count\n",
    "\n",
    "    def on_epoch_begin(self, epoch, logs=None):\n",
    "        self.now= time.time()\n",
    "\n",
    "    def on_epoch_end(self, epoch, logs=None): # runs at the end of each epoch\n",
    "        later=time.time()\n",
    "        duration=later-self.now\n",
    "        lr=float(tf.keras.backend.get_value(self.model.optimizer.lr)) # get the current learning rate\n",
    "        current_lr=lr\n",
    "        v_loss=logs.get('val_loss') # get the validation loss for this epoch\n",
    "        acc=logs.get('accuracy') # get training accuracy\n",
    "        v_acc=logs.get('val_accuracy')\n",
    "        loss=logs.get('loss')\n",
    "        if acc < self.threshold: # if training accuracy is below threshold, adjust lr based on training accuracy\n",
    "            monitor='accuracy'\n",
    "            if epoch == 0:\n",
    "                pimprov=0.0\n",
    "            else:\n",
    "                pimprov= (acc-self.highest_tracc )*100/self.highest_tracc\n",
    "            if acc>self.highest_tracc: # training accuracy improved in the epoch\n",
    "                self.highest_tracc=acc # set new highest training accuracy\n",
    "                self.best_weights=self.model.get_weights() # training accuracy improved so save the weights\n",
    "                self.count=0 # reset the patience counter since training accuracy improved\n",
    "                self.stop_count=0 # reset the stop counter\n",
    "                if v_loss<self.lowest_vloss:\n",
    "                    self.lowest_vloss=v_loss\n",
    "                color= (0,255,0)\n",
    "                self.best_epoch=epoch + 1 # record this epoch as the best so far\n",
    "            else:\n",
    "                # training accuracy did not improve; check whether this has happened for patience epochs\n",
    "                # and if so adjust the learning rate\n",
    "                if self.count>=self.patience - 1: # lr should be adjusted\n",
    "                    color=(245, 170, 66)\n",
    "                    lr= lr * self.factor # reduce the learning rate by factor\n",
    "                    tf.keras.backend.set_value(self.model.optimizer.lr, lr) # set the learning rate in the optimizer\n",
    "                    self.count=0 # reset the patience counter\n",
    "                    self.stop_count=self.stop_count + 1 # count the number of consecutive lr adjustments\n",
    "                    if self.dwell:\n",
    "                        self.model.set_weights(self.best_weights) # return to a better point in weight space\n",
    "                    else:\n",
    "                        if v_loss<self.lowest_vloss:\n",
    "                            self.lowest_vloss=v_loss\n",
    "                else:\n",
    "                    self.count=self.count + 1 # increment the patience counter\n",
    "        else: # training accuracy is above threshold so adjust learning rate based on validation loss\n",
    "            monitor='val_loss'\n",
    "            if epoch == 0:\n",
    "                pimprov=0.0\n",
    "            else:\n",
    "                pimprov= (self.lowest_vloss - v_loss )*100/self.lowest_vloss\n",
    "            if v_loss< self.lowest_vloss: # check if the validation loss improved\n",
    "                self.lowest_vloss=v_loss # replace lowest validation loss with the new validation loss\n",
    "                self.best_weights=self.model.get_weights() # validation loss improved so save the weights\n",
    "                self.count=0 # reset the patience counter since validation loss improved\n",
    "                self.stop_count=0\n",
    "                color=(0,255,0)\n",
    "                self.best_epoch=epoch + 1 # record this epoch as the best so far\n",
    "            else: # validation loss did not improve\n",
    "                if self.count>=self.patience-1: # need to adjust lr\n",
    "                    color=(245, 170, 66)\n",
    "                    lr=lr * self.factor # reduce the learning rate\n",
    "                    self.stop_count=self.stop_count + 1 # increment stop counter because lr was adjusted\n",
    "                    self.count=0 # reset the patience counter\n",
    "                    tf.keras.backend.set_value(self.model.optimizer.lr, lr) # set the learning rate in the optimizer\n",
    "                    if self.dwell:\n",
    "                        self.model.set_weights(self.best_weights) # return to a better point in weight space\n",
    "                else:\n",
    "                    self.count =self.count + 1 # increment the patience counter\n",
    "                if acc>self.highest_tracc:\n",
    "                    self.highest_tracc= acc\n",
    "        msg=f'{str(epoch+1):^3s}/{str(self.epochs):4s} {loss:^9.3f}{acc*100:^9.3f}{v_loss:^9.5f}{v_acc*100:^9.3f}{current_lr:^9.5f}{lr:^9.5f}{monitor:^11s}{pimprov:^10.2f}{duration:^8.2f}'\n",
    "        print_in_color(msg, color, (55,65,80))\n",
    "        if self.stop_count> self.stop_patience - 1: # lr has been adjusted stop_patience times with no improvement\n",
    "            msg=f' training has been halted at epoch {epoch + 1} after {self.stop_patience} adjustments of learning rate with no improvement'\n",
    "            print_in_color(msg, (0,255,255), (55,65,80))\n",
    "            self.model.stop_training = True # stop training\n",
    "        else:\n",
    "            if self.ask_epoch is not None:\n",
    "                if epoch + 1 >= self.ask_epoch:\n",
    "                    if self.base_model.trainable:\n",
    "                        msg='enter H to halt training or an integer for the number of epochs to run, then ask again'\n",
    "                    else:\n",
    "                        msg='enter H to halt training, F to fine tune the model, or an integer for the number of epochs to run, then ask again'\n",
    "                    print_in_color(msg, (0,255,255), (55,65,80))\n",
    "                    ans=input('')\n",
    "                    if ans=='H' or ans=='h':\n",
    "                        msg=f'training has been halted at epoch {epoch + 1} due to user input'\n",
    "                        print_in_color(msg, (0,255,255), (55,65,80))\n",
    "                        self.model.stop_training = True # stop training\n",
    "                    elif ans == 'F' or ans=='f':\n",
    "                        if self.base_model.trainable:\n",
    "                            msg='base_model is already set as trainable'\n",
    "                        else:\n",
    "                            msg='setting base_model as trainable for fine tuning of model'\n",
    "                            self.base_model.trainable=True\n",
    "                        print_in_color(msg, (0, 255,255), (55,65,80))\n",
    "                        msg='{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}{8:10s}{9:^8s}'.format('Epoch', 'Loss', 'Accuracy',\n",
    "                                                                                              'V_loss','V_acc', 'LR', 'Next LR', 'Monitor','% Improv', 'Duration')\n",
    "                        print_in_color(msg, (244,252,3), (55,65,80))\n",
    "                        self.count=0\n",
    "                        self.stop_count=0\n",
    "                        self.ask_epoch = epoch + 1 + self.ask_epoch_initial\n",
    "\n",
    "                    else:\n",
    "                        ans=int(ans)\n",
    "                        self.ask_epoch += ans\n",
    "                        msg=f' training will continue until epoch {self.ask_epoch}'\n",
    "                        print_in_color(msg, (0, 255,255), (55,65,80))\n",
    "                        msg='{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}{8:10s}{9:^8s}'.format('Epoch', 'Loss', 'Accuracy',\n",
    "                                                                                              'V_loss','V_acc', 'LR', 'Next LR', 'Monitor','% Improv', 'Duration')\n",
    "                        print_in_color(msg, (244,252,3), (55,65,80))"
   ]
  },
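  {
   "cell_type": "code",
   "execution_count": null,
   "id": "lra-usage-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Editor's note: illustrative sketch, not part of the original notebook.\n",
    "# Shows how the LRA callback is typically wired into model.fit. The names\n",
    "# model, base_model, train_gen, valid_gen and epochs are assumptions here -\n",
    "# they are defined where the model and generators are built. Keras progress\n",
    "# output is turned off (verbose=0) because LRA prints its own per-batch and\n",
    "# per-epoch status lines.\n",
    "#\n",
    "# callbacks=[LRA(model=model, base_model=base_model, patience=1, stop_patience=3,\n",
    "#                threshold=0.9, factor=0.5, dwell=True, batches=len(train_gen),\n",
    "#                initial_epoch=0, epochs=epochs, ask_epoch=5)]\n",
    "# history=model.fit(x=train_gen, epochs=epochs, verbose=0, callbacks=callbacks,\n",
    "#                   validation_data=valid_gen, initial_epoch=0, shuffle=False)"
   ]
  },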
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "e155beef",
   "metadata": {},
   "outputs": [],
   "source": [
    "def tr_plot(tr_data, start_epoch):\n",
    "    # plot the training and validation data\n",
    "    tacc=tr_data.history['accuracy']\n",
    "    tloss=tr_data.history['loss']\n",
    "    vacc=tr_data.history['val_accuracy']\n",
    "    vloss=tr_data.history['val_loss']\n",
    "    Epoch_count=len(tacc) + start_epoch\n",
    "    Epochs=[]\n",
    "    for i in range(start_epoch, Epoch_count):\n",
    "        Epochs.append(i+1)\n",
    "    index_loss=np.argmin(vloss) # the epoch with the lowest validation loss\n",
    "    val_lowest=vloss[index_loss]\n",
    "    index_acc=np.argmax(vacc)\n",
    "    acc_highest=vacc[index_acc]\n",
    "    plt.style.use('fivethirtyeight')\n",
    "    sc_label='best epoch= ' + str(index_loss + 1 + start_epoch)\n",
    "    vc_label='best epoch= ' + str(index_acc + 1 + start_epoch)\n",
    "    fig,axes=plt.subplots(nrows=1, ncols=2, figsize=(20,8))\n",
    "    axes[0].plot(Epochs, tloss, 'r', label='Training loss')\n",
    "    axes[0].plot(Epochs, vloss, 'g', label='Validation loss')\n",
    "    axes[0].scatter(index_loss + 1 + start_epoch, val_lowest, s=150, c='blue', label=sc_label)\n",
    "    axes[0].set_title('Training and Validation Loss')\n",
    "    axes[0].set_xlabel('Epochs')\n",
    "    axes[0].set_ylabel('Loss')\n",
    "    axes[0].legend()\n",
    "    axes[1].plot(Epochs, tacc, 'r', label='Training Accuracy')\n",
    "    axes[1].plot(Epochs, vacc, 'g', label='Validation Accuracy')\n",
    "    axes[1].scatter(index_acc + 1 + start_epoch, acc_highest, s=150, c='blue', label=vc_label)\n",
    "    axes[1].set_title('Training and Validation Accuracy')\n",
    "    axes[1].set_xlabel('Epochs')\n",
    "    axes[1].set_ylabel('Accuracy')\n",
    "    axes[1].legend()\n",
    "    plt.tight_layout()\n",
    "    plt.show()"
   ]
  },
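  {
   "cell_type": "code",
   "execution_count": null,
   "id": "tr-plot-usage-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Editor's note: illustrative sketch, not part of the original notebook.\n",
    "# tr_plot expects the History object returned by model.fit (history in the\n",
    "# LRA sketch above) plus the epoch the run started from:\n",
    "#\n",
    "# tr_plot(history, start_epoch=0)"
   ]
  },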
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "35f70802",
   "metadata": {},
   "outputs": [],
   "source": [
    "def print_info(test_gen, preds, print_code, save_dir, subject):\n",
    "    class_dict=test_gen.class_indices\n",
    "    labels= test_gen.labels\n",
    "    file_names= test_gen.filenames\n",
    "    error_list=[]\n",
    "    true_class=[]\n",
    "    pred_class=[]\n",
    "    prob_list=[]\n",
    "    new_dict={}\n",
    "    error_indices=[]\n",
    "    y_pred=[]\n",
    "    for key,value in class_dict.items():\n",
    "        new_dict[value]=key # dictionary {integer class index: class name string}\n",
    "    # new_dict could be stored as a text file in save_dir (not done here)\n",
    "    classes=list(new_dict.values()) # list of class name strings\n",
    "    errors=0\n",
    "    for i, p in enumerate(preds):\n",
    "        pred_index=np.argmax(p)\n",
    "        true_index=labels[i] # labels are integer values\n",
    "        if pred_index != true_index: # a misclassification has occurred\n",
    "            error_list.append(file_names[i])\n",
    "            true_class.append(new_dict[true_index])\n",
    "            pred_class.append(new_dict[pred_index])\n",
    "            prob_list.append(p[pred_index])\n",
    "            error_indices.append(true_index)\n",
    "            errors=errors + 1\n",
    "        y_pred.append(pred_index)\n",
    "    tests=len(preds)\n",
    "    acc= (1-errors/tests) *100\n",
    "    msg= f'There were {errors} errors in {tests} test cases. Model accuracy= {acc: 6.2f} %'\n",
    "    print_in_color(msg,(0,255,255),(55,65,80))\n",
    "    if print_code !=0:\n",
    "        if errors>0:\n",
    "            if print_code>errors:\n",
    "                r=errors\n",
    "            else:\n",
    "                r=print_code\n",
    "            msg='{0:^28s}{1:^28s}{2:^28s}{3:^16s}'.format('Filename', 'Predicted Class' , 'True Class', 'Probability')\n",
    "            print_in_color(msg, (0,255,0),(55,65,80))\n",
    "            for i in range(r):\n",
    "                split1=os.path.split(error_list[i])\n",
    "                split2=os.path.split(split1[0])\n",
    "                fname=split2[1] + '/' + split1[1]\n",
    "                msg='{0:^28s}{1:^28s}{2:^28s}{3:4s}{4:^6.4f}'.format(fname, pred_class[i],true_class[i], ' ', prob_list[i])\n",
    "                print_in_color(msg, (255,255,255), (55,65,60))\n",
    "        else:\n",
    "            msg='With accuracy of 100 % there are no errors to print'\n",
    "            print_in_color(msg, (0,255,0),(55,65,80))\n",
    "    if errors>0:\n",
    "        plot_bar=[]\n",
    "        plot_class=[]\n",
    "        for key, value in new_dict.items():\n",
    "            count=error_indices.count(key)\n",
    "            if count!=0:\n",
    "                plot_bar.append(count) # how many errors occurred for this class\n",
    "                plot_class.append(value) # stores the class name\n",
    "        fig=plt.figure()\n",
    "        fig.set_figheight(len(plot_class)/3)\n",
    "        fig.set_figwidth(10)\n",
    "        plt.style.use('fivethirtyeight')\n",
    "        for i in range(0, len(plot_class)):\n",
    "            c=plot_class[i]\n",
    "            x=plot_bar[i]\n",
    "            plt.barh(c, x)\n",
    "        plt.title('Errors by Class on Test Set')\n",
    "    y_true= np.array(labels)\n",
    "    y_pred=np.array(y_pred)\n",
    "    if len(classes)<= 30:\n",
    "        # create a confusion matrix\n",
    "        cm = confusion_matrix(y_true, y_pred)\n",
    "        length=len(classes)\n",
    "        if length<8:\n",
    "            fig_width=8\n",
    "            fig_height=8\n",
    "        else:\n",
    "            fig_width= int(length * .5)\n",
    "            fig_height= int(length * .5)\n",
    "        plt.figure(figsize=(fig_width, fig_height))\n",
    "        sns.heatmap(cm, annot=True, vmin=0, fmt='g', cmap='Blues', cbar=False)\n",
    "        plt.xticks(np.arange(length)+.5, classes, rotation= 90)\n",
    "        plt.yticks(np.arange(length)+.5, classes, rotation=0)\n",
    "        plt.xlabel(\"Predicted\")\n",
    "        plt.ylabel(\"Actual\")\n",
    "        plt.title(\"Confusion Matrix\")\n",
    "        plt.show()\n",
    "    clr = classification_report(y_true, y_pred, target_names=classes, digits= 4)\n",
    "    print(\"Classification Report:\\n----------------------\\n\", clr)\n",
    "    return acc/100"
   ]
  },
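  {
   "cell_type": "code",
   "execution_count": null,
   "id": "print-info-usage-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Editor's note: illustrative sketch, not part of the original notebook.\n",
    "# Typical use after training: predict over the test generator, then print the\n",
    "# error summary, per-class error bar chart, confusion matrix and\n",
    "# classification report. model, test_gen and working_dir are assumed to be\n",
    "# defined elsewhere in the workflow.\n",
    "#\n",
    "# preds=model.predict(test_gen)\n",
    "# acc=print_info(test_gen, preds, print_code=20, save_dir=working_dir, subject='leukemia')"
   ]
  },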
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e7d27934",
   "metadata": {},
   "outputs": [],
   "source": [
    "def saver(save_path, model, model_name, subject, accuracy, img_size, scalar, generator):\n",
    "    # first save the model; the accuracy (truncated to 2 decimal places) is embedded in the file name\n",
    "    save_id=str(model_name + '-' + subject + '-' + str(accuracy)[:str(accuracy).rfind('.')+3] + '.h5')\n",
    "    model_save_loc=os.path.join(save_path, save_id)\n",
    "    model.save(model_save_loc)\n",
    "    print_in_color('model was saved as ' + model_save_loc, (0,255,0),(55,65,80))\n",
    "    # now create the class_df and convert it to a csv file\n",
    "    class_dict=generator.class_indices\n",
    "    height=[]\n",
    "    width=[]\n",
    "    scale=[]\n",
    "    for i in range(len(class_dict)):\n",
    "        height.append(img_size[0])\n",
    "        width.append(img_size[1])\n",
    "        scale.append(scalar)\n",
    "    Index_series=pd.Series(list(class_dict.values()), name='class_index')\n",
    "    Class_series=pd.Series(list(class_dict.keys()), name='class')\n",
    "    Height_series=pd.Series(height, name='height')\n",
    "    Width_series=pd.Series(width, name='width')\n",
    "    Scale_series=pd.Series(scale, name='scale by')\n",
    "    class_df=pd.concat([Index_series, Class_series, Height_series, Width_series, Scale_series], axis=1)\n",
    "    csv_name='class_dict.csv'\n",
    "    csv_save_loc=os.path.join(save_path, csv_name)\n",
    "    class_df.to_csv(csv_save_loc, index=False)\n",
    "    print_in_color('class csv file was saved as ' + csv_save_loc, (0,255,0),(55,65,80))\n",
    "    return model_save_loc, csv_save_loc\n"
   ]
  },
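  {
   "cell_type": "code",
   "execution_count": null,
   "id": "saver-usage-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Editor's note: illustrative sketch, not part of the original notebook.\n",
    "# saver() writes the trained model plus a class_dict.csv that predictor()\n",
    "# later reads back. The model name, working_dir and scalar of 1 (pixels used\n",
    "# unscaled) are placeholder assumptions.\n",
    "#\n",
    "# model_save_loc, csv_save_loc=saver(working_dir, model, 'EfficientNetB3',\n",
    "#                                    'leukemia', acc * 100, img_size, 1, train_gen)"
   ]
  },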
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "6268a332",
   "metadata": {},
   "outputs": [],
   "source": [
    "def predictor(sdir, csv_path, model_path, averaged=True, verbose=True):\n",
    "    # read in the csv file\n",
    "    class_df=pd.read_csv(csv_path)\n",
    "    class_count=len(class_df['class'].unique())\n",
    "    img_height=int(class_df['height'].iloc[0])\n",
    "    img_width =int(class_df['width'].iloc[0])\n",
    "    img_size=(img_width, img_height)\n",
    "    scale=class_df['scale by'].iloc[0]\n",
    "    # determine the value to scale the image pixels by\n",
    "    try:\n",
    "        s=int(scale)\n",
    "        s2=1\n",
    "        s1=0\n",
    "    except ValueError: # scale is stored as a string of the form '...*s2-s1'\n",
    "        split=scale.split('-')\n",
    "        s1=float(split[1])\n",
    "        s2=float(split[0].split('*')[1])\n",
    "    path_list=[]\n",
    "    paths=os.listdir(sdir)\n",
    "    for f in paths:\n",
    "        path_list.append(os.path.join(sdir,f))\n",
    "    if verbose:\n",
    "        print(' Model is being loaded- this will take about 10 seconds')\n",
    "    model=load_model(model_path)\n",
    "    image_count=len(path_list)\n",
    "    image_list=[]\n",
    "    file_list=[]\n",
    "    good_image_count=0\n",
    "    for i in range(image_count):\n",
    "        try:\n",
    "            img=cv2.imread(path_list[i])\n",
    "            img=cv2.resize(img, img_size)\n",
    "            img=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
    "            good_image_count +=1\n",
    "            img=img*s2 - s1\n",
    "            image_list.append(img)\n",
    "            file_name=os.path.split(path_list[i])[1]\n",
    "            file_list.append(file_name)\n",
    "        except Exception:\n",
    "            if verbose:\n",
    "                print(path_list[i], ' is an invalid image file')\n",
    "    if good_image_count==1: # with only one valid image, fall back to the averaged path\n",
    "        averaged=True\n",
    "    image_array=np.array(image_list)\n",
    "    # make predictions on the images, sum the probabilities of each class, then find the class index with\n",
    "    # the highest probability\n",
    "    preds=model.predict(image_array)\n",
    "    if averaged:\n",
    "        psum=[]\n",
    "        for i in range(class_count): # create an all-zero list\n",
    "            psum.append(0)\n",
    "        for p in preds: # iterate over all predictions\n",
    "            for i in range(class_count):\n",
    "                psum[i]=psum[i] + p[i] # sum the probabilities\n",
    "        index=np.argmax(psum) # find the class index with the highest probability sum\n",
    "        klass=class_df['class'].iloc[index] # get the class name that corresponds to the index\n",
    "        prob=psum[index]/good_image_count * 100 # get the average probability\n",
    "        # to show a representative image, run predict again and select the first image with the same index\n",
    "        for img in image_array: # iterate through the images\n",
    "            test_img=np.expand_dims(img, axis=0) # since it is a single image, expand dimensions\n",
    "            test_index=np.argmax(model.predict(test_img)) # the class index with the highest probability for this image\n",
    "            if test_index== index: # see if this image has the same index as was selected previously\n",
    "                if verbose: # show the image and print the result\n",
    "                    plt.axis('off')\n",
    "                    plt.imshow(img) # show the image\n",
    "                    print(f'predicted class is {klass} with a probability of {prob:6.4f} % ')\n",
    "                break # found an image that represents the predicted class\n",
    "        return klass, prob, img, None\n",
    "    else: # create individual predictions for each image\n",
    "        pred_class=[]\n",
    "        prob_list=[]\n",
    "        for i, p in enumerate(preds):\n",
    "            index=np.argmax(p) # find the class index with the highest probability\n",
    "            klass=class_df['class'].iloc[index] # get the class name that corresponds to the index\n",
    "            pred_class.append(klass)\n",
    "            prob_list.append(p[index])\n",
    "        Fseries=pd.Series(file_list, name='image file')\n",
    "        Lseries=pd.Series(pred_class, name='predicted class')\n",
    "        Pseries=pd.Series(prob_list, name='probability')\n",
    "        df=pd.concat([Fseries, Lseries, Pseries], axis=1)\n",
    "        if verbose:\n",
    "            length= len(df)\n",
    "            print(df.head(length))\n",
    "        return None, None, None, df"
   ]
  },
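  {
   "cell_type": "code",
   "execution_count": null,
   "id": "predictor-usage-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Editor's note: illustrative sketch, not part of the original notebook.\n",
    "# predictor() classifies every image in a directory using the saved model and\n",
    "# class_dict.csv produced by saver(). store_path is a placeholder for a\n",
    "# directory of images to classify.\n",
    "#\n",
    "# store_path=r'C:\\\\temp\\\\sample_images'\n",
    "# klass, prob, img, _=predictor(store_path, csv_save_loc, model_save_loc,\n",
    "#                               averaged=True, verbose=True)"
   ]
  },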
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "516211e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "def trim(df, max_size, min_size, column):\n",
    "    df=df.copy()\n",
    "    original_class_count= len(list(df[column].unique()))\n",
    "    print('Original Number of classes in dataframe: ', original_class_count)\n",
    "    sample_list=[]\n",
    "    groups=df.groupby(column)\n",
    "    for label in df[column].unique():\n",
    "        group=groups.get_group(label)\n",
    "        sample_count=len(group)\n",
    "        if sample_count> max_size:\n",
    "            strat=group[column]\n",
    "            samples,_=train_test_split(group, train_size=max_size, shuffle=True, random_state=123, stratify=strat)\n",
    "            sample_list.append(samples)\n",
    "        elif sample_count>= min_size:\n",
    "            sample_list.append(group)\n",
    "    df=pd.concat(sample_list, axis=0).reset_index(drop=True)\n",
    "    final_class_count= len(list(df[column].unique()))\n",
    "    if final_class_count != original_class_count:\n",
    "        print('*** WARNING ***  dataframe has a reduced number of classes')\n",
    "    class_counts=list(df[column].value_counts())\n",
    "    print(class_counts)\n",
    "    return df"
   ]
  },
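  {
   "cell_type": "code",
   "execution_count": null,
   "id": "trim-demo",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Editor's note: illustrative self-contained check, not part of the original\n",
    "# notebook. A toy dataframe with an imbalanced 'labels' column: 'all' is\n",
    "# capped at max_size=3, 'hem' passes through unchanged, and 'rare' (a single\n",
    "# sample) falls below min_size=2 and is dropped, triggering the warning.\n",
    "demo_df=pd.DataFrame({'filepaths': [f'img_{i}.jpg' for i in range(8)],\n",
    "                      'labels': ['all']*5 + ['hem']*2 + ['rare']})\n",
    "trimmed_df=trim(demo_df, max_size=3, min_size=2, column='labels')"
   ]
  },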
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "0b66bd08",
   "metadata": {},
   "outputs": [],
   "source": [
    "def balance(train_df, max_samples, min_samples, column, working_dir, image_size):\n",
    "    train_df=train_df.copy()\n",
    "    train_df=trim(train_df, max_samples, min_samples, column)\n",
    "    # make directories to store the augmented images\n",
    "    aug_dir=os.path.join(working_dir, 'aug')\n",
    "    if os.path.isdir(aug_dir):\n",
    "        shutil.rmtree(aug_dir)\n",
    "    os.mkdir(aug_dir)\n",
    "    for label in train_df['labels'].unique():\n",
    "        dir_path=os.path.join(aug_dir,label)\n",
    "        os.mkdir(dir_path)\n",
    "    # create and store the augmented images\n",
    "    total=0\n",
    "    gen=ImageDataGenerator(horizontal_flip=True, rotation_range=20, width_shift_range=.2,\n",
    "                           height_shift_range=.2, zoom_range=.2)\n",
    "    groups=train_df.groupby('labels') # group by class\n",
    "    for label in train_df['labels'].unique(): # for every class\n",
    "        group=groups.get_group(label) # a dataframe holding only rows with the specified label\n",
    "        sample_count=len(group) # determine how many samples there are in this class\n",
    "        if sample_count< max_samples: # if the class has fewer than the target number of images\n",
    "            aug_img_count=0\n",
    "            delta=max_samples-sample_count # number of augmented images to create\n",
    "            target_dir=os.path.join(aug_dir, label) # define where to write the images\n",
    "            aug_gen=gen.flow_from_dataframe(group, x_col='filepaths', y_col=None, target_size=image_size,\n",
    "                                            class_mode=None, batch_size=1, shuffle=False,\n",
    "                                            save_to_dir=target_dir, save_prefix='aug-', color_mode='rgb',\n",
    "                                            save_format='jpg')\n",
    "            while aug_img_count<delta:\n",
    "                images=next(aug_gen)\n",
    "                aug_img_count += len(images)\n",
    "            total +=aug_img_count\n",
    "    print('Total Augmented images created= ', total)\n",
    "    # create aug_df and merge it with train_df to create a composite training set\n",
    "    if total>0:\n",
    "        aug_fpaths=[]\n",
    "        aug_labels=[]\n",
    "        classlist=os.listdir(aug_dir)\n",
    "        for klass in classlist:\n",
    "            classpath=os.path.join(aug_dir, klass)\n",
    "            flist=os.listdir(classpath)\n",
    "            for f in flist:\n",
    "                fpath=os.path.join(classpath,f)\n",
    "                aug_fpaths.append(fpath)\n",
    "                aug_labels.append(klass)\n",
    "        Fseries=pd.Series(aug_fpaths, name='filepaths')\n",
    "        Lseries=pd.Series(aug_labels, name='labels')\n",
    "        aug_df=pd.concat([Fseries, Lseries], axis=1)\n",
    "        train_df=pd.concat([train_df,aug_df], axis=0).reset_index(drop=True)\n",
    "\n",
    "    print(list(train_df['labels'].value_counts()))\n",
    "    return train_df"
   ]
  },
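  {
   "cell_type": "code",
   "execution_count": null,
   "id": "balance-usage-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Editor's note: illustrative sketch, not part of the original notebook.\n",
    "# balance() trims over-represented classes and tops up under-represented ones\n",
    "# with augmented copies written under working_dir/aug. The max_samples and\n",
    "# min_samples values below are placeholders, not tuned settings.\n",
    "#\n",
    "# train_df=balance(train_df, max_samples=3500, min_samples=0, column='labels',\n",
    "#                  working_dir=working_dir, image_size=img_size)"
   ]
  },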
661
  {
662
   "cell_type": "code",
663
   "execution_count": 12,
664
   "id": "8fc60cf6",
665
   "metadata": {},
666
   "outputs": [
667
    {
668
     "name": "stdout",
669
     "output_type": "stream",
670
     "text": [
671
      "Input image shape is  (450, 450, 3)\n"
672
     ]
673
    },
674
    {
675
     "data": {
676
      "text/plain": [
677
       "<matplotlib.image.AxesImage at 0x154c002b0a0>"
678
      ]
679
     },
680
     "execution_count": 12,
681
     "metadata": {},
682
     "output_type": "execute_result"
683
    },
684
    {
685
     "data": {
686
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAOcAAADnCAYAAADl9EEgAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAABATUlEQVR4nO29abNc15EdujL3qXtBzBMHkRSpgbIkWt3utlqvW2q/50+O8O998SIcfoMd7lZLem6pqaFFiSM4gCTmgcCdqs7O9IdcuXeBIiiKIoAi714RIMA7VJ2qOrlzWrlS3N0xMDCwcdBHfQEDAwMfj2GcAwMbimGcAwMbimGcAwMbimGcAwMbiumTvikiD+s6BgYOLe7XMBmec2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzE96gsYeLAQCADg1GMncXL7eHzNgedPfxWnHzuNCoGLYmd5B2/feAt3lzu4sXsT5vYoL3sAgLi73/ebIg/zWgb+BAgERxbbeOL44xAAV3euY2+1j2NbR3Hu6FkAjieOncezJ5+CmeH44gSObR2HwGAmECggguoAoBABBIYDO8DNg1t45dpreOvm21jOSzjue4sMfA64nwkO43zEKFr+8IsOVK8f//NScO7YGXznK9/BE0efwJntUxB33Dm4heW8wva0jeOLE3BzKMJLmjkMCoMCHqbm7nB3KARQhbsAcExaoAuF6YzbB3fw/p338PN3f4GD+eBBvg2HGsM4NwBFCwSCs8dP48SREziyOIqvnXsOkyjU4/2u5qhueOv6OzhY7a/5LMd22cY3z30Np46cxpHpCMQEYoAAEDgAgzvgRoM0w2zO3xZUUZh5/LyE4YYHBQyAaIGKQBQok6IUgYrg6u4VvHzlZVy48dYw0geAYZyPACqKrWmBp089iccWR/H8ua9CZQtHpm0syjasChSK4o4iYVQOhYtDRFCEJucOdYeKQF0hEJgDjvCOXh0Ch4iDv4BqQDXHLAD4Edf2e1EJVBEInwMOSClQVagooIKpxDUUVbhWXN+/jp+89TNcvnNp5KSfI4ZxPmScO3Ya33nyBTx54gkcKcfgUMzVUWeHV4FD4BYGLAAUDqeXC6NBMxw3oAAoAohqmKTQIFXi9zy8pzgAB6w6ZtAjwgF3VHMYBFDpxsmSkYjANYxVGWqXEgeBimAqQFkU7PkBfn/1FfzqvZewv9p/FG/tlw7DOB8iHj9+Fj/6xg9wauskVkvHwcqxMmBVBWaAuESuB6DXUwF3g0JQ4HBEvCoigCtU4uuqCheBikI1DS+8IYyWCcFcI8Q1tzBaCKo7qgNFNTw1Q2kFwshFACnta6UIIHF9RQVlKijTBFPHjYNr+OmFn+LKnSuw++THA58OwzgfEh4/fg4/+vr3cXQ6geW+YbV0LKtj5YLqUXgpEiY5SXgmiMM8wtcJYXDp8dpH4MCkYaSgcU6qUBgi14yQFeBjWfzbzQCPx5kdMAhUIk9N4ywSxgdVOAQQhQJYlPCrhvi+qkJLgU4FZaFY+hIXbryF3176NW7cvfYI3u0vB4ZxPgQ8fvwc/v7r38fx6Th29yoODhzLFbByoKYhIkLZAsdC0tjCUKKCCoDNCxFpXg+gV9XSWhuTKFRYfYXA3VsumsapDogbKkAzFv50GCPcsHBAaXyQ6HvG4SFQjWsWQXy/FIgoFpOiLCZgUXDxzjv477/7r1iOYtFnwjDOB4zHj5/Fj77+NzhRTmJvb8b+0rCagVUNo3BJT+lQLVA4CgsyYYIO8cg7PSLTMFqgfx+C0v6tKAoYunHCHWYClwiRQU8Md1REOyUD3/xTAEzCApFqXI9GcA1knpvXI1Ap4bUnQZkUsljAiuJfL/0Sv3rnnzHb6mG/9V943M8EB0Poc8DjJ87ih1//Po6WE9jZqzhYGubqPYyEoECxENAIDaqKotLaIGb0TgUwj5ZKfmTpPaMQa1HwEYfXyE1NaMXItJOHqgOV/0zDNGRrJf7jIjRcgVSHKDDBUaTCIeFpDTABVDw8KwzmQDGFVmDa2sJfPPVXADAM9HPEMM4/E+ePn8UPv/YDPKYnsLNbsVoaahVYDd90T0UUGYoGpVncEVlekAEEUZQpbKXMHq0SZMgLkNETFhchbDxy9CyZqba6EBlAzGchjqJJREhfLDD3dkjA4gAoypBZ+PjpxqMszNcnMK8QnTHphL94+q8AGH71zs8x2/ywPoIvLUZY+2fg8eNn8bfP/w2O6CkcHBiWy4o6R//P2dYQ0Wj0I4oqC+mkAY0GSgSmLpiY3zntwD0KQzCHuWN2QY3OJiRzSwkSwQRvOSdotOFAWY0FYF6j6ivOSi7DXO+VYfcMmenZJSq5KhoRQFZ34fH4GhXcxVaBLiagCN699RZ+d+k3uHz70n2ZTgMdI+f8nPH48TP4++f/Btt6Crv7jtWqwqrBrDktsn60NftFgCJRYRX3FlayvckqLhg6Ar3yGh5xhmAOa6KJAi4a1Vd61nyseIyozBbE7xh7LuE8He48HOhqwzEae5v07irQUsJY3Wmk4dnToEULtGhrtSy2Jsy+xMXb7+LlS7/F5Q+HkX4ShnF+jji+/Rj+07f+HifKadzdd+wtDVYrKXPsHRYBPG7yIgxbW4gYeadZcHVM0AxJLMJPpxdjFAkwL6wAebBozB9VYagqsCjGIn+raBIJDP2TJs2PfdAMceMh6X2hJCoAmkwhUagUAMFGSoKCsL1TSkEpBVOZUCZAJ8XKV7i2ew278x5ev/oaru9cw+5y58F/SF8gjILQ54RJC7731LdwajqJ5Z7BVoZgsilzPpZaPXuJ8cYHtS68jVmvsAoQfUg4KvM43uvgt/i1+FltjZT4HRNpFVuRpDSE8UO1PXfrbSICXYdhXqPgGSrcImA1Gm3hc4pFxbYCmGGYEFFBlKOCpSRrua6ZQysgK0XRgq889gy8CL5+7gVcvXsJv37vl7h29yp2hpF+Iobn/BMwacH3n30R3z33AuqB4mC/Ym82LD08GtgGEQCTSnv/CgtDzjGtrLq6A8XD4JLzaqDnZW6XtZ0o9zDXdAt2kEjwZRGHgHj/vNy95YpFaeyOHk4zzK0e5Hjjc7u1JwyvyB5okBSCPbRQjZaOG8NwcoIh7XW
7O0qZMBXFVBSiAhTFYipQcXy4fxNXdq/id1d/j0t3Lt3XexwGjLD2z8SkE77/7Hfx4vkXUJfAwYHhYBbMlcwb8/RfAJjnCce20qsgiebxc/nTSs9pLOII+bbCHxBVuFmr2LZflKi0mme1t9kVLPuV9JoqJZkOKAypwziDweR01x6x7ho5IogPqgxjVbBQ6WUmN7gbDx72cUV5HYpJg9kkKpCi2JomLErMj6IoDnwfP33nJ3jt6mstJjhsGGHtn4GFTvjrZ1/Et8+9gLoULJcVq5ljWZEitnCz9mCVManCPLwbkvHja0YEABIeVOCRT2YIKlEwEvol/ihMOV1CL4gMMT1/HohiUhwOMY1ibN2APVRjHxMkHbDd4mjFHiC8poiH4UkYu5MI4XkgefRPFWGoIvG6g/qgfarG2KrhKFqZJjy2dQzfe+rf4b3b72NnefeBf5ZfJAzj/BT41hPP48XzL2A+ECwPKlbVsKoO+kdYa+1LK5JotPWbFSYRL+k/WWxh8ZX9RLSpksw7KyK7NBVITpZ4hqlhgO6kKSQNMH1zZWirDngQB8jqA2i26dHzQoLrK5keZxf1nvfDvZMiqnMqBgaXPKKMB0b8qeJQFsqqO9wLJuVkTTWcOXIeZ4+eHcb5EQyBrz+Cp06cx/ee+Ba0Knw21GqYHVhBsfTwSr6W60XhJZg2QLY7+C+v7YZ1hpjiRv/VIaysiCirpBFGuijDz1bCBZAUPkPNPBLgz9IbmsGrh6evMYTtnhXaCE2DO1v6qZCHimShKZ5JPLhGwutPPrBBUK0Pe0cODJhFi2k2x2xhzLPH34YwVtUJ3zz/rda+GQgMz3kfFFE8cfwcfvjcv8dROR455spQLW7E4LAy82LhRNIIRXqrgo/n9Gri0SN0VlkL0LxQtFbC+MwBtcr5zdaFhIqGMdKAFfErNY0d0kbJciytZTRrNh3hNXm6ou2wiBnOyHshEdKK89Bp1WChcTN3Zv6b11iCuQAgKtmGbCV1okTltRcEuWF7OgJ8xEMfdgzj/BgsyoR//8x38c0zz6HUbezvVcyzYTVXGL2kgt6BkxvqWeLhLeZoninrN57D02BIK4pqUfE0hpMF7FtmgUbW8lXxaGtYcmzppjJkztAWfdRMkvkDEhVIaIDza9l7lTDqcKSGSUorRiVDqQ+GC1x7FVk9K9ACrxZ9WrfmVbO4tG57LhGqG9aIEwP3YBjnRxCG+SK+c/6bqAfA/oHFZEnlzeeZ80m3QoSjUN5kngweoRHROzhDzviZvFe7sUOw5o26qYcxRx7rYPUV9OBa7q32kRSQhtQYQM2YI/QNQr6yxUMDYlSpGpMnSo6vtWiTVVwmlJMmgYLaRBbFqvT0afyq+dqDaMHp7jjoOOS9nhoMBIZxfgTPn3ka3338Baz2wzAPlh5FD0fjqKZUSNy8CC/kGdcK0EK+nr5l4aVIhq9RTsp2lWc4DKFbDaMGgKJAZLfd8wof2EkLFOaNwjA1Bb8EEv1XcJ4zQ1igcXjDQ0pyFiCuVEOgPybHFp5/xwEBzqUCjpotILVOSTQnbZHeWUObyLKPqwIUQVXHpbuXguM70DCMcw1PnjiPv3zqRcwHwP5+xf7KsLLQGiitmRE3UPBkpU1IZtSWIZqBMiI54kWPKPy+th4yCQrMU7ON0sJhei9AequEIbMyh8uKLmkO8ZzoLRWPv9Yqud50isDnEkmqX/xmagw5e7AlLgauCvHMH8MgCwAxD6/pwqJU/r/DYPTGYZCiglIUi60CnwwvX/oN3rj2SjvQBgLDOInTj53Ej772AxzBCezsrmIm07pRgZ4hb+A0hvB+6De/5WhXFEOKhBfqzZYs4zgfU5iTeWMHCVsZYSAMLWmcTB7v8c6cBlszNmV7JtsZOR4WhAf+Fq8hCAIKQfGufBB/pSeTdg3Rh5VWNS58TUUjeDeP9ohAMGuGzI5JI6RVFUyLCVvbE3b9Ll5642e4cPV11DFi9gcYxom4uV54/Gs4tjiBvT1gaUAF2xbwELcSYdsj+nPqWeyxtckRtIJRGkQSypW/D7AY4p0va6yydj5tGm5cXeawKoKKJKpb9C7J69V2aLAvmn1LWZcnQfwryRBJuG1Fo6iyKkiqEI6nZQ6cc5+SoWlWhSmL4sYoIB520ogGSlHqFCnKNGFrawEUxysXf4M3r7w6ZDbvg2GcACCCU9unMa8Eq7m2AeUCATS4qeptbqP1IVM1nYw3UuKyOhmEuiz8hL/SRhQAb1xRcmsF6M8QUHopmCP/KYImhZmPp+xputu9/FqkJpHyeq2RIEQKYICUtdxV13hIPJjcjZ4136veg1Xh+9IuM6q6ImGYySOeNKZ0ShGUhWAxAe/dvYjfX/rXYZifgGGcAE4dOYkzR04DFa335qTviHAu03uPr3fawRaCY2XevKEzyQvNHz4ICQRZUBEWh8IOJhaAENVWMG9z8GekNfyTIQRnH5GeFfGUcaHG3iU+ouInXbwrq83pPAs4HtbyaB4ezHcrry2LYCKpexvXoWKd5SQaB5jELGjoghWUqWCaCnaWt/E/L/xkCIL9EQzjBHBk2sZ22YZVYAEAbHmIZlHFaJPeUzWmolmQiaKMk20jKOwTAlnEpXKAxERHJ5bbGu82bviUDYGEVhDSSLw38en/SEhAaOEm+aG1YUozPmPxJvNkb5VfoexlVqKykksVBu9SJvEjwoNFSIiw9jNp1FGZpfEj1OyLIEjwAszzAe4c3HmQH+mXAoMvRWT4qXAs4JgicwvvQ+5qdWB2p5peZH41qXCtcZm5KCjjgdZPaa3RtUJQ0POo7dOMCvSuIIsH5NCixbzpFR2djVT5t3m+Honfb70caV5TlVKX6UUR12gsGDm9bL4WkAxRM6f2oAFGIcxj7Kz1ZeJvRSoxCCY3qEf2e2r7JF44981B1/sjGJ6TKKDSHIsfQq9WOQ7lmfd593ZZQY3WhDK5bGVbSJLzJIs/mQ9qS+tCxUBgohDzxjoqiuDEZk8SWdjBPbzYnGrJjNWzCgyHe+Xr8RYuI39eotAluubeM0zl/GdhTpxFK4O3jWXwGoR7vubIe72R4nvmnP+WVs3e1gn/4av/G46ULbxy7TXcHUPXH4thnOg5l4hg1gmzzHGvWogy1/RK0u1vordRLWvqdQyF6RWT3+pWQy19LeQT6TdvkgeKCtys3c41vRZD13XyQNLvsnep98pF00eGoWYPNXJPISk/K0zaikGhXRthbgg3GL0l/adx3M0MJoBICWYQi0E1qYFiEfYztDUAswClGhYeUzLbZRs/evZv8e3Hv4PfXvk9Xr/+2lBG+AiGcQKIdgkQTkNQnJMe4vQY9AO0J22jUd7zT6D1/mJqg2GrgvtRcgqyG0k+N4BGiI9dmd76lJ13mp7RmvIePPOSfNT+2EVCgLp6EALCFrUdImvPTBlMUOsIJBkA4FC1m7FFJAA5tW4OEVt7RRlO39NVZd4a70mxcMdiNfaHLgrOHDmLHz73d3jxye/iFxd/jgs332JebYe+kjuUEABslQX+8wv/Eee3HsfywEJJzx2zVyyrt5nFlj
MSwkSybe9Kj0lPOhWGp8y/IiXT8FJrHtatZvGXqV/kt5atmnYQOPPCGA8TSB945hSJu6EgKsHRF41RLgiCPuepjKBIZcDFVJpsJ80bpuCcaPRwva6H2DF65o4maK3C4W0Wq5T6vGTsYdJ4jkkFWxoRh0wKKcodLIr9eYm7y7sQGK7evYp3b72Ld2+/i93V3sO7GR4BhhLCJ2BlM1Y+R9Uyb1AzKhzQSyn6YDL6oDQE0MLWQX6XPUJhohgTHGTZsLEPj4JMLwB7K+70SQ0WX5B+0xpjqECjINPaF/mzWYUKCRLPCisLOo02uJanqhvUWF1ViZzSlLkvlQAREzSz5/tgNFzwUGCRiBFARBzzGutIG5MqDx6pFqG8h7EvdAvnHjsLEeD8Y2fx7fMv4Nrudbx2/U28c+sdXNs5XMuSRrkMiBvGckWCsgcYw83JgTN6MJfM49aNKGcTgUliP0kuk49WR6/CSiMC9JlMINUT2ELx/hxSyEdFGlavrKYCAkSbcWSYG+eCwK37+vztlOAUy9feK7yt2svHST5scmJbk5Pfj8JTFs7S5g1utZ1eLSBf69XmwRFrCfm1ajHSxqFwmOLMY4/jB1/9O/y7Z/4a29P25/u5bziGcSJu5devv43Zoke5KFSbuyfakO4RKQpNf9A1aUUiVGSuZqIwLqJN75hh6+zSb1KEd0vpkjQg1eSssleItA0utGXxJknsiihUTdIfJ0PiEPqKPylFIhHftnyxeq5ZiNy2UoAlDw13QUxP+9orT30jNp54gOSoGNwptJ1RQbKh1kbZ8o2mkc8eqgmrmsoNjm+e+xZ+8LUfYtLDE+wN4ySWtoIyB4IyBFNKgyAlNdLpxQ1f4GzGt8Ine5XU16HItLNFUj1ztU4mWC84Ne+qkYdN5PQu+EdYaFHJka5o6keTn19juJrKCBOAhXKIOz2ZdCWG1p5F5LJxLUFRFMatXiuMVeSS0ina+7SaW7YRh4wzr86DBMK5VqYMSW/Mtb6evFygD4R7quJHnO+u+MbZb+HxE08++JthQzCMk/jw4A5uL3fgWsI4izbvc29gGH9nblipP5tV277aIL6QSnsV0ahnwZPkgFjZd686H+jhSKnjATDRUy40jFHhzQAKj4/cr5mE+hjNAoed2+nRQueUJmmHBRwzDNUMVg2oUQjKFRMKaUWpVqjiW1LSoIWjZzwsJkWjKcaQ99oB58IBdl4PGKpbhLe1xh93h5vj6OIozh49+2BvhA3C4YkR/gjuHuxgZ7mDk0dPsZoZ2ZIKNWA9yzLZX+wzk1locUcLAdODJXPIg+UWK/k8+y9dBczoVSJCjHZO3OzextYKBOD/z8Ick1VVmGBG0AijjQJG4uGP+3zJWnWQGkLVHa5d7V0l+rtZHEsDzJjAkaXlNdIB20b5HoTcSio8OKBKbx5buIsBRSPBrVmsQm9LId+mpDPWCtfkLR8ODM+5hgLHtjimNTHoyJ+Y76HpG0CloEiJNoCEV2llH4aXKU3S6XSg8YWH69OS6ZDYq3RvOe163uut2psk9RydppdE5JzCtkmwlqjfIGE5EZqGoWs+u6TMGOVPfI2J5E4P5k3BIbxtb63ka85lTYKYQNGWh6PlltluqQ7M1VBrhdXavGStFqF/jgble8FfeubE04cm7zwcr/JTQpTq5pgjTCUH1Wv4x+SdtoZn8yh5U2qT+rBGbUOrWuYImYEr5EG2UDxZeEI+Zm+/53RIrw4LN5UBZC4htXnygMhjIojp98yScotYtleE7Rxye0JKE7npM/pAWQmueVqwLJuHl+dbwvdFkvPXTx2IeHh+d1Z/4xCyahECa1xD4cFVmQ8XrN2k7jhz9CyKTodi/+fwnOsQwVpDINojDEFTfd2AqGJGJMkeYB9Ezns3izyqws1bcUO2NX+Wg8to6gNpx75m+BkBqzuEqw/maqjsd8TwdxSxyhTlXRcNb8iqcWofWXpJ5tVhKxU552neK6lJdFhVQ0VMxzgi91MadRwWobaXHrLv9WTenJ7bs10kEDNIrdTABcQrbJ7biohwkoIqGtRJa9E+jm2fwOPHn3gIN8Ojx/Cca2j5JLKamn94g5DcbRaljahaFhpZkuWlmfcf5myZR8YXDKGoUBHeptCbKI3TmxeT6P3Rm7TprvR+jS8bUiFBmHB6QRakmtdX9jGZMzNUdRpmXm7mqBbNSEyaukNBdayGNf2kdjVNxS/1aCPfJiXR4lWDht3PoK6ewDcRsva469HzkbKN41vHP5fPe9MxjHMd5r2Nx1vHRGEWgpaZo7lo7P4ASJzlzzqDPM+cr0V17abr0h85EpadxOTExmO00S0a5GzOaRnvhwDDxvjtVAQMT1Q9Ck6OkKSEFkw0gNiVEjd+pUtK351BsberzLfGkQPhbsI3SVr+HC8xflN4rQYu202FBc9BbW0hv2XVGWuD6pLzPHz4LDCBag7rifiXGMM41+COtpwovVLekEm1y+CztRWwlm9hTZUAACQNtI9RRRjIQhPQh7bRvVwziszZkIPPLYFl64FlKw9h55r8uGTxWIzCZaicHYz2PGTpZBIpXPQLhERKEv+BrtmbCgf5mGZg+JpPLf0aWeUu2pUDIwiIPBOqPJxSxKxXySFZ9EJ7vurGMHkY56FCDnjlCFXMauZNIn1kbK110G9G0Ftm/Mp9lK22K42nK+SpimTI2tmwuToeECqtW+uBSiOiO71lL8S04JAiY6FUwPg3Q1sWdcJ7x7W14pMHcX9qledgJOWSpGreCkXqbCMJmvd1i2VFzjWBgpiGKZqFIe8JtawpNsBowD3v1lT00xJRAwcEoiKdu2Lkc//8NxHDOInzx87h/PHH26iXFoFExYf3Qh+EjlAxqo6FlclYvZe0NQp5Ze6aTyKC4k5SnGQjLyZe1LuxSxpVD2/XNBKaUWiGo26AZc81mT+0BumvIX82fX8+buSJlMgUwRR+iiFmOvDcHyprrZ7kG2tkmNJfpygPOiTJAU1j6J6ooWWdiKKWxMEQLpT7RZVzaodMOWEYJzGVBaayAOY+GqY85WOjtDMkA8DGeBZnShujRmtbZBYXPUNplV6FNbW9DFWdOWNFUux66Cpuazs3+bXMzcg4cukeNQtIXaEhXkwak0nr7DRx6mghdQJFfC8MI4w3ehp9PK23OTJayOVMjWMMhrTCXJU0yHx52X5KBQkDi2Ha6ZDtvQfQdiNCsfZufKlxuI6iT8C1nWu4snMtwi5OpxQOJzdCO2JKJZkrmWPmNuhUdwfQ1rsD0j2cZO4njf7W0kP32JCNnvdWFp8EnebW/kiXoqxm7RDIcomIQEt+vGz+04DMQcUFrvNDXHve/0YucJAaenW46DqhHpwpRZtYkfxdS35s6C+Z53wqv0alQmtD66A1Fr5myQIAklEcrz/ej8Mygj2Mk1jOy9jXoWB4Fzdj0XWvZUjCQOR14T1CVjKsLPV64t8KaGlTJEovV4UhG2/zkoUPRCUVyCqvQDTZOwKRwnCRl5MHBA0iBb/yv+sUvMxlU50hx89aIcpjqLrWWElf45SIMHjtoAGSpyscCsg+J
sfL1n7OOAbGkc/uXVXYag2CfNHOB3YIKjT6nNb1gMHnjAPpcJjnME7C4Xj16mu4dXAbUylcWBsT+7kTpYBzjc4Qlj1JaSFX/PHWBgkPF+Nf/Ir3BkVmkQYLL8FlRBmG9rWC3vJeBSdBgFYUSiPro6Is1LQil5AUD/50vF5bM36wYFRJPqhGT255zbL2u3EMpM5t5toAq17wVlhuqwCRRwbzec22UHynuKEgVBxSrwggZZCRSlHg5u5VvH/r4oO4BTYOwzjXcGP3Bn7x3ktw1Fgiy0oiEKd8SElmPghAgmw+Z9jFmDJDPnOPfZW0GhWGiTQ2zemNlu31QlBWTcWt5WBFU9Iy/hjo1VJNgBTD1PIxiwkTz78Z2uasZY64pcas848BmM2wqoZV5qnSX5t4tm9orNJnQpuOUKs8K0zoDUUgJSZ+1vPZzMNjxQUPqrhaJDc5IHj96ivYPSRCYKMg9BG8e/siru/fxLnFGZSimMwwS9wqVRziIaKZd4xl+Jm5mHnMhIKnPf8Wkukb2Z3eIGQ9lJ61e8L8b/yMrTGB4v9r7aVUB+PXbHOo9kcxC/V6pDC0c7WDQtsPJZWPrRbOUlor5KSHy7mUyMWT5dPEpiUr22iG6eKNhJ9RRCu2eVxf7u0UWF9BIWvVXYnseH/ex6U7l3qe+iXHMM6PYLaKGRYjVMWBVS92BL+N629pNGGRtRUqGH1Gf46P6ayqzgDg1A9Yz53I8hHNnC4Lr1kOAVsgjljD0EPTRoRALsLl/0sPes0B4/hWHBBsz0juRiHbaS2/dBZukjfbKrxAk//Mw0LSOFXaNadcSptUkTzEkmPLqRrPQyWefxZFKYVFOPZii8InwWuXXsPVu1c+vw97wzGM82OQvU4RgZcSy4GssuZJ/iii/dF4rp6UOGk3eKxSkHuK/+o9H1VHX34LDmazgJO5Z1qF59/MxbIabAh+rCEb9prkIABoG9JyFhQMvx0xV4nMFd1ba8WQvVBWptf8uVBjSVjBTj+akUNrkXgYtoOEezhUS/OKofxnUchiQUv4/VBqQBjoFMp81/au41cXf34oplESwzg/Aodjb95rhZs0xok3lUEbAb1mEsbfS28kTaqvty7QiriZn+Xj03dJ5GaS7skl13DG92n8bkbiT8he0nqa8XctI3pdJs3CPLCmQXtcda/cdI+awmEm1kLrJLmHETE0VXDvS3pGZCwe7wWvKkgOU+zmVG8HiPNNUcR7aWbQMrX3XTTIDKKKC9dew87y7gP85DcPwzg/gmoVF25cwPMnn4GoYCpozX6Is7encGGhhwboWKP7IcPM+J3k1goUrs6bP7xT4+mshcWhNJ/q6V0Nr6+BANKfiTCTlZZ2AiSbFxZdzCrHsayr7AGs1ypyUC6JC8bt1EkEUFaL0uCVhZ1UOlAWzxJCA40xsXimScioojGbhERLUA6pOFGTkBFV34m56sG8j/dvH44K7TqGcX4MzI2hmmOGo6mwM9zKkE9UOa3Rix5gJVShoZ+jcaO1mc+WF9Io0lgsvIeIcBcov5FcXy6mLUhN2B5qZo7qEBoKmr6tCuCmMMwUKKPINAn4mdcFmSFfv7NII414bs7eL8kNoU+Uy4pYwGK7RhF5dikK0TC2kup/EBTPcBZh9Ei23loIgMx7gaXNuLP/4cP6+DcGwzg/Bvfkji1cFbYs0CYsZC2shFuoKHCUohTFQgARo+NlRiYOMQnhLPd2Lxp7i1lRKghBr5afaoldn+mts7fB0BNSGkk8VfugEtKScEAKifT5+rSFtm2lPcP1aCGFt8/RtFBu1+ZNJ9W4ngji0RRRGDG4MN8NyQNS8/h6Q82MeXp/P3NetI3FSdIcD0t99l4M47wPUnBLoYDVGDoWhWj04WKBDz1pRr3IUagSy2JVIKiUe0TrEwLGPmfkkG1rmGQEHXntKiNbhsciYI+RBSHmfsbf7WNsXE5El6rOdgykVWQFEnKXTbdW2g4WlVgTn0ix6mQDNt4wcs4SLCg5oCyb8YBq3Ic8w5DTLvG6ZmMVTPI1xGOZoy2SUplwdOsodkfOOUC/wh2c9JqMPxXAxLs0b/g2KqaRf5ai3OgcN7xayDOHrEdOmcQztakNcPA5K7VAk0zJpshsBkbJJIWjzY+G2l2E2oXX5/3UILqBG4Aq0g4WgNM06aWke/rCcFYyn5YYI8vpm+xP1nw6ycIU1toz4YUjZ06CPFcHYk1Qu1W9uWvFDUfLETx16hlcO0RtFGAY58dDevugGSSAmbmXoFdMG3VNkjOqjX2zvqckjcSNj6wCqVkc6otoo/0grTEfzJnsUdKYG3mBzwvc03dE+/9w1bPHjW8KKCU6c/FSSqFo/j9Y9FnnFDNMbTQ8MzTxd4lceaLESg8jeg7L8Zvw8t71lVJFoVA5MBhRHOzmIaUC7C538cEhoeytYxjnxyG7C6S05NxysciDxJMC1wLf5m0Kx6+ED7TeYkkmT4alpoCgQC3U5tTy5kcjVrZwmR7aTZqwllgv/DSiAKju5+FdXQvEK5zznlLCqkICM8LSkp4f2TrioUBV9uxXZiEsnyeVEYRGHTFrXgNaHFtppaVdIxlJANsrnbggJFnExvDY/2k2Y3+1+zl+wF8MDOP8ODA2c3pAgUEt2vNs30Msws5wCtIocfSVCLPtMyIZyk4aJIBQN3BoznFaDY/GUDPJAaFV28fFAKoS5PdTqo/5bBDj0XqkUjhrakkor2Q8OYpLK8RkLD1JLLdt8izIjdXWPKvTu4vFlrBkF0V+HsWnKJwZJ1vCQ1d0OqDyEMucNadSkp+boXHEKaMgNHAPund0F46SFUwm8CpQr6gexPdspMc/hbOQXZvWJDY7C8KzOirMgBWNLYaS0RQMGCyTbC+9qiutWxqG18jl0gzT3WDGPDWNlNcQkbNSdSG7pWib0yZNGU3PYmoUaaQbT7KfkpCvGQizj+oKiFSS2GOyJeVA44DhhA2V341Sm8jX7lFpjq3eUYS7uXsTc109rA9+YzCM82NQPco3IiUKFmotnBRyTVUUVVq7ELnMR83JOw9PpKyiShE0rrpmIYY9Po0CS5RBAJoTaYNr9D/3XgRyyoawNyr0qOphLpWHgTFUFiVZoam2S9OXZZKNlD26d+u2U/MnQuWk9DE9bq8TmcfScCVZRkA/XHgYKV+DO1UQ1jYHCxPcrITLJLiyewnLunwwH/YGYxjnx+CDDy/h9t5tnH3sHFyBIqFNO5thhqA6PVHeQNIGQUjf626nzXMCfcKfxtA2fSG8stUIOYtkd5VyIEnFAyUq0XPAftdnlhvepvH++JyF11lVAAMmHiBROc0pmjh4pLGa+GfNA+dhoCxKVYlAPwwqnjMLRalXJKKk9a61TdxRvBeJ2rWC5AdOux/YEtcP2dLcxDDOj8GqrrBXD3ijMpcrkR+ZMQdEUNMmCgEg+aLOFQPBP2M6SAFnehw3RA8PNNqsTKo2ga6kwEkb+uc6wd77SLPuF+6Za3IeVHv+GmFx+KaoxwRTSJC92vBeklpa0snsjjyIwuhz63cqHDQSoCNUGRgdtP0xDC/WCtKA
cFuLZyMFjY2U6oVaCm4ffIirdy4/sM96kzGGrT8GK5vx+o0LEepR0T29mDMf05z2kHvfxKYp5I55rpirY+YSoFod1ZJUztaLru3a5PhVkwWx7j37+HEMLluTBwnDmi0UDNoeUfQxr1bJzXYLC1bp4cWNezaVygZk9MBbTplLgbP9kZlxkn0rp0sEaOSHLF6ZA5Vto6wsN92hokCRtQ3eDin8ugJvXH0Fe4dkuPqjGJ7zPphtjhvb7qXLhffkvCJpacaVCq2i6NxvqYIlWxhFtKmwRxGGUZwCar1amno5bWkSfxaWITC9Nvs3DDSjZUFDEwmGETyLLdk3VIaWGV0rw1bhDs/MR1MkmqE5PFoyiIXB6YkrjTC9Zxp8snzQqsC9B5sV6wzVlRXpFEwrk0KK4Ob+Tbx65WW8cum3h7RWO4zzE+ChIkC6nNf4/whH6RGym+4II878kzlV6ve4cUKSfdDcai2SLRBqB3nwSmNRfRSJMk/NNso6LS81VFKZT3iYhBJ7ejiHqkHyUb2GgYKVZUtvj1YN7hAAEcIWenFN6h+J+CLCAyWGxduKiEx5Nby8iDaiQoTz9x4cDoTSoAgu3HgdP7vwj7i7f+fQGiYwjPO+WNYVZq9QKRFmcl9KqMcpxKypj+eGakHkksLxqDCu2Ajm7iha4KmkgD5XqVwxYGlwWTjSNc9jaaC9KJRNfeNjZPrrBgomsLprgEpUW1txlTlmI8LzEEKSGhie57ha+3nEYzkbqaEPlJS7/P6972WGq8p19Dl84ul504uLYOkr/OriLw7lFMpHMYzzPnj/ww9wa/9DnNk+3Ue9zKNS224mcEaS85juEKUoFdDzLbZbPK1B06uB1dysOsXOkzC40MmlwwQ0ZjxzT2YqtgNoo2ru1qh1cEAKq8nQkARhYcqx1rMEY2vmycLq7/pBUMkKkKT6adcCigOFpZ92QRqHgzBXLRrkBmRxuRtzSrhkIffdW2/j9t6tB/jJfnEwCkL3wUFd4p3b77aCTQoyg0WbuKcFLiGtkdVRqHCXJaJCiah0GrS3J2hEmaUG0yhu/OTIZnFGEZIfmrssGXqmwHN3rhz6JpuoIiY+2rq/rKDyMAimT3ZVqXjLwehqhmoVK2rYBmvHQ6dWlAUpaZM7UbEy7oORe15HbE6Lgo+w31QdWFkUypwynlDB7ryDX777zziY9x/KZ7zpGJ7zPjA3XN65AvOKotz5AWmnvvHvzKFyggUIk5vB4g/JCRzMCLqccqDa41HbBi6WU3IVYHOb7Ds6b/7oR3ahL+FiW8uWBNsXqSWbtHIRLiAysMADuFlko2ZYWyIRPcq1qq+xt6upIpjMntwtk+Eyx+aolc8DJmZYM0DosbW0YpAI8NaN13Fz9/oD+kS/eBie8xNw8fb7eP/OBygT1d8FWIhiQbmNIqFDu+D3Uuy5Ga9XxMYusoYkKWnRIy1wiigDygmXzL3UBQX3Fk80vTakUeJCfjM9q7dcMdM+hcZjs8kYHlOaV89mZmfzrPU2Aa5R6MeOeG2Gy1mWMFhF27dSWHkOCiKlSRxtp2lEGbEFWzR0bPdtF69dfvnQqLl/Ggzj/AQczAf4x7d/ivd2rkBLrA1Inim0G1ozBV9rKHjMq8SUB4ev+RhhVIXiVd3owgC18ViTIJBkCLTwF0jWjyIOBlVHru6jQwOQ86Pd83JLIGcnwwirh6dPQejqsfbdXfr1ASEi3Sq6mmlyNlsAoHGLBbFvpo2AeZAQoniVtIMonLkafv7OT3HjkDKB7odhnH8EN/Zu4n9c+CfcXe1Bi0AnabqtyrZGy7U4fZHyH2wwIJbSCraKYFF407bEFU17R4SGJg7RJibSwj/IGvEhDTkeIbwYubLhQYHsigI5usafZkitwk1h6GNn6T0jZ0bj2QabLlYeKq8DyNnMtg4Jwt0wELSFRRm2xmuIxRZaFtCpAEWxb/u4vnP1ULdNPg7DOD8Fru/ewGs33gg6XgEwATopShH+KQgNnyiYINstWa2UXMOQ/8/NYcjdnb0404zJDU1UGeBSJG2sIuQSoaIhgC1oa/5a4QcaBRfWnoybv8S7lysSsp8FXe2gK0CsadTmgQSyeHJhDFsqEZLzGBDhEDn4vd7NFC3RD1UNTeBJ8Na113Fr58bD/Ei/EBgFoU8Bc8NvLv8W29MC3zj9HLbLEQDRwjABVnPkcyqxmt5tjjdWenEnqHJA92JryZ1zptEtpkocnABZ3xyWRRdhXxTIh8xCi3lKnbBNIsb1DQCM86m5SLcVnDI0B5rINL+ZxZu2aY1fz0Ft1pdb09JJaoh/54B4bBorpU+auEZ7RYpgZ76L16/8Dua9/zsQEHe/byyxrkU6ENMp54+exX/42t/hqeNPweYguc/VMFcOCluN8Sr0XZlKVT5tbpA7KqUyLI417WLREK2+pisE5mkUWwaLSGnVmUvWVkXOAg7DZuZ9wuakqmKil03+a7RPwrCDzdR7oEVTpIvMpTh9+Jx8Y1itFVUSHegrJbSUpqKYJubYkSBDVXF3eRs/fvO/4/2b7xzqkPZ+JjjC2j8B1Ssu71zFf3vzH3Fl51rIVapi0sL8jwYhhT3ByMFIhImJjSzK0JuGWl9UL03COEDZzNlIAMiGvcTkipYu84F75iZ777Qi9oCmHlHWQB2OmeSI5j7bVi80HV60ZwXo11mZ5bUwXHVnXzSfOdkJ+begl7oLoihWgA+XN/DjN/6/Q2+Yn4RhnJ8BN/du4eUrv4dhZjuAvUtpq2QBABPDWqxFIAK0NQPrldnYFGswu3eZUPZRYbWJYBlX0ccfhsj0aDDvv29J0vcWPseqwF7NhaPnyeL9mtZyTzDMXn8VmStnXu3cYB15tzTSg5BQH7IsijIpru9exf/98v+F929dHIb5CRg552fE5btXYDbDpbBd4kg15bY8DGzAS/QtOE/SqpmAdNkPhG+qnmElWsU1CQVea1vnF8POYVjunZRgSBodK7XUCUoihFLSsnlAcOrLs+7rzasrjTQquwqgNqZQ6tN6e75OMFDtPc5kJGW/VgC8cfX3uL1384F/Rl90DOP8jKhWsawzpsV2i95EC5SKeEb5zBaSIhvwPVdM4xTSh4Lqp5ib0HTc7lYNk5P+ZmlAOR3Sc5Y0qNbEyULP2vND8hoAMgkae0k8xL3cwAkRb6FVMqK8tUaSGigtMMgpG3ZMwjA9JlkKQjxsb7mDyx9eerAfzpcEI6z9jLh9cBsXbr3dhoaLKCYPNtBCga2iKKLMKXtPM8nlJYqp8aWWMzo3YgfftjLvnM2xqh4yKcxHqyWbZ21jNg2v8N+0PVaCyeRJQwL7qpocXkES1aepYFEUohNCRylD88w3uT5wLQ/N8D4sswTJgjsaGg3QFXvzwSC2f0oMz/kZYe549frr+Pb5b2PSLYhX9g9T54MeKaf/IZg4kgzerLVWFouMIlex4EdYPFpvo0ThJnuIExxJGUSb/0yoAHM1uBQY4kPOdktMonUGEoBGLDCGsYXjZiXIwBFWW0QLcG9D31h/3uzjIg+be3hENFDgvdsXsTqESnqfBcM4/wz
<...base64-encoded PNG image data omitted...>\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "img_path=r'C:\\Users\\Vinay Wadhwa\\Downloads\\archive\\C-NMC_Leukemia\\training_data\\fold_0\\all\\UID_11_10_1_all.bmp'\n",
    "img=plt.imread(img_path)\n",
    "print ('Input image shape is ',img.shape)\n",
    "plt.axis('off')\n",
    "imshow(img)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "ed426a89",
   "metadata": {},
   "outputs": [],
   "source": [
    "def preprocess(sdir, trsplit, vsplit):\n",
    "    # walk the fold/class directory tree and build a dataframe of filepaths and labels\n",
    "    filepaths=[]\n",
    "    labels=[]\n",
    "    folds=os.listdir(sdir)\n",
    "    for fold in folds:\n",
    "        foldpath=os.path.join(sdir,fold)\n",
    "        classlist=os.listdir(foldpath)\n",
    "        for klass in classlist:\n",
    "            classpath=os.path.join(foldpath,klass)\n",
    "            flist=os.listdir(classpath)\n",
    "            for f in flist:\n",
    "                fpath=os.path.join(classpath,f)\n",
    "                filepaths.append(fpath)\n",
    "                labels.append(klass)\n",
    "    Fseries=pd.Series(filepaths, name='filepaths')\n",
    "    Lseries=pd.Series(labels, name='labels')\n",
    "    df=pd.concat([Fseries, Lseries], axis=1)\n",
    "    # split off the training set first, then divide the remainder between validation and test\n",
    "    dsplit=vsplit/(1-trsplit)  # fraction of the remainder used for validation (worked example in the next cell)\n",
    "    strat=df['labels']\n",
    "    train_df, dummy_df=train_test_split(df, train_size=trsplit, shuffle=True, random_state=123, stratify=strat)\n",
    "    strat=dummy_df['labels']\n",
    "    valid_df, test_df=train_test_split(dummy_df, train_size=dsplit, shuffle=True, random_state=123, stratify=strat)\n",
    "    print('train_df length: ', len(train_df), '  test_df length: ', len(test_df), '  valid_df length: ', len(valid_df))\n",
    "    # check that each dataframe has the same number of classes to prevent model.fit errors\n",
    "    trcount=len(train_df['labels'].unique())\n",
    "    tecount=len(test_df['labels'].unique())\n",
    "    vcount=len(valid_df['labels'].unique())\n",
    "    if trcount < tecount:\n",
    "        msg='** WARNING ** number of classes in training set is less than the number of classes in test set'\n",
    "        print_in_color(msg, (255,0,0), (55,65,80))\n",
    "        msg='This will throw an error in either model.evaluate or model.predict'\n",
    "        print_in_color(msg, (255,0,0), (55,65,80))\n",
    "    if trcount != vcount:\n",
    "        msg='** WARNING ** number of classes in training set is not equal to the number of classes in validation set'\n",
    "        print_in_color(msg, (255,0,0), (55,65,80))\n",
    "        msg='This will throw an error in model.fit'\n",
    "        print_in_color(msg, (255,0,0), (55,65,80))\n",
    "        print('train df class count: ', trcount, 'test df class count: ', tecount, ' valid df class count: ', vcount)\n",
    "        ans=input('Enter C to continue execution or H to halt execution')\n",
    "        if ans == 'H' or ans == 'h':\n",
    "            print_in_color('Halting Execution', (255,0,0), (55,65,80))\n",
    "            import sys\n",
    "            sys.exit('program halted by user')\n",
    "    print(list(train_df['labels'].value_counts()))\n",
    "    return train_df, test_df, valid_df"
   ]
  },
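  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b7e1a2c4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Worked example of the split arithmetic in preprocess() above (illustration only).\n",
    "# train_test_split first takes trsplit of the data; the remaining (1-trsplit) is then\n",
    "# divided so that vsplit of the *original* data ends up in the validation set.\n",
    "trsplit=.9\n",
    "vsplit=.05\n",
    "dsplit=vsplit/(1-trsplit)  # 0.05/0.10 = 0.5, i.e. split the 10% remainder in half\n",
    "print('fraction of remainder used for validation:', round(dsplit, 2))\n",
    "print('overall fractions -> train:', trsplit, ' valid:', vsplit, ' test:', round(1-trsplit-vsplit, 2))"
   ]
  },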
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "beb2abf0",
   "metadata": {},
   "outputs": [],
   "source": [
    "sdir=r'C:\\Users\\Vinay Wadhwa\\Downloads\\archive\\C-NMC_Leukemia\\training_data'\n",
    "trsplit=.9\n",
    "vsplit=.05\n",
    "train_df, test_df, valid_df= preprocess(sdir,trsplit, vsplit)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6c3cd360",
   "metadata": {},
   "outputs": [],
   "source": [
    "max_samples= 3050\n",
    "min_samples=0\n",
    "column='labels'\n",
    "working_dir = r'./'\n",
    "img_size=(300,300)\n",
    "# trim is defined earlier in the notebook; it limits each class in the given column\n",
    "# to at most max_samples rows (a sketch of such a helper follows in the next cell)\n",
    "train_df=trim(train_df, max_samples, min_samples, column)"
   ]
  },
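  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c9d4e5f6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch of a trim-like helper for readers following along without the cell\n",
    "# that defines trim. This is an illustration, not the notebook's actual implementation:\n",
    "# it keeps classes with at least min_samples rows and randomly caps each at max_samples.\n",
    "def trim_sketch(df, max_samples, min_samples, column):\n",
    "    trimmed=[]\n",
    "    for label, group in df.groupby(column):\n",
    "        if len(group) < min_samples:\n",
    "            continue  # drop under-represented classes\n",
    "        if len(group) > max_samples:\n",
    "            group=group.sample(n=max_samples, random_state=123)  # cap over-represented classes\n",
    "        trimmed.append(group)\n",
    "    return pd.concat(trimmed, axis=0).reset_index(drop=True)"
   ]
  },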
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e337f302",
   "metadata": {},
   "outputs": [],
   "source": [
    "channels=3\n",
    "batch_size=10\n",
    "img_shape=(img_size[0], img_size[1], channels)\n",
    "length=len(test_df)\n",
    "# pick the largest divisor of the test set length that is <= 80 so that model.predict\n",
    "# sees every test sample exactly once in whole batches (see the next cell for a demo)\n",
    "test_batch_size=sorted([int(length/n) for n in range(1,length+1) if length % n ==0 and length/n<=80],reverse=True)[0]\n",
    "test_steps=int(length/test_batch_size)\n",
    "print ('test batch size: ', test_batch_size, '  test steps: ', test_steps)\n",
    "def scalar(img):\n",
    "    return img  # EfficientNet expects pixels in range 0 to 255, so no scaling is required\n",
    "trgen=ImageDataGenerator(preprocessing_function=scalar, horizontal_flip=True)\n",
    "tvgen=ImageDataGenerator(preprocessing_function=scalar)\n",
    "msg='                                                              for the train generator'\n",
    "print(msg, '\\r', end='')\n",
    "train_gen=trgen.flow_from_dataframe( train_df, x_col='filepaths', y_col='labels', target_size=img_size, class_mode='categorical',\n",
    "                                    color_mode='rgb', shuffle=True, batch_size=batch_size)\n",
    "msg='                                                              for the test generator'\n",
    "print(msg, '\\r', end='')\n",
    "test_gen=tvgen.flow_from_dataframe( test_df, x_col='filepaths', y_col='labels', target_size=img_size, class_mode='categorical',\n",
    "                                    color_mode='rgb', shuffle=False, batch_size=test_batch_size)\n",
    "msg='                                                             for the validation generator'\n",
    "print(msg, '\\r', end='')\n",
    "valid_gen=tvgen.flow_from_dataframe( valid_df, x_col='filepaths', y_col='labels', target_size=img_size, class_mode='categorical',\n",
    "                                    color_mode='rgb', shuffle=True, batch_size=batch_size)\n",
    "classes=list(train_gen.class_indices.keys())\n",
    "class_count=len(classes)\n",
    "train_steps=int(np.ceil(len(train_gen.labels)/batch_size))\n",
    "labels=test_gen.labels"
   ]
  },
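  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d2f3a4b5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Quick demonstration of the batch-size selection above, using a hypothetical test-set\n",
    "# length (illustration only; the real length comes from len(test_df)).\n",
    "demo_length=240\n",
    "candidates=[demo_length//n for n in range(1, demo_length+1) if demo_length % n == 0 and demo_length//n <= 80]\n",
    "demo_batch=sorted(candidates, reverse=True)[0]\n",
    "print('divisors <= 80:', sorted(candidates))\n",
    "print('chosen batch size:', demo_batch, ' steps:', demo_length//demo_batch)  # 80 and 3 for length 240"
   ]
  },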
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45192270",
   "metadata": {},
   "outputs": [],
   "source": [
    "show_image_samples(train_gen)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "be76b54c",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_name='EfficientNetB3'\n",
    "# EfficientNetB3 backbone (ImageNet weights, no top) with a small regularized classification head\n",
    "base_model=tf.keras.applications.efficientnet.EfficientNetB3(include_top=False, weights=\"imagenet\", input_shape=img_shape, pooling='max')\n",
    "x=base_model.output\n",
    "x=keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)\n",
    "x=Dense(32, kernel_regularizer=regularizers.l2(l=0.016), activity_regularizer=regularizers.l1(0.006),\n",
    "        bias_regularizer=regularizers.l1(0.006), activation='relu')(x)\n",
    "x=Dropout(rate=.45, seed=123)(x)\n",
    "output=Dense(class_count, activation='softmax')(x)\n",
    "model=Model(inputs=base_model.input, outputs=output)\n",
    "model.compile(Adamax(learning_rate=.001), loss='categorical_crossentropy', metrics=['accuracy'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3a18c5d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "epochs=20\n",
    "patience=1 # number of epochs to wait before adjusting the lr if the monitored value does not improve\n",
    "stop_patience=3 # number of epochs to wait before stopping training if the monitored value does not improve\n",
    "threshold=.9 # if train accuracy is < threshold, monitor training accuracy; otherwise monitor validation loss\n",
    "factor=.5 # factor to reduce the lr by\n",
    "dwell=True # experimental: if True and the monitored metric does not improve this epoch, restore the model weights of the previous epoch\n",
    "freeze=False # if True, freeze the weights of the base model (a generic freezing sketch follows in the next cell)\n",
    "ask_epoch=5 # number of epochs to run before asking whether to halt training\n",
    "batches=train_steps\n",
    "# LRA is the custom learning-rate-adjusting callback defined earlier in the notebook\n",
    "callbacks=[LRA(model=model, base_model=base_model, patience=patience, stop_patience=stop_patience, threshold=threshold,\n",
    "               factor=factor, dwell=dwell, batches=batches, initial_epoch=0, epochs=epochs, ask_epoch=ask_epoch)]"
   ]
  },
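  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e5f6a7b8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustration only: how the freeze flag above could be applied with plain Keras.\n",
    "# In this notebook the LRA callback receives base_model directly, so this cell is a\n",
    "# generic sketch rather than the notebook's own mechanism.\n",
    "if freeze:\n",
    "    base_model.trainable=False  # stop updating the pretrained backbone\n",
    "    # recompile so the change in trainable weights takes effect\n",
    "    model.compile(Adamax(learning_rate=.001), loss='categorical_crossentropy', metrics=['accuracy'])"
   ]
  },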
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "af65bfd1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# verbose=0 because the LRA callback prints its own per-epoch summary; the recorded\n",
    "# training curves are plotted in the next cell\n",
    "history=model.fit(x=train_gen, epochs=epochs, verbose=0, callbacks=callbacks, validation_data=valid_gen,\n",
    "                  validation_steps=None, shuffle=False, initial_epoch=0)"
   ]
  },
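  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f7a8b9c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch using matplotlib (imported at the top of the notebook) to visualize\n",
    "# the loss and accuracy curves recorded by model.fit above.\n",
    "plt.figure(figsize=(12, 4))\n",
    "plt.subplot(1, 2, 1)\n",
    "plt.plot(history.history['loss'], label='train loss')\n",
    "plt.plot(history.history['val_loss'], label='valid loss')\n",
    "plt.legend()\n",
    "plt.subplot(1, 2, 2)\n",
    "plt.plot(history.history['accuracy'], label='train accuracy')\n",
    "plt.plot(history.history['val_accuracy'], label='valid accuracy')\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  },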
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf5aff88",
   "metadata": {},
   "outputs": [],
   "source": [
    "subject='leukemia'\n",
    "print_code=0\n",
    "preds=model.predict(test_gen)\n",
    "# print_info is defined earlier in the notebook and returns the test accuracy;\n",
    "# a standalone confusion matrix and classification report follow in the next cell\n",
    "acc=print_info( test_gen, preds, print_code, working_dir, subject )"
   ]
  },
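  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a9b0c1d2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check with sklearn (confusion_matrix and classification_report are imported\n",
    "# at the top of the notebook), built directly from the predictions above.\n",
    "y_pred=np.argmax(preds, axis=1)\n",
    "y_true=test_gen.labels  # test_gen was created with shuffle=False, so order matches preds\n",
    "print(confusion_matrix(y_true, y_pred))\n",
    "print(classification_report(y_true, y_pred, target_names=classes))"
   ]
  },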
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "09616c28",
   "metadata": {},
   "outputs": [],
   "source": [
    "# saver is defined earlier in the notebook; it saves the model and a csv and returns\n",
    "# their locations (the saved model is reloaded and sanity-checked in the next cell)\n",
    "model_save_loc, csv_save_loc=saver(working_dir, model, model_name, subject, acc, img_size, 1, train_gen)"
   ]
  },
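  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b3c4d5e6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check: reload the saved model and re-evaluate it, assuming saver()\n",
    "# wrote a format that load_model can read (e.g. an .h5 file); illustration only.\n",
    "restored=load_model(model_save_loc)\n",
    "loss, accuracy=restored.evaluate(test_gen, steps=test_steps, verbose=1)\n",
    "print('restored model accuracy:', accuracy)"
   ]
  }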
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}