[198e90]: / leukemia detection.ipynb

Download this file

943 lines (942 with data), 67.5 kB

{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "59654c10",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "modules loaded\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "# must be set before tensorflow is imported to suppress C++ level log spam\n",
    "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "from tensorflow.keras import backend as K\n",
    "from tensorflow.keras.layers import Dense, Activation,Dropout,Conv2D, MaxPooling2D,BatchNormalization, Flatten\n",
    "from tensorflow.keras.optimizers import Adam, Adamax\n",
    "from tensorflow.keras.metrics import categorical_crossentropy\n",
    "from tensorflow.keras import regularizers\n",
    "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
    "from tensorflow.keras.models import Model, load_model, Sequential\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import shutil\n",
    "import time\n",
    "import cv2\n",
    "from tqdm import tqdm\n",
    "from sklearn.model_selection import train_test_split\n",
    "import matplotlib.pyplot as plt\n",
    "from matplotlib.pyplot import imshow\n",
    "import seaborn as sns\n",
    "sns.set_style('darkgrid')\n",
    "from PIL import Image\n",
    "from sklearn.metrics import confusion_matrix, classification_report\n",
    "# fixed: IPython.core.display is deprecated; display/HTML live in IPython.display\n",
    "from IPython.display import display, HTML\n",
    "# stop annoying tensorflow warning messages\n",
    "import logging\n",
    "logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n",
    "print ('modules loaded')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "fa7de5f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "def show_image_samples(gen ):\n",
    "    \"\"\"Display up to 25 images from the next batch of generator `gen`, titled with their class names.\"\"\"\n",
    "    class_names = list(gen.class_indices.keys())\n",
    "    images, labels = next(gen)  # draw one batch from the generator\n",
    "    plt.figure(figsize=(20, 20))\n",
    "    n_show = min(len(labels), 25)  # cap the display at 25 images\n",
    "    for idx in range(n_show):\n",
    "        plt.subplot(5, 5, idx + 1)\n",
    "        # divide by 255 for display -- assumes the generator yields 0-255 pixel values; TODO confirm\n",
    "        plt.imshow(images[idx] / 255)\n",
    "        plt.title(class_names[np.argmax(labels[idx])], color='blue', fontsize=12)\n",
    "        plt.axis('off')\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3cc88fda",
   "metadata": {},
   "outputs": [],
   "source": [
    "def show_images(tdir):\n",
    "    \"\"\"Show one sample image ('1.jpg') from each class sub-directory of tdir.\"\"\"\n",
    "    class_dirs = os.listdir(tdir)\n",
    "    columns = 5\n",
    "    rows = int(np.ceil(len(class_dirs) / columns))\n",
    "    plt.figure(figsize=(20, rows * 4))\n",
    "    for i, klass in enumerate(class_dirs):\n",
    "        # assumes every class directory contains a file named '1.jpg' -- TODO confirm\n",
    "        img = plt.imread(os.path.join(tdir, klass, '1.jpg'))\n",
    "        plt.subplot(rows, columns, i + 1)\n",
    "        plt.axis('off')\n",
    "        plt.title(klass, color='blue', fontsize=12)\n",
    "        plt.imshow(img)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "564a3b4b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def print_in_color(txt_msg,fore_tupple,back_tupple,):\n",
    "    \"\"\"Print txt_msg using ANSI 24-bit color escapes: foreground fore_tupple (r,g,b) on background back_tupple (r,g,b).\"\"\"\n",
    "    rf,gf,bf=fore_tupple\n",
    "    rb,gb,bb=back_tupple\n",
    "    mat='\\33[38;2;' + str(rf) +';' + str(gf) + ';' + str(bf) + ';48;2;' + str(rb) + ';' +str(gb) + ';' + str(bb) +'m'\n",
    "    # fixed: concatenate directly instead of ('{0}' + txt_msg).format(mat) --\n",
    "    # the old form raised/garbled whenever txt_msg itself contained '{' or '}'\n",
    "    print(mat + txt_msg, flush=True)\n",
    "    print('\\33[0m', flush=True) # returns default print color back to black\n",
    "    return"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "9bbdb8ab",
   "metadata": {},
   "outputs": [],
   "source": [
    "class LRA(keras.callbacks.Callback):\n",
    "    \"\"\"Callback that manages the learning rate and tracks the best weights.\n",
    "\n",
    "    While training accuracy is below `threshold`, the lr is multiplied by\n",
    "    `factor` after `patience` epochs without a training-accuracy improvement;\n",
    "    once above the threshold, validation loss is monitored instead.  Training\n",
    "    halts after `stop_patience` consecutive lr reductions with no improvement.\n",
    "    The best weights seen are restored when training ends.  If `ask_epoch` is\n",
    "    not None the user is periodically asked to halt, fine tune, or continue.\n",
    "    \"\"\"\n",
    "    def __init__(self,model, base_model, patience,stop_patience, threshold, factor, dwell, batches, initial_epoch,epochs, ask_epoch):\n",
    "        super(LRA, self).__init__()\n",
    "        self.model=model\n",
    "        self.base_model=base_model\n",
    "        self.patience=patience # specifies how many epochs without improvement before learning rate is adjusted\n",
    "        self.stop_patience=stop_patience # specifies how many times to adjust lr without improvement to stop training\n",
    "        self.threshold=threshold # specifies training accuracy threshold when lr will be adjusted based on validation loss\n",
    "        self.factor=factor # factor by which to reduce the learning rate\n",
    "        self.dwell=dwell # when True, restore the best weights after each lr reduction\n",
    "        self.batches=batches # number of training batches to run per epoch\n",
    "        self.initial_epoch=initial_epoch\n",
    "        self.epochs=epochs\n",
    "        self.ask_epoch=ask_epoch\n",
    "        self.ask_epoch_initial=ask_epoch # save this value to restore if restarting training\n",
    "        # callback variables\n",
    "        self.count=0 # how many epochs lr has gone without improvement\n",
    "        self.stop_count=0 # consecutive lr adjustments with no improvement\n",
    "        self.best_epoch=1   # epoch with the lowest loss\n",
    "        self.initial_lr=float(tf.keras.backend.get_value(model.optimizer.lr)) # get the initial learning rate and save it\n",
    "        self.highest_tracc=0.0 # set highest training accuracy to 0 initially\n",
    "        self.lowest_vloss=np.inf # set lowest validation loss to infinity initially\n",
    "        self.best_weights=self.model.get_weights() # set best weights to model's initial weights\n",
    "        self.initial_weights=self.model.get_weights()   # save initial weights if they have to get restored\n",
    "\n",
    "    def on_train_begin(self, logs=None):\n",
    "        # fixed: previously read the global `base_model` instead of self.base_model\n",
    "        if self.base_model != None:\n",
    "            if self.base_model.trainable:\n",
    "                msg=' initializing callback starting training with base_model trainable'\n",
    "            else:\n",
    "                msg='initializing callback starting training with base_model not trainable'\n",
    "        else:\n",
    "            msg='initialing callback and starting training'\n",
    "        print_in_color (msg, (244, 252, 3), (55,65,80))\n",
    "        msg='{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}{8:10s}{9:^8s}'.format('Epoch', 'Loss', 'Accuracy',\n",
    "                                                                                              'V_loss','V_acc', 'LR', 'Next LR', 'Monitor','% Improv', 'Duration')\n",
    "        print_in_color(msg, (244,252,3), (55,65,80))\n",
    "        self.start_time= time.time()\n",
    "\n",
    "    def on_train_end(self, logs=None):\n",
    "        stop_time=time.time()\n",
    "        tr_duration= stop_time- self.start_time\n",
    "        hours = tr_duration // 3600\n",
    "        minutes = (tr_duration - (hours * 3600)) // 60\n",
    "        seconds = tr_duration - ((hours * 3600) + (minutes * 60))\n",
    "\n",
    "        self.model.set_weights(self.best_weights) # set the weights of the model to the best weights\n",
    "        msg=f'Training is completed - model is set with weights from epoch {self.best_epoch} '\n",
    "        print_in_color(msg, (0,255,0), (55,65,80))\n",
    "        msg = f'training elapsed time was {str(hours)} hours, {minutes:4.1f} minutes, {seconds:4.2f} seconds)'\n",
    "        print_in_color(msg, (0,255,0), (55,65,80))\n",
    "\n",
    "    def on_train_batch_end(self, batch, logs=None):\n",
    "        acc=logs.get('accuracy')* 100  # get training accuracy\n",
    "        loss=logs.get('loss')\n",
    "        msg='{0:20s}processing batch {1:4s} of {2:5s} accuracy= {3:8.3f}  loss: {4:8.5f}'.format(' ', str(batch), str(self.batches), acc, loss)\n",
    "        print(msg, '\\r', end='') # prints over on the same line to show running batch count\n",
    "\n",
    "    def on_epoch_begin(self,epoch, logs=None):\n",
    "        self.now= time.time()\n",
    "\n",
    "    def on_epoch_end(self, epoch, logs=None):  # method runs on the end of each epoch\n",
    "        later=time.time()\n",
    "        duration=later-self.now\n",
    "        lr=float(tf.keras.backend.get_value(self.model.optimizer.lr)) # get the current learning rate\n",
    "        current_lr=lr\n",
    "        v_loss=logs.get('val_loss')  # get the validation loss for this epoch\n",
    "        acc=logs.get('accuracy')  # get training accuracy\n",
    "        v_acc=logs.get('val_accuracy')\n",
    "        loss=logs.get('loss')\n",
    "        if acc < self.threshold: # if training accuracy is below threshold adjust lr based on training accuracy\n",
    "            monitor='accuracy'\n",
    "            if epoch ==0:\n",
    "                pimprov=0.0\n",
    "            else:\n",
    "                pimprov= (acc-self.highest_tracc )*100/self.highest_tracc\n",
    "            if acc>self.highest_tracc: # training accuracy improved in the epoch\n",
    "                self.highest_tracc=acc # set new highest training accuracy\n",
    "                self.best_weights=self.model.get_weights() # training accuracy improved so save the weights\n",
    "                self.count=0 # set count to 0 since training accuracy improved\n",
    "                self.stop_count=0 # set stop counter to 0\n",
    "                if v_loss<self.lowest_vloss:\n",
    "                    self.lowest_vloss=v_loss\n",
    "                color= (0,255,0)\n",
    "                self.best_epoch=epoch + 1  # set the value of best epoch for this epoch\n",
    "            else:\n",
    "                # training accuracy did not improve check if this has happened for patience number of epochs\n",
    "                # if so adjust learning rate\n",
    "                if self.count>=self.patience -1: # lr should be adjusted\n",
    "                    color=(245, 170, 66)\n",
    "                    lr= lr* self.factor # adjust the learning rate by factor\n",
    "                    tf.keras.backend.set_value(self.model.optimizer.lr, lr) # set the learning rate in the optimizer\n",
    "                    self.count=0 # reset the patience counter (fixed: duplicate reset removed)\n",
    "                    self.stop_count=self.stop_count + 1 # count the number of consecutive lr adjustments\n",
    "                    if self.dwell:\n",
    "                        self.model.set_weights(self.best_weights) # return to better point in N space\n",
    "                    else:\n",
    "                        if v_loss<self.lowest_vloss:\n",
    "                            self.lowest_vloss=v_loss\n",
    "                else:\n",
    "                    self.count=self.count +1 # increment patience counter\n",
    "        else: # training accuracy is above threshold so adjust learning rate based on validation loss\n",
    "            monitor='val_loss'\n",
    "            if epoch ==0:\n",
    "                pimprov=0.0\n",
    "            else:\n",
    "                pimprov= (self.lowest_vloss- v_loss )*100/self.lowest_vloss\n",
    "            if v_loss< self.lowest_vloss: # check if the validation loss improved\n",
    "                self.lowest_vloss=v_loss # replace lowest validation loss with new validation loss\n",
    "                self.best_weights=self.model.get_weights() # validation loss improved so save the weights\n",
    "                self.count=0 # reset count since validation loss improved\n",
    "                self.stop_count=0\n",
    "                color=(0,255,0)\n",
    "                self.best_epoch=epoch + 1 # set the value of the best epoch to this epoch\n",
    "            else: # validation loss did not improve\n",
    "                if self.count>=self.patience-1: # need to adjust lr\n",
    "                    color=(245, 170, 66)\n",
    "                    lr=lr * self.factor # adjust the learning rate\n",
    "                    self.stop_count=self.stop_count + 1 # increment stop counter because lr was adjusted\n",
    "                    self.count=0 # reset counter\n",
    "                    tf.keras.backend.set_value(self.model.optimizer.lr, lr) # set the learning rate in the optimizer\n",
    "                    if self.dwell:\n",
    "                        self.model.set_weights(self.best_weights) # return to better point in N space\n",
    "                else:\n",
    "                    self.count =self.count +1 # increment the patience counter\n",
    "                if acc>self.highest_tracc:\n",
    "                    self.highest_tracc= acc\n",
    "        msg=f'{str(epoch+1):^3s}/{str(self.epochs):4s} {loss:^9.3f}{acc*100:^9.3f}{v_loss:^9.5f}{v_acc*100:^9.3f}{current_lr:^9.5f}{lr:^9.5f}{monitor:^11s}{pimprov:^10.2f}{duration:^8.2f}'\n",
    "        print_in_color (msg,color, (55,65,80))\n",
    "        if self.stop_count> self.stop_patience - 1: # check if learning rate has been adjusted stop_count times with no improvement\n",
    "            msg=f' training has been halted at epoch {epoch + 1} after {self.stop_patience} adjustments of learning rate with no improvement'\n",
    "            print_in_color(msg, (0,255,255), (55,65,80))\n",
    "            self.model.stop_training = True # stop training\n",
    "        else:\n",
    "            if self.ask_epoch !=None:\n",
    "                if epoch + 1 >= self.ask_epoch:\n",
    "                    # fixed: previously read the global `base_model`; also guard against base_model=None\n",
    "                    if self.base_model is None or self.base_model.trainable:\n",
    "                        msg='enter H to halt training or an integer for number of epochs to run then ask again'\n",
    "                    else:\n",
    "                        msg='enter H to halt training ,F to fine tune model, or an integer for number of epochs to run then ask again'\n",
    "                    print_in_color(msg, (0,255,255), (55,65,80))\n",
    "                    ans=input('')\n",
    "                    if ans=='H' or ans=='h':\n",
    "                        msg=f'training has been halted at epoch {epoch + 1} due to user input'\n",
    "                        print_in_color(msg, (0,255,255), (55,65,80))\n",
    "                        self.model.stop_training = True # stop training\n",
    "                    elif ans == 'F' or ans=='f':\n",
    "                        if self.base_model is None or self.base_model.trainable:\n",
    "                            msg='base_model is already set as trainable'\n",
    "                        else:\n",
    "                            msg='setting base_model as trainable for fine tuning of model'\n",
    "                            self.base_model.trainable=True\n",
    "                        print_in_color(msg, (0, 255,255), (55,65,80))\n",
    "                        # fixed: header format previously had only 9 fields so 'Duration' was silently dropped\n",
    "                        msg='{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}{8:10s}{9:^8s}'.format('Epoch', 'Loss', 'Accuracy',\n",
    "                                                                                              'V_loss','V_acc', 'LR', 'Next LR', 'Monitor','% Improv', 'Duration')\n",
    "                        print_in_color(msg, (244,252,3), (55,65,80))\n",
    "                        self.count=0\n",
    "                        self.stop_count=0\n",
    "                        self.ask_epoch = epoch + 1 + self.ask_epoch_initial\n",
    "                    else:\n",
    "                        ans=int(ans) # NOTE(review): any input other than H/F/an integer raises ValueError here\n",
    "                        self.ask_epoch +=ans\n",
    "                        msg=f' training will continue until epoch ' + str(self.ask_epoch)\n",
    "                        print_in_color(msg, (0, 255,255), (55,65,80))\n",
    "                        msg='{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}{8:10s}{9:^8s}'.format('Epoch', 'Loss', 'Accuracy',\n",
    "                                                                                              'V_loss','V_acc', 'LR', 'Next LR', 'Monitor','% Improv', 'Duration')\n",
    "                        print_in_color(msg, (244,252,3), (55,65,80))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "e155beef",
   "metadata": {},
   "outputs": [],
   "source": [
    "def tr_plot(tr_data, start_epoch):\n",
    "    \"\"\"Plot training/validation loss and accuracy from a keras History object,\n",
    "    marking the epochs with the lowest validation loss and the highest\n",
    "    validation accuracy.  start_epoch offsets the x axis for resumed runs.\"\"\"\n",
    "    tacc=tr_data.history['accuracy']\n",
    "    tloss=tr_data.history['loss']\n",
    "    vacc=tr_data.history['val_accuracy']\n",
    "    vloss=tr_data.history['val_loss']\n",
    "    Epoch_count=len(tacc)+ start_epoch\n",
    "    Epochs=[]\n",
    "    for i in range (start_epoch ,Epoch_count):\n",
    "        Epochs.append(i+1)\n",
    "    index_loss=np.argmin(vloss)  # epoch index with the lowest validation loss\n",
    "    val_lowest=vloss[index_loss]\n",
    "    index_acc=np.argmax(vacc)\n",
    "    acc_highest=vacc[index_acc]\n",
    "    plt.style.use('fivethirtyeight')\n",
    "    sc_label='best epoch= '+ str(index_loss+1 +start_epoch)\n",
    "    vc_label='best epoch= '+ str(index_acc + 1+ start_epoch)\n",
    "    fig,axes=plt.subplots(nrows=1, ncols=2, figsize=(20,8))\n",
    "    axes[0].plot(Epochs,tloss, 'r', label='Training loss')\n",
    "    axes[0].plot(Epochs,vloss,'g',label='Validation loss' )\n",
    "    axes[0].scatter(index_loss+1 +start_epoch,val_lowest, s=150, c= 'blue', label=sc_label)\n",
    "    axes[0].set_title('Training and Validation Loss')\n",
    "    axes[0].set_xlabel('Epochs')\n",
    "    axes[0].set_ylabel('Loss')\n",
    "    axes[0].legend()\n",
    "    axes[1].plot (Epochs,tacc,'r',label= 'Training Accuracy')\n",
    "    axes[1].plot (Epochs,vacc,'g',label= 'Validation Accuracy')\n",
    "    axes[1].scatter(index_acc+1 +start_epoch,acc_highest, s=150, c= 'blue', label=vc_label)\n",
    "    axes[1].set_title('Training and Validation Accuracy')\n",
    "    axes[1].set_xlabel('Epochs')\n",
    "    axes[1].set_ylabel('Accuracy')\n",
    "    axes[1].legend()\n",
    "    plt.tight_layout()  # fixed: was `plt.tight_layout` (bare attribute access, a no-op)\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "35f70802",
   "metadata": {},
   "outputs": [],
   "source": [
    "def print_info( test_gen, preds, print_code, save_dir, subject ):\n",
    "    \"\"\"Report test-set results: print overall accuracy, optionally list up to\n",
    "    print_code misclassified files, plot per-class error counts, show a\n",
    "    confusion matrix (when there are <= 30 classes) and print a classification\n",
    "    report.  Returns accuracy as a fraction in [0, 1].\n",
    "    NOTE(review): save_dir and subject are accepted but never used in this\n",
    "    body -- confirm whether results were meant to be written to disk.\n",
    "    \"\"\"\n",
    "    class_dict=test_gen.class_indices\n",
    "    labels= test_gen.labels\n",
    "    file_names= test_gen.filenames \n",
    "    error_list=[]\n",
    "    true_class=[]\n",
    "    pred_class=[]\n",
    "    prob_list=[]\n",
    "    new_dict={}\n",
    "    error_indices=[]\n",
    "    y_pred=[]\n",
    "    for key,value in class_dict.items():\n",
    "        new_dict[value]=key             # dictionary {integer of class number: string of class name}\n",
    "    # NOTE(review): comment said new_dict should be stored as a text file in save_dir, but it never is\n",
    "    classes=list(new_dict.values())     # list of string of class names     \n",
    "    errors=0      \n",
    "    # tally misclassifications and collect details for the error listing below\n",
    "    for i, p in enumerate(preds):\n",
    "        pred_index=np.argmax(p)         \n",
    "        true_index=labels[i]  # labels are integer values\n",
    "        if pred_index != true_index: # a misclassification has occurred\n",
    "            error_list.append(file_names[i])\n",
    "            true_class.append(new_dict[true_index])\n",
    "            pred_class.append(new_dict[pred_index])\n",
    "            prob_list.append(p[pred_index])\n",
    "            error_indices.append(true_index)            \n",
    "            errors=errors + 1\n",
    "        y_pred.append(pred_index) \n",
    "    tests=len(preds)\n",
    "    acc= (1-errors/tests) *100\n",
    "    msg= f'There were {errors} errors in {tests} test cases Model accuracy= {acc: 6.2f} %'\n",
    "    print_in_color(msg,(0,255,255),(55,65,80))\n",
    "    if print_code !=0:\n",
    "        if errors>0:\n",
    "            # list at most print_code (or all, if fewer) misclassified files\n",
    "            if print_code>errors:\n",
    "                r=errors\n",
    "            else:\n",
    "                r=print_code           \n",
    "            msg='{0:^28s}{1:^28s}{2:^28s}{3:^16s}'.format('Filename', 'Predicted Class' , 'True Class', 'Probability')\n",
    "            print_in_color(msg, (0,255,0),(55,65,80))\n",
    "            for i in range(r):                \n",
    "                split1=os.path.split(error_list[i])                \n",
    "                split2=os.path.split(split1[0])                \n",
    "                fname=split2[1] + '/' + split1[1]\n",
    "                msg='{0:^28s}{1:^28s}{2:^28s}{3:4s}{4:^6.4f}'.format(fname, pred_class[i],true_class[i], ' ', prob_list[i])\n",
    "                print_in_color(msg, (255,255,255), (55,65,60))\n",
    "                #print(error_list[i]  , pred_class[i], true_class[i], prob_list[i])               \n",
    "        else:\n",
    "            msg='With accuracy of 100 % there are no errors to print'\n",
    "            print_in_color(msg, (0,255,0),(55,65,80))\n",
    "    # bar chart: number of errors per class (only classes that had errors)\n",
    "    if errors>0:\n",
    "        plot_bar=[]\n",
    "        plot_class=[]\n",
    "        for  key, value in new_dict.items():        \n",
    "            count=error_indices.count(key) \n",
    "            if count!=0:\n",
    "                plot_bar.append(count) # list containing how many times a class c had an error\n",
    "                plot_class.append(value)   # stores the class \n",
    "        fig=plt.figure()\n",
    "        fig.set_figheight(len(plot_class)/3)\n",
    "        fig.set_figwidth(10)\n",
    "        plt.style.use('fivethirtyeight')\n",
    "        for i in range(0, len(plot_class)):\n",
    "            c=plot_class[i]\n",
    "            x=plot_bar[i]\n",
    "            plt.barh(c, x, )\n",
    "            plt.title( ' Errors by Class on Test Set')\n",
    "    # confusion matrix and classification report\n",
    "    y_true= np.array(labels)        \n",
    "    y_pred=np.array(y_pred)\n",
    "    if len(classes)<= 30:\n",
    "        # create a confusion matrix \n",
    "        cm = confusion_matrix(y_true, y_pred )        \n",
    "        length=len(classes)\n",
    "        if length<8:\n",
    "            fig_width=8\n",
    "            fig_height=8\n",
    "        else:\n",
    "            fig_width= int(length * .5)\n",
    "            fig_height= int(length * .5)\n",
    "        plt.figure(figsize=(fig_width, fig_height))\n",
    "        sns.heatmap(cm, annot=True, vmin=0, fmt='g', cmap='Blues', cbar=False)       \n",
    "        plt.xticks(np.arange(length)+.5, classes, rotation= 90)\n",
    "        plt.yticks(np.arange(length)+.5, classes, rotation=0)\n",
    "        plt.xlabel(\"Predicted\")\n",
    "        plt.ylabel(\"Actual\")\n",
    "        plt.title(\"Confusion Matrix\")\n",
    "        plt.show()\n",
    "    clr = classification_report(y_true, y_pred, target_names=classes, digits= 4)\n",
    "    print(\"Classification Report:\\n----------------------\\n\", clr)\n",
    "    return acc/100"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e7d27934",
   "metadata": {},
   "outputs": [],
   "source": [
    "def saver(save_path, model, model_name, subject, accuracy,img_size, scalar, generator):    \n",
    "    \"\"\"Save the model as <model_name>-<subject>-<accuracy>.h5 in save_path and write\n",
    "    class_dict.csv describing class indices, image size and pixel scaling.\n",
    "    Returns (model_save_loc, csv_save_loc).\"\"\"\n",
    "    # first save the model\n",
    "    # fixed: previously formatted the undefined global `acc` instead of the `accuracy` parameter\n",
    "    save_id=model_name +  '-' + subject +'-'+ str(accuracy)[:str(accuracy).rfind('.')+3] + '.h5'\n",
    "    model_save_loc=os.path.join(save_path, save_id)\n",
    "    model.save(model_save_loc)\n",
    "    print_in_color ('model was saved as ' + model_save_loc, (0,255,0),(55,65,80)) \n",
    "    # now create the class_df and convert to csv file    \n",
    "    class_dict=generator.class_indices \n",
    "    height=[]\n",
    "    width=[]\n",
    "    scale=[]\n",
    "    # every class row carries the same image size and scaling so predictor() can reconstruct them\n",
    "    for i in range(len(class_dict)):\n",
    "        height.append(img_size[0])\n",
    "        width.append(img_size[1])\n",
    "        scale.append(scalar)\n",
    "    Index_series=pd.Series(list(class_dict.values()), name='class_index')\n",
    "    Class_series=pd.Series(list(class_dict.keys()), name='class') \n",
    "    Height_series=pd.Series(height, name='height')\n",
    "    Width_series=pd.Series(width, name='width')\n",
    "    Scale_series=pd.Series(scale, name='scale by')\n",
    "    class_df=pd.concat([Index_series, Class_series, Height_series, Width_series, Scale_series], axis=1)    \n",
    "    csv_name='class_dict.csv'\n",
    "    csv_save_loc=os.path.join(save_path, csv_name)\n",
    "    class_df.to_csv(csv_save_loc, index=False) \n",
    "    print_in_color ('class csv file was saved as ' + csv_save_loc, (0,255,0),(55,65,80)) \n",
    "    return model_save_loc, csv_save_loc\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "6268a332",
   "metadata": {},
   "outputs": [],
   "source": [
    "def predictor(sdir, csv_path,  model_path, averaged=True, verbose=True):    \n",
    "    \"\"\"Classify every image file in directory sdir using the model at model_path.\n",
    "\n",
    "    csv_path points at the class_dict.csv written by saver(); it supplies the\n",
    "    class names, expected image size and pixel scaling.  When averaged is True\n",
    "    the class probabilities are summed over all images and a single\n",
    "    (klass, prob, img, None) is returned; otherwise per-image predictions are\n",
    "    returned as (None, None, None, df).\n",
    "    \"\"\"\n",
    "    # read in the csv file\n",
    "    class_df=pd.read_csv(csv_path)    \n",
    "    class_count=len(class_df['class'].unique())\n",
    "    img_height=int(class_df['height'].iloc[0])\n",
    "    img_width =int(class_df['width'].iloc[0])\n",
    "    img_size=(img_width, img_height)    \n",
    "    scale=class_df['scale by'].iloc[0]    \n",
    "    # determine value to scale image pixels by\n",
    "    # NOTE(review): the bare except assumes a non-integer scale is a string of the form 'a*s2-s1' -- confirm the csv format\n",
    "    try: \n",
    "        s=int(scale)\n",
    "        s2=1\n",
    "        s1=0\n",
    "    except:\n",
    "        split=scale.split('-')\n",
    "        s1=float(split[1])\n",
    "        s2=float(split[0].split('*')[1])\n",
    "    path_list=[]\n",
    "    paths=os.listdir(sdir)    \n",
    "    for f in paths:\n",
    "        path_list.append(os.path.join(sdir,f))\n",
    "    if verbose:\n",
    "        print (' Model is being loaded- this will take about 10 seconds')\n",
    "    model=load_model(model_path)\n",
    "    image_count=len(path_list) \n",
    "    image_list=[]\n",
    "    file_list=[]\n",
    "    good_image_count=0\n",
    "    for i in range (image_count):        \n",
    "        try:\n",
    "            img=cv2.imread(path_list[i])\n",
    "            img=cv2.resize(img, img_size)\n",
    "            img=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)            \n",
    "            good_image_count +=1\n",
    "            img=img*s2 - s1             \n",
    "            image_list.append(img)\n",
    "            file_name=os.path.split(path_list[i])[1]\n",
    "            file_list.append(file_name)\n",
    "        except:\n",
    "            # NOTE(review): bare except treats any read/resize/convert failure as an invalid image file\n",
    "            if verbose:\n",
    "                print ( path_list[i], ' is an invalid image file')\n",
    "    if good_image_count==1: # with a single image, force the averaged prediction path\n",
    "        averaged=True\n",
    "    image_array=np.array(image_list)    \n",
    "    # make predictions on images, sum the probabilities of each class then find class index with\n",
    "    # highest probability\n",
    "    preds=model.predict(image_array)    \n",
    "    if averaged:\n",
    "        psum=[]\n",
    "        for i in range (class_count): # create all 0 values list\n",
    "            psum.append(0)    \n",
    "        for p in preds: # iterate over all predictions\n",
    "            for i in range (class_count):\n",
    "                psum[i]=psum[i] + p[i]  # sum the probabilities   \n",
    "        index=np.argmax(psum) # find the class index with the highest probability sum        \n",
    "        klass=class_df['class'].iloc[index] # get the class name that corresponds to the index\n",
    "        prob=psum[index]/good_image_count * 100  # get the probability average         \n",
    "        # to show the correct image run predict again and select first image that has same index\n",
    "        for img in image_array:  #iterate through the images    \n",
    "            test_img=np.expand_dims(img, axis=0) # since it is a single image expand dimensions \n",
    "            test_index=np.argmax(model.predict(test_img)) # for this image find the class index with highest probability\n",
    "            if test_index== index: # see if this image has the same index as was selected previously\n",
    "                if verbose: # show image and print result if verbose=1\n",
    "                    plt.axis('off')\n",
    "                    plt.imshow(img) # show the image\n",
    "                    print (f'predicted species is {klass} with a probability of {prob:6.4f} % ')\n",
    "                break # found an image that represents the predicted class      \n",
    "        return klass, prob, img, None\n",
    "    else: # create individual predictions for each image\n",
    "        pred_class=[]\n",
    "        prob_list=[]\n",
    "        for i, p in enumerate(preds):\n",
    "            index=np.argmax(p) # find the class index with the highest probability sum\n",
    "            klass=class_df['class'].iloc[index] # get the class name that corresponds to the index\n",
    "            image_file= file_list[i]\n",
    "            pred_class.append(klass)\n",
    "            prob_list.append(p[index])            \n",
    "        Fseries=pd.Series(file_list, name='image file')\n",
    "        Lseries=pd.Series(pred_class, name= 'species')\n",
    "        Pseries=pd.Series(prob_list, name='probability')\n",
    "        df=pd.concat([Fseries, Lseries, Pseries], axis=1)\n",
    "        if verbose:\n",
    "            length= len(df)\n",
    "            print (df.head(length))\n",
    "        return None, None, None, df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "516211e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "def trim (df, max_size, min_size, column):\n",
    "    \"\"\"Return a copy of df where every class in `column` has at most max_size rows;\n",
    "    classes with fewer than min_size rows are dropped entirely.\"\"\"\n",
    "    df = df.copy()\n",
    "    original_class_count = len(list(df[column].unique()))\n",
    "    print ('Original Number of classes in dataframe: ', original_class_count)\n",
    "    groups = df.groupby(column)\n",
    "    kept = []\n",
    "    for label in df[column].unique():\n",
    "        group = groups.get_group(label)\n",
    "        n_samples = len(group)\n",
    "        if n_samples > max_size:\n",
    "            # randomly down-sample the over-represented class to max_size rows\n",
    "            subset, _ = train_test_split(group, train_size=max_size, shuffle=True, random_state=123, stratify=group[column])\n",
    "            kept.append(subset)\n",
    "        elif n_samples >= min_size:\n",
    "            kept.append(group)\n",
    "        # classes with fewer than min_size rows are silently dropped\n",
    "    df = pd.concat(kept, axis=0).reset_index(drop=True)\n",
    "    final_class_count = len(list(df[column].unique()))\n",
    "    if final_class_count != original_class_count:\n",
    "        print ('*** WARNING***  dataframe has a reduced number of classes' )\n",
    "    balance = list(df[column].value_counts())\n",
    "    print (balance)\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "0b66bd08",
   "metadata": {},
   "outputs": [],
   "source": [
    "def balance(train_df,max_samples, min_samples, column, working_dir, image_size):\n",
    "    '''Balance the training set so every class has max_samples images.\n",
    "\n",
    "    First trims over-represented classes with trim(), then generates\n",
    "    augmented images (flips, rotations, shifts, zooms) for classes that\n",
    "    have fewer than max_samples rows.  Augmented files are written under\n",
    "    working_dir/aug/<label> and merged into the returned dataframe.\n",
    "    '''\n",
    "    train_df=train_df.copy()\n",
    "    train_df=trim (train_df, max_samples, min_samples, column)    \n",
    "    # make directories to store augmented images\n",
    "    aug_dir=os.path.join(working_dir, 'aug')\n",
    "    if os.path.isdir(aug_dir):\n",
    "        shutil.rmtree(aug_dir)  # remove leftovers from a previous run\n",
    "    os.mkdir(aug_dir)\n",
    "    for label in train_df['labels'].unique():    \n",
    "        dir_path=os.path.join(aug_dir,label)    \n",
    "        os.mkdir(dir_path)\n",
    "    # create and store the augmented images  \n",
    "    total=0\n",
    "    gen=ImageDataGenerator(horizontal_flip=True,  rotation_range=20, width_shift_range=.2,\n",
    "                                  height_shift_range=.2, zoom_range=.2)\n",
    "    groups=train_df.groupby('labels') # group by class\n",
    "    for label in train_df['labels'].unique():  # for every class               \n",
    "        group=groups.get_group(label)  # a dataframe holding only rows with the specified label \n",
    "        sample_count=len(group)   # determine how many samples there are in this class  \n",
    "        if sample_count< max_samples: # if the class has less than target number of images\n",
    "            aug_img_count=0\n",
    "            delta=max_samples-sample_count  # number of augmented images to create\n",
    "            target_dir=os.path.join(aug_dir, label)  # define where to write the images    \n",
    "            aug_gen=gen.flow_from_dataframe( group,  x_col='filepaths', y_col=None, target_size=image_size,\n",
    "                                            class_mode=None, batch_size=1, shuffle=False, \n",
    "                                            save_to_dir=target_dir, save_prefix='aug-', color_mode='rgb',\n",
    "                                            save_format='jpg')\n",
    "            while aug_img_count<delta:\n",
    "                images=next(aug_gen)  # each next() call saves one augmented image to target_dir\n",
    "                aug_img_count += len(images)\n",
    "            total +=aug_img_count\n",
    "    print('Total Augmented images created= ', total)\n",
    "    # create aug_df and merge with train_df to create composite training set ndf\n",
    "    if total>0:\n",
    "        aug_fpaths=[]\n",
    "        aug_labels=[]\n",
    "        classlist=os.listdir(aug_dir)\n",
    "        for klass in classlist:\n",
    "            classpath=os.path.join(aug_dir, klass)     \n",
    "            flist=os.listdir(classpath)    \n",
    "            for f in flist:        \n",
    "                fpath=os.path.join(classpath,f)         \n",
    "                aug_fpaths.append(fpath)\n",
    "                aug_labels.append(klass)\n",
    "        Fseries=pd.Series(aug_fpaths, name='filepaths')\n",
    "        Lseries=pd.Series(aug_labels, name='labels')\n",
    "        aug_df=pd.concat([Fseries, Lseries], axis=1)\n",
    "        train_df=pd.concat([train_df,aug_df], axis=0).reset_index(drop=True)\n",
    "   \n",
    "    print (list(train_df['labels'].value_counts()) )\n",
    "    return train_df "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "8fc60cf6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Input image shape is  (450, 450, 3)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<matplotlib.image.AxesImage at 0x154c002b0a0>"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAOcAAADnCAYAAADl9EEgAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAABATUlEQVR4nO29abNc15EdujL3qXtBzBMHkRSpgbIkWt3utlqvW2q/50+O8O998SIcfoMd7lZLem6pqaFFiSM4gCTmgcCdqs7O9IdcuXeBIiiKIoAi714RIMA7VJ2qOrlzWrlS3N0xMDCwcdBHfQEDAwMfj2GcAwMbimGcAwMbimGcAwMbimGcAwMbiumTvikiD+s6BgYOLe7XMBmec2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzE96gsYeLAQCADg1GMncXL7eHzNgedPfxWnHzuNCoGLYmd5B2/feAt3lzu4sXsT5vYoL3sAgLi73/ebIg/zWgb+BAgERxbbeOL44xAAV3euY2+1j2NbR3Hu6FkAjieOncezJ5+CmeH44gSObR2HwGAmECggguoAoBABBIYDO8DNg1t45dpreOvm21jOSzjue4sMfA64nwkO43zEKFr+8IsOVK8f//NScO7YGXznK9/BE0efwJntUxB33Dm4heW8wva0jeOLE3BzKMJLmjkMCoMCHqbm7nB3KARQhbsAcExaoAuF6YzbB3fw/p338PN3f4GD+eBBvg2HGsM4NwBFCwSCs8dP48SREziyOIqvnXsOkyjU4/2u5qhueOv6OzhY7a/5LMd22cY3z30Np46cxpHpCMQEYoAAEDgAgzvgRoM0w2zO3xZUUZh5/LyE4YYHBQyAaIGKQBQok6IUgYrg6u4VvHzlZVy48dYw0geAYZyPACqKrWmBp089iccWR/H8ua9CZQtHpm0syjasChSK4o4iYVQOhYtDRFCEJucOdYeKQF0hEJgDjvCOXh0Ch4iDv4BqQDXHLAD4Edf2e1EJVBEInwMOSClQVagooIKpxDUUVbhWXN+/jp+89TNcvnNp5KSfI4ZxPmScO3Ya33nyBTx54gkcKcfgUMzVUWeHV4FD4BYGLAAUDqeXC6NBMxw3oAAoAohqmKTQIFXi9zy8pzgAB6w6ZtAjwgF3VHMYBFDpxsmSkYjANYxVGWqXEgeBimAqQFkU7PkBfn/1FfzqvZewv9p/FG/tlw7DOB8iHj9+Fj/6xg9wauskVkvHwcqxMmBVBWaAuESuB6DXUwF3g0JQ4HBEvCoigCtU4uuqCheBikI1DS+8IYyWCcFcI8Q1tzBaCKo7qgNFNTw1Q2kFwshFACnta6UIIHF9RQVlKijTBFPHjYNr+OmFn+LKnSuw++THA58OwzgfEh4/fg4/+vr3cXQ6geW+YbV0LKtj5YLqUXgpEiY5SXgmiMM8wtcJYXDp8dpH4MCkYaSgcU6qUBgi14yQFeBjWfzbzQCPx5kdMAhUIk9N4ywSxgdVOAQQhQJYlPCrhvi+qkJLgU4FZaFY+hIXbryF3176NW7cvfYI3u0vB4ZxPgQ8fvwc/v7r38fx6Th2
9yoODhzLFbByoKYhIkLZAsdC0tjCUKKCCoDNCxFpXg+gV9XSWhuTKFRYfYXA3VsumsapDogbKkAzFv50GCPcsHBAaXyQ6HvG4SFQjWsWQXy/FIgoFpOiLCZgUXDxzjv477/7r1iOYtFnwjDOB4zHj5/Fj77+NzhRTmJvb8b+0rCagVUNo3BJT+lQLVA4CgsyYYIO8cg7PSLTMFqgfx+C0v6tKAoYunHCHWYClwiRQU8Md1REOyUD3/xTAEzCApFqXI9GcA1knpvXI1Ap4bUnQZkUsljAiuJfL/0Sv3rnnzHb6mG/9V943M8EB0Poc8DjJ87ih1//Po6WE9jZqzhYGubqPYyEoECxENAIDaqKotLaIGb0TgUwj5ZKfmTpPaMQa1HwEYfXyE1NaMXItJOHqgOV/0zDNGRrJf7jIjRcgVSHKDDBUaTCIeFpDTABVDw8KwzmQDGFVmDa2sJfPPVXADAM9HPEMM4/E+ePn8UPv/YDPKYnsLNbsVoaahVYDd90T0UUGYoGpVncEVlekAEEUZQpbKXMHq0SZMgLkNETFhchbDxy9CyZqba6EBlAzGchjqJJREhfLDD3dkjA4gAoypBZ+PjpxqMszNcnMK8QnTHphL94+q8AGH71zs8x2/ywPoIvLUZY+2fg8eNn8bfP/w2O6CkcHBiWy4o6R//P2dYQ0Wj0I4oqC+mkAY0GSgSmLpiY3zntwD0KQzCHuWN2QY3OJiRzSwkSwQRvOSdotOFAWY0FYF6j6ivOSi7DXO+VYfcMmenZJSq5KhoRQFZ34fH4GhXcxVaBLiagCN699RZ+d+k3uHz70n2ZTgMdI+f8nPH48TP4++f/Btt6Crv7jtWqwqrBrDktsn60NftFgCJRYRX3FlayvckqLhg6Ar3yGh5xhmAOa6KJAi4a1Vd61nyseIyozBbE7xh7LuE8He48HOhqwzEae5v07irQUsJY3Wmk4dnToEULtGhrtSy2Jsy+xMXb7+LlS7/F5Q+HkX4ShnF+jji+/Rj+07f+HifKadzdd+wtDVYrKXPsHRYBPG7yIgxbW4gYeadZcHVM0AxJLMJPpxdjFAkwL6wAebBozB9VYagqsCjGIn+raBIJDP2TJs2PfdAMceMh6X2hJCoAmkwhUagUAMFGSoKCsL1TSkEpBVOZUCZAJ8XKV7i2ew278x5ev/oaru9cw+5y58F/SF8gjILQ54RJC7731LdwajqJ5Z7BVoZgsilzPpZaPXuJ8cYHtS68jVmvsAoQfUg4KvM43uvgt/i1+FltjZT4HRNpFVuRpDSE8UO1PXfrbSICXYdhXqPgGSrcImA1Gm3hc4pFxbYCmGGYEFFBlKOCpSRrua6ZQysgK0XRgq889gy8CL5+7gVcvXsJv37vl7h29yp2hpF+Iobn/BMwacH3n30R3z33AuqB4mC/Ym82LD08GtgGEQCTSnv/CgtDzjGtrLq6A8XD4JLzaqDnZW6XtZ0o9zDXdAt2kEjwZRGHgHj/vNy95YpFaeyOHk4zzK0e5Hjjc7u1JwyvyB5okBSCPbRQjZaOG8NwcoIh7XW7O0qZMBXFVBSiAhTFYipQcXy4fxNXdq/id1d/j0t3Lt3XexwGjLD2z8SkE77/7Hfx4vkXUJfAwYHhYBbMlcwb8/RfAJjnCce20qsgiebxc/nTSs9pLOII+bbCHxBVuFmr2LZflKi0mme1t9kVLPuV9JoqJZkOKAypwziDweR01x6x7ho5IogPqgxjVbBQ6WUmN7gbDx72cUV5HYpJg9kkKpCi2JomLErMj6IoDnwfP33nJ3jt6mstJjhsGGHtn4GFTvjrZ1/Et8+9gLoULJcVq5ljWZEitnCz9mCVManCPLwbkvHja0YEABIeVOCRT2YIKlEwEvol/ihMOV1CL4gMMT1/HohiUhwOMY1ibN2APVRjHxMkHbDd4mjFHiC8poiH4UkYu5MI4XkgefRPFWGoIvG6g/qgfarG2KrhKFqZJjy2dQzfe+rf4b3b72Nn
efeBf5ZfJAzj/BT41hPP48XzL2A+ECwPKlbVsKoO+kdYa+1LK5JotPWbFSYRL+k/WWxh8ZX9RLSpksw7KyK7NBVITpZ4hqlhgO6kKSQNMH1zZWirDngQB8jqA2i26dHzQoLrK5keZxf1nvfDvZMiqnMqBgaXPKKMB0b8qeJQFsqqO9wLJuVkTTWcOXIeZ4+eHcb5EQyBrz+Cp06cx/ee+Ba0Knw21GqYHVhBsfTwSr6W60XhJZg2QLY7+C+v7YZ1hpjiRv/VIaysiCirpBFGuijDz1bCBZAUPkPNPBLgz9IbmsGrh6evMYTtnhXaCE2DO1v6qZCHimShKZ5JPLhGwutPPrBBUK0Pe0cODJhFi2k2x2xhzLPH34YwVtUJ3zz/rda+GQgMz3kfFFE8cfwcfvjcv8dROR455spQLW7E4LAy82LhRNIIRXqrgo/n9Gri0SN0VlkL0LxQtFbC+MwBtcr5zdaFhIqGMdKAFfErNY0d0kbJciytZTRrNh3hNXm6ou2wiBnOyHshEdKK89Bp1WChcTN3Zv6b11iCuQAgKtmGbCV1okTltRcEuWF7OgJ8xEMfdgzj/BgsyoR//8x38c0zz6HUbezvVcyzYTVXGL2kgt6BkxvqWeLhLeZoninrN57D02BIK4pqUfE0hpMF7FtmgUbW8lXxaGtYcmzppjJkztAWfdRMkvkDEhVIaIDza9l7lTDqcKSGSUorRiVDqQ+GC1x7FVk9K9ACrxZ9WrfmVbO4tG57LhGqG9aIEwP3YBjnRxCG+SK+c/6bqAfA/oHFZEnlzeeZ80m3QoSjUN5kngweoRHROzhDzviZvFe7sUOw5o26qYcxRx7rYPUV9OBa7q32kRSQhtQYQM2YI/QNQr6yxUMDYlSpGpMnSo6vtWiTVVwmlJMmgYLaRBbFqvT0afyq+dqDaMHp7jjoOOS9nhoMBIZxfgTPn3ka3338Baz2wzAPlh5FD0fjqKZUSNy8CC/kGdcK0EK+nr5l4aVIhq9RTsp2lWc4DKFbDaMGgKJAZLfd8wof2EkLFOaNwjA1Bb8EEv1XcJ4zQ1igcXjDQ0pyFiCuVEOgPybHFp5/xwEBzqUCjpotILVOSTQnbZHeWUObyLKPqwIUQVXHpbuXguM70DCMcw1PnjiPv3zqRcwHwP5+xf7KsLLQGiitmRE3UPBkpU1IZtSWIZqBMiI54kWPKPy+th4yCQrMU7ON0sJhei9AequEIbMyh8uKLmkO8ZzoLRWPv9Yqud50isDnEkmqX/xmagw5e7AlLgauCvHMH8MgCwAxD6/pwqJU/r/DYPTGYZCiglIUi60CnwwvX/oN3rj2SjvQBgLDOInTj53Ej772AxzBCezsrmIm07pRgZ4hb+A0hvB+6De/5WhXFEOKhBfqzZYs4zgfU5iTeWMHCVsZYSAMLWmcTB7v8c6cBlszNmV7JtsZOR4WhAf+Fq8hCAIKQfGufBB/pSeTdg3Rh5VWNS58TUUjeDeP9ohAMGuGzI5JI6RVFUyLCVvbE3b9Ll5642e4cPV11DFi9gcYxom4uV54/Gs4tjiBvT1gaUAF2xbwELcSYdsj+nPqWeyxtckRtIJRGkQSypW/D7AY4p0va6yydj5tGm5cXeawKoKKJKpb9C7J69V2aLAvmn1LWZcnQfwryRBJuG1Fo6iyKkiqEI6nZQ6cc5+SoWlWhSmL4sYoIB520ogGSlHqFCnKNGFrawEUxysXf4M3r7w6ZDbvg2GcACCCU9unMa8Eq7m2AeUCATS4qeptbqP1IVM1nYw3UuKyOhmEuiz8hL/SRhQAb1xRcmsF6M8QUHopmCP/KYImhZmPp+xputu9/FqkJpHyeq2RIEQKYICUtdxV13hIPJjcjZ4136veg1Xh+9IuM6q6ImGYySOeNKZ0ShGUhWAxAe/dvYjfX/rXYZifgGGcAE4dOYkzR04DFa335qTviHAu03uPr3fawRaCY2XevKEzyQvNHz4ICQRZUBEWh8IOJhaAENVW
MG9z8GekNfyTIQRnH5GeFfGUcaHG3iU+ouInXbwrq83pPAs4HtbyaB4ezHcrry2LYCKpexvXoWKd5SQaB5jELGjoghWUqWCaCnaWt/E/L/xkCIL9EQzjBHBk2sZ22YZVYAEAbHmIZlHFaJPeUzWmolmQiaKMk20jKOwTAlnEpXKAxERHJ5bbGu82bviUDYGEVhDSSLw38en/SEhAaOEm+aG1YUozPmPxJvNkb5VfoexlVqKykksVBu9SJvEjwoNFSIiw9jNp1FGZpfEj1OyLIEjwAszzAe4c3HmQH+mXAoMvRWT4qXAs4JgicwvvQ+5qdWB2p5peZH41qXCtcZm5KCjjgdZPaa3RtUJQ0POo7dOMCvSuIIsH5NCixbzpFR2djVT5t3m+Honfb70caV5TlVKX6UUR12gsGDm9bL4WkAxRM6f2oAFGIcxj7Kz1ZeJvRSoxCCY3qEf2e2r7JF44981B1/sjGJ6TKKDSHIsfQq9WOQ7lmfd593ZZQY3WhDK5bGVbSJLzJIs/mQ9qS+tCxUBgohDzxjoqiuDEZk8SWdjBPbzYnGrJjNWzCgyHe+Xr8RYuI39eotAluubeM0zl/GdhTpxFK4O3jWXwGoR7vubIe72R4nvmnP+WVs3e1gn/4av/G46ULbxy7TXcHUPXH4thnOg5l4hg1gmzzHGvWogy1/RK0u1vordRLWvqdQyF6RWT3+pWQy19LeQT6TdvkgeKCtys3c41vRZD13XyQNLvsnep98pF00eGoWYPNXJPISk/K0zaikGhXRthbgg3GL0l/adx3M0MJoBICWYQi0E1qYFiEfYztDUAswClGhYeUzLbZRs/evZv8e3Hv4PfXvk9Xr/+2lBG+AiGcQKIdgkQTkNQnJMe4vQY9AO0J22jUd7zT6D1/mJqg2GrgvtRcgqyG0k+N4BGiI9dmd76lJ13mp7RmvIePPOSfNT+2EVCgLp6EALCFrUdImvPTBlMUOsIJBkA4FC1m7FFJAA5tW4OEVt7RRlO39NVZd4a70mxcMdiNfaHLgrOHDmLHz73d3jxye/iFxd/jgs332JebYe+kjuUEABslQX+8wv/Eee3HsfywEJJzx2zVyyrt5nFljMSwkSybe9Kj0lPOhWGp8y/IiXT8FJrHtatZvGXqV/kt5atmnYQOPPCGA8TSB945hSJu6EgKsHRF41RLgiCPuepjKBIZcDFVJpsJ80bpuCcaPRwva6H2DF65o4maK3C4W0Wq5T6vGTsYdJ4jkkFWxoRh0wKKcodLIr9eYm7y7sQGK7evYp3b72Ld2+/i93V3sO7GR4BhhLCJ2BlM1Y+R9Uyb1AzKhzQSyn6YDL6oDQE0MLWQX6XPUJhohgTHGTZsLEPj4JMLwB7K+70SQ0WX5B+0xpjqECjINPaF/mzWYUKCRLPCisLOo02uJanqhvUWF1ViZzSlLkvlQAREzSz5/tgNFzwUGCRiBFARBzzGutIG5MqDx6pFqG8h7EvdAvnHjsLEeD8Y2fx7fMv4Nrudbx2/U28c+sdXNs5XMuSRrkMiBvGckWCsgcYw83JgTN6MJfM49aNKGcTgUliP0kuk49WR6/CSiMC9JlMINUT2ELx/hxSyEdFGlavrKYCAkSbcWSYG+eCwK37+vztlOAUy9feK7yt2svHST5scmJbk5Pfj8JTFs7S5g1utZ1eLSBf69XmwRFrCfm1ajHSxqFwmOLMY4/jB1/9O/y7Z/4a29P25/u5bziGcSJu5devv43Zoke5KFSbuyfakO4RKQpNf9A1aUUiVGSuZqIwLqJN75hh6+zSb1KEd0vpkjQg1eSssleItA0utGXxJknsiihUTdIfJ0PiEPqKPylFIhHftnyxeq5ZiNy2UoAlDw13QUxP+9orT30jNp54gOSoGNwptJ1RQbKh1kbZ8o2mkc8eqgmrmsoNjm+e+xZ+8LUfYtLDE+wN4ySWtoIyB4IyBFNKgyAlNdLpxQ1f4GzGt8Ine5XU16HItLNFUj1ztU4m
WC84Ne+qkYdN5PQu+EdYaFHJka5o6keTn19juJrKCBOAhXKIOz2ZdCWG1p5F5LJxLUFRFMatXiuMVeSS0ina+7SaW7YRh4wzr86DBMK5VqYMSW/Mtb6evFygD4R7quJHnO+u+MbZb+HxE08++JthQzCMk/jw4A5uL3fgWsI4izbvc29gGH9nblipP5tV277aIL6QSnsV0ahnwZPkgFjZd686H+jhSKnjATDRUy40jFHhzQAKj4/cr5mE+hjNAoed2+nRQueUJmmHBRwzDNUMVg2oUQjKFRMKaUWpVqjiW1LSoIWjZzwsJkWjKcaQ99oB58IBdl4PGKpbhLe1xh93h5vj6OIozh49+2BvhA3C4YkR/gjuHuxgZ7mDk0dPsZoZ2ZIKNWA9yzLZX+wzk1locUcLAdODJXPIg+UWK/k8+y9dBczoVSJCjHZO3OzextYKBOD/z8Ick1VVmGBG0AijjQJG4uGP+3zJWnWQGkLVHa5d7V0l+rtZHEsDzJjAkaXlNdIB20b5HoTcSio8OKBKbx5buIsBRSPBrVmsQm9LId+mpDPWCtfkLR8ODM+5hgLHtjimNTHoyJ+Y76HpG0CloEiJNoCEV2llH4aXKU3S6XSg8YWH69OS6ZDYq3RvOe163uut2psk9RydppdE5JzCtkmwlqjfIGE5EZqGoWs+u6TMGOVPfI2J5E4P5k3BIbxtb63ka85lTYKYQNGWh6PlltluqQ7M1VBrhdXavGStFqF/jgble8FfeubE04cm7zwcr/JTQpTq5pgjTCUH1Wv4x+SdtoZn8yh5U2qT+rBGbUOrWuYImYEr5EG2UDxZeEI+Zm+/53RIrw4LN5UBZC4htXnygMhjIojp98yScotYtleE7Rxye0JKE7npM/pAWQmueVqwLJuHl+dbwvdFkvPXTx2IeHh+d1Z/4xCyahECa1xD4cFVmQ8XrN2k7jhz9CyKTodi/+fwnOsQwVpDINojDEFTfd2AqGJGJMkeYB9Ezns3izyqws1bcUO2NX+Wg8to6gNpx75m+BkBqzuEqw/maqjsd8TwdxSxyhTlXRcNb8iqcWofWXpJ5tVhKxU552neK6lJdFhVQ0VMxzgi91MadRwWobaXHrLv9WTenJ7bs10kEDNIrdTABcQrbJ7biohwkoIqGtRJa9E+jm2fwOPHn3gIN8Ojx/Cca2j5JLKamn94g5DcbRaljahaFhpZkuWlmfcf5myZR8YXDKGoUBHeptCbKI3TmxeT6P3Rm7TprvR+jS8bUiFBmHB6QRakmtdX9jGZMzNUdRpmXm7mqBbNSEyaukNBdayGNf2kdjVNxS/1aCPfJiXR4lWDht3PoK6ewDcRsva469HzkbKN41vHP5fPe9MxjHMd5r2Nx1vHRGEWgpaZo7lo7P4ASJzlzzqDPM+cr0V17abr0h85EpadxOTExmO00S0a5GzOaRnvhwDDxvjtVAQMT1Q9Ck6OkKSEFkw0gNiVEjd+pUtK351BsberzLfGkQPhbsI3SVr+HC8xflN4rQYu202FBc9BbW0hv2XVGWuD6pLzPHz4LDCBag7rifiXGMM41+COtpwovVLekEm1y+CztRWwlm9hTZUAACQNtI9RRRjIQhPQh7bRvVwziszZkIPPLYFl64FlKw9h55r8uGTxWIzCZaicHYz2PGTpZBIpXPQLhERKEv+BrtmbCgf5mGZg+JpPLf0aWeUu2pUDIwiIPBOqPJxSxKxXySFZ9EJ7vurGMHkY56FCDnjlCFXMauZNIn1kbK110G9G0Ftm/Mp9lK22K42nK+SpimTI2tmwuToeECqtW+uBSiOiO71lL8S04JAiY6FUwPg3Q1sWdcJ7x7W14pMHcX9qledgJOWSpGreCkXqbCMJmvd1i2VFzjWBgpiGKZqFIe8JtawpNsBowD3v1lT00xJRAwcEoiKdu2Lkc//8NxHDOInzx87h/PHH26iXFoFExYf3Qh+EjlAxqo6FlclYvZe0NQp5Ze6aTyKC4k5S
nGQjLyZe1LuxSxpVD2/XNBKaUWiGo26AZc81mT+0BumvIX82fX8+buSJlMgUwRR+iiFmOvDcHyprrZ7kG2tkmNJfpygPOiTJAU1j6J6ooWWdiKKWxMEQLpT7RZVzaodMOWEYJzGVBaayAOY+GqY85WOjtDMkA8DGeBZnShujRmtbZBYXPUNplV6FNbW9DFWdOWNFUux66Cpuazs3+bXMzcg4cukeNQtIXaEhXkwak0nr7DRx6mghdQJFfC8MI4w3ehp9PK23OTJayOVMjWMMhrTCXJU0yHx52X5KBQkDi2Ha6ZDtvQfQdiNCsfZufKlxuI6iT8C1nWu4snMtwi5OpxQOJzdCO2JKJZkrmWPmNuhUdwfQ1rsD0j2cZO4njf7W0kP32JCNnvdWFp8EnebW/kiXoqxm7RDIcomIQEt+vGz+04DMQcUFrvNDXHve/0YucJAaenW46DqhHpwpRZtYkfxdS35s6C+Z53wqv0alQmtD66A1Fr5myQIAklEcrz/ej8Mygj2Mk1jOy9jXoWB4Fzdj0XWvZUjCQOR14T1CVjKsLPV64t8KaGlTJEovV4UhG2/zkoUPRCUVyCqvQDTZOwKRwnCRl5MHBA0iBb/yv+sUvMxlU50hx89aIcpjqLrWWElf45SIMHjtoAGSpyscCsg+JsfL1n7OOAbGkc/uXVXYag2CfNHOB3YIKjT6nNb1gMHnjAPpcJjnME7C4Xj16mu4dXAbUylcWBsT+7kTpYBzjc4Qlj1JaSFX/PHWBgkPF+Nf/Ir3BkVmkQYLL8FlRBmG9rWC3vJeBSdBgFYUSiPro6Is1LQil5AUD/50vF5bM36wYFRJPqhGT255zbL2u3EMpM5t5toAq17wVlhuqwCRRwbzec22UHynuKEgVBxSrwggZZCRSlHg5u5VvH/r4oO4BTYOwzjXcGP3Bn7x3ktw1Fgiy0oiEKd8SElmPghAgmw+Z9jFmDJDPnOPfZW0GhWGiTQ2zemNlu31QlBWTcWt5WBFU9Iy/hjo1VJNgBTD1PIxiwkTz78Z2uasZY64pcas848BmM2wqoZV5qnSX5t4tm9orNJnQpuOUKs8K0zoDUUgJSZ+1vPZzMNjxQUPqrhaJDc5IHj96ivYPSRCYKMg9BG8e/siru/fxLnFGZSimMwwS9wqVRziIaKZd4xl+Jm5mHnMhIKnPf8Wkukb2Z3eIGQ9lJ61e8L8b/yMrTGB4v9r7aVUB+PXbHOo9kcxC/V6pDC0c7WDQtsPJZWPrRbOUlor5KSHy7mUyMWT5dPEpiUr22iG6eKNhJ9RRCu2eVxf7u0UWF9BIWvVXYnseH/ex6U7l3qe+iXHMM6PYLaKGRYjVMWBVS92BL+N629pNGGRtRUqGH1Gf46P6ayqzgDg1A9Yz53I8hHNnC4Lr1kOAVsgjljD0EPTRoRALsLl/0sPes0B4/hWHBBsz0juRiHbaS2/dBZukjfbKrxAk//Mw0LSOFXaNadcSptUkTzEkmPLqRrPQyWefxZFKYVFOPZii8InwWuXXsPVu1c+vw97wzGM82OQvU4RgZcSy4GssuZJ/iii/dF4rp6UOGk3eKxSkHuK/+o9H1VHX34LDmazgJO5Z1qF59/MxbIabAh+rCEb9prkIABoG9JyFhQMvx0xV4nMFd1ba8WQvVBWptf8uVBjSVjBTj+akUNrkXgYtoOEezhUS/OKofxnUchiQUv4/VBqQBjoFMp81/au41cXf34oplESwzg/Aodjb95rhZs0xok3lUEbAb1mEsbfS28kTaqvty7QiriZn+Xj03dJ5GaS7skl13DG92n8bkbiT8he0nqa8XctI3pdJs3CPLCmQXtcda/cdI+awmEm1kLrJLmHETE0VXDvS3pGZCwe7wWvKkgOU+zmVG8HiPNNUcR7aWbQMrX3XTTIDKKKC9dew87y7gP85DcPwzg/gmoVF25cwPMnn4GoYCpozX6Is7encGGhhwboWKP7IcPM+J3k1goUrs6bP7xT4+ms
hcWhNJ/q6V0Nr6+BANKfiTCTlZZ2AiSbFxZdzCrHsayr7AGs1ypyUC6JC8bt1EkEUFaL0uCVhZ1UOlAWzxJCA40xsXimScioojGbhERLUA6pOFGTkBFV34m56sG8j/dvH44K7TqGcX4MzI2hmmOGo6mwM9zKkE9UOa3Rix5gJVShoZ+jcaO1mc+WF9Io0lgsvIeIcBcov5FcXy6mLUhN2B5qZo7qEBoKmr6tCuCmMMwUKKPINAn4mdcFmSFfv7NII414bs7eL8kNoU+Uy4pYwGK7RhF5dikK0TC2kup/EBTPcBZh9Ei23loIgMx7gaXNuLP/4cP6+DcGwzg/Bvfkji1cFbYs0CYsZC2shFuoKHCUohTFQgARo+NlRiYOMQnhLPd2Lxp7i1lRKghBr5afaoldn+mts7fB0BNSGkk8VfugEtKScEAKifT5+rSFtm2lPcP1aCGFt8/RtFBu1+ZNJ9W4ngji0RRRGDG4MN8NyQNS8/h6Q82MeXp/P3NetI3FSdIcD0t99l4M47wPUnBLoYDVGDoWhWj04WKBDz1pRr3IUagSy2JVIKiUe0TrEwLGPmfkkG1rmGQEHXntKiNbhsciYI+RBSHmfsbf7WNsXE5El6rOdgykVWQFEnKXTbdW2g4WlVgTn0ix6mQDNt4wcs4SLCg5oCyb8YBq3Ic8w5DTLvG6ZmMVTPI1xGOZoy2SUplwdOsodkfOOUC/wh2c9JqMPxXAxLs0b/g2KqaRf5ai3OgcN7xayDOHrEdOmcQztakNcPA5K7VAk0zJpshsBkbJJIWjzY+G2l2E2oXX5/3UILqBG4Aq0g4WgNM06aWke/rCcFYyn5YYI8vpm+xP1nw6ycIU1toz4YUjZ06CPFcHYk1Qu1W9uWvFDUfLETx16hlcO0RtFGAY58dDevugGSSAmbmXoFdMG3VNkjOqjX2zvqckjcSNj6wCqVkc6otoo/0grTEfzJnsUdKYG3mBzwvc03dE+/9w1bPHjW8KKCU6c/FSSqFo/j9Y9FnnFDNMbTQ8MzTxd4lceaLESg8jeg7L8Zvw8t71lVJFoVA5MBhRHOzmIaUC7C538cEhoeytYxjnxyG7C6S05NxysciDxJMC1wLf5m0Kx6+ED7TeYkkmT4alpoCgQC3U5tTy5kcjVrZwmR7aTZqwllgv/DSiAKju5+FdXQvEK5zznlLCqkICM8LSkp4f2TrioUBV9uxXZiEsnyeVEYRGHTFrXgNaHFtppaVdIxlJANsrnbggJFnExvDY/2k2Y3+1+zl+wF8MDOP8ODA2c3pAgUEt2vNs30Msws5wCtIocfSVCLPtMyIZyk4aJIBQN3BoznFaDY/GUDPJAaFV28fFAKoS5PdTqo/5bBDj0XqkUjhrakkor2Q8OYpLK8RkLD1JLLdt8izIjdXWPKvTu4vFlrBkF0V+HsWnKJwZJ1vCQ1d0OqDyEMucNadSkp+boXHEKaMgNHAPund0F46SFUwm8CpQr6gexPdspMc/hbOQXZvWJDY7C8KzOirMgBWNLYaS0RQMGCyTbC+9qiutWxqG18jl0gzT3WDGPDWNlNcQkbNSdSG7pWib0yZNGU3PYmoUaaQbT7KfkpCvGQizj+oKiFSS2GOyJeVA44DhhA2V341Sm8jX7lFpjq3eUYS7uXsTc109rA9+YzCM82NQPco3IiUKFmotnBRyTVUUVVq7ELnMR83JOw9PpKyiShE0rrpmIYY9Po0CS5RBAJoTaYNr9D/3XgRyyoawNyr0qOphLpWHgTFUFiVZoam2S9OXZZKNlD26d+u2U/MnQuWk9DE9bq8TmcfScCVZRkA/XHgYKV+DO1UQ1jYHCxPcrITLJLiyewnLunwwH/YGYxjnx+CDDy/h9t5tnH3sHFyBIqFNO5thhqA6PVHeQNIGQUjf626nzXMCfcKfxtA2fSG8stUIOYtkd5VyIEnFAyUq0XPAftdnlhvepvH++JyF11lVAAMmHiBROc0pmjh4pLGa+GfNA+dh
oCxKVYlAPwwqnjMLRalXJKKk9a61TdxRvBeJ2rWC5AdOux/YEtcP2dLcxDDOj8GqrrBXD3ijMpcrkR+ZMQdEUNMmCgEg+aLOFQPBP2M6SAFnehw3RA8PNNqsTKo2ga6kwEkb+uc6wd77SLPuF+6Za3IeVHv+GmFx+KaoxwRTSJC92vBeklpa0snsjjyIwuhz63cqHDQSoCNUGRgdtP0xDC/WCtKAcFuLZyMFjY2U6oVaCm4ffIirdy4/sM96kzGGrT8GK5vx+o0LEepR0T29mDMf05z2kHvfxKYp5I55rpirY+YSoFod1ZJUztaLru3a5PhVkwWx7j37+HEMLluTBwnDmi0UDNoeUfQxr1bJzXYLC1bp4cWNezaVygZk9MBbTplLgbP9kZlxkn0rp0sEaOSHLF6ZA5Vto6wsN92hokCRtQ3eDin8ugJvXH0Fe4dkuPqjGJ7zPphtjhvb7qXLhffkvCJpacaVCq2i6NxvqYIlWxhFtKmwRxGGUZwCar1amno5bWkSfxaWITC9Nvs3DDSjZUFDEwmGETyLLdk3VIaWGV0rw1bhDs/MR1MkmqE5PFoyiIXB6YkrjTC9Zxp8snzQqsC9B5sV6wzVlRXpFEwrk0KK4Ob+Tbx65WW8cum3h7RWO4zzE+ChIkC6nNf4/whH6RGym+4II878kzlV6ve4cUKSfdDcai2SLRBqB3nwSmNRfRSJMk/NNso6LS81VFKZT3iYhBJ7ejiHqkHyUb2GgYKVZUtvj1YN7hAAEcIWenFN6h+J+CLCAyWGxduKiEx5Nby8iDaiQoTz9x4cDoTSoAgu3HgdP7vwj7i7f+fQGiYwjPO+WNYVZq9QKRFmcl9KqMcpxKypj+eGakHkksLxqDCu2Ajm7iha4KmkgD5XqVwxYGlwWTjSNc9jaaC9KJRNfeNjZPrrBgomsLprgEpUW1txlTlmI8LzEEKSGhie57ha+3nEYzkbqaEPlJS7/P6972WGq8p19Dl84ul504uLYOkr/OriLw7lFMpHMYzzPnj/ww9wa/9DnNk+3Ue9zKNS224mcEaS85juEKUoFdDzLbZbPK1B06uB1dysOsXOkzC40MmlwwQ0ZjxzT2YqtgNoo2ru1qh1cEAKq8nQkARhYcqx1rMEY2vmycLq7/pBUMkKkKT6adcCigOFpZ92QRqHgzBXLRrkBmRxuRtzSrhkIffdW2/j9t6tB/jJfnEwCkL3wUFd4p3b77aCTQoyg0WbuKcFLiGtkdVRqHCXJaJCiah0GrS3J2hEmaUG0yhu/OTIZnFGEZIfmrssGXqmwHN3rhz6JpuoIiY+2rq/rKDyMAimT3ZVqXjLwehqhmoVK2rYBmvHQ6dWlAUpaZM7UbEy7oORe15HbE6Lgo+w31QdWFkUypwynlDB7ryDX777zziY9x/KZ7zpGJ7zPjA3XN65AvOKotz5AWmnvvHvzKFyggUIk5vB4g/JCRzMCLqccqDa41HbBi6WU3IVYHOb7Ds6b/7oR3ahL+FiW8uWBNsXqSWbtHIRLiAysMADuFlko2ZYWyIRPcq1qq+xt6upIpjMntwtk+Eyx+aolc8DJmZYM0DosbW0YpAI8NaN13Fz9/oD+kS/eBie8xNw8fb7eP/OBygT1d8FWIhiQbmNIqFDu+D3Uuy5Ga9XxMYusoYkKWnRIy1wiigDygmXzL3UBQX3Fk80vTakUeJCfjM9q7dcMdM+hcZjs8kYHlOaV89mZmfzrPU2Aa5R6MeOeG2Gy1mWMFhF27dSWHkOCiKlSRxtp2lEGbEFWzR0bPdtF69dfvnQqLl/Ggzj/AQczAf4x7d/ivd2rkBLrA1Inim0G1ozBV9rKHjMq8SUB4ev+RhhVIXiVd3owgC18ViTIJBkCLTwF0jWjyIOBlVHru6jQwOQ86Pd83JLIGcnwwirh6dPQejqsfbdXfr1ASEi3Sq6mmlyNlsAoHGLBbFvpo2AeZAQoniVtIMonLkafv7OT3HjkDKB7odhnH8EN/Zu
4n9c+CfcXe1Bi0AnabqtyrZGy7U4fZHyH2wwIJbSCraKYFF407bEFU17R4SGJg7RJibSwj/IGvEhDTkeIbwYubLhQYHsigI5usafZkitwk1h6GNn6T0jZ0bj2QabLlYeKq8DyNnMtg4Jwt0wELSFRRm2xmuIxRZaFtCpAEWxb/u4vnP1ULdNPg7DOD8Fru/ewGs33gg6XgEwATopShH+KQgNnyiYINstWa2UXMOQ/8/NYcjdnb0404zJDU1UGeBSJG2sIuQSoaIhgC1oa/5a4QcaBRfWnoybv8S7lysSsp8FXe2gK0CsadTmgQSyeHJhDFsqEZLzGBDhEDn4vd7NFC3RD1UNTeBJ8Na113Fr58bD/Ei/EBgFoU8Bc8NvLv8W29MC3zj9HLbLEQDRwjABVnPkcyqxmt5tjjdWenEnqHJA92JryZ1zptEtpkocnABZ3xyWRRdhXxTIh8xCi3lKnbBNIsb1DQCM86m5SLcVnDI0B5rINL+ZxZu2aY1fz0Ft1pdb09JJaoh/54B4bBorpU+auEZ7RYpgZ76L16/8Dua9/zsQEHe/byyxrkU6ENMp54+exX/42t/hqeNPweYguc/VMFcOCluN8Sr0XZlKVT5tbpA7KqUyLI417WLREK2+pisE5mkUWwaLSGnVmUvWVkXOAg7DZuZ9wuakqmKil03+a7RPwrCDzdR7oEVTpIvMpTh9+Jx8Y1itFVUSHegrJbSUpqKYJubYkSBDVXF3eRs/fvO/4/2b7xzqkPZ+JjjC2j8B1Ssu71zFf3vzH3Fl51rIVapi0sL8jwYhhT3ByMFIhImJjSzK0JuGWl9UL03COEDZzNlIAMiGvcTkipYu84F75iZ777Qi9oCmHlHWQB2OmeSI5j7bVi80HV60ZwXo11mZ5bUwXHVnXzSfOdkJ+begl7oLoihWgA+XN/DjN/6/Q2+Yn4RhnJ8BN/du4eUrv4dhZjuAvUtpq2QBABPDWqxFIAK0NQPrldnYFGswu3eZUPZRYbWJYBlX0ccfhsj0aDDvv29J0vcWPseqwF7NhaPnyeL9mtZyTzDMXn8VmStnXu3cYB15tzTSg5BQH7IsijIpru9exf/98v+F929dHIb5CRg552fE5btXYDbDpbBd4kg15bY8DGzAS/QtOE/SqpmAdNkPhG+qnmElWsU1CQVea1vnF8POYVjunZRgSBodK7XUCUoihFLSsnlAcOrLs+7rzasrjTQquwqgNqZQ6tN6e75OMFDtPc5kJGW/VgC8cfX3uL1384F/Rl90DOP8jKhWsawzpsV2i95EC5SKeEb5zBaSIhvwPVdM4xTSh4Lqp5ib0HTc7lYNk5P+ZmlAOR3Sc5Y0qNbEyULP2vND8hoAMgkae0k8xL3cwAkRb6FVMqK8tUaSGigtMMgpG3ZMwjA9JlkKQjxsb7mDyx9eerAfzpcEI6z9jLh9cBsXbr3dhoaLKCYPNtBCga2iKKLMKXtPM8nlJYqp8aWWMzo3YgfftjLvnM2xqh4yKcxHqyWbZ21jNg2v8N+0PVaCyeRJQwL7qpocXkES1aepYFEUohNCRylD88w3uT5wLQ/N8D4sswTJgjsaGg3QFXvzwSC2f0oMz/kZYe549frr+Pb5b2PSLYhX9g9T54MeKaf/IZg4kgzerLVWFouMIlex4EdYPFpvo0ThJnuIExxJGUSb/0yoAHM1uBQY4kPOdktMonUGEoBGLDCGsYXjZiXIwBFWW0QLcG9D31h/3uzjIg+be3hENFDgvdsXsTqESnqfBcM4/wzc3LuFa7vX8PSJZyLXkwKrgNfguVZk25CxnisnUshZZb5YWaSh6QY5Qbr+D9a+l6GsiGDiuvqYw4zToLLa2lcrSPPWsdHLk0PHopB3kgGLQWVS9mY9Dok58t0CxQyHWw/V46JSUFN6+MxQOVqpHo89Tbi5exN19DQ/FYZx/hnYm/dxdfcanjn5TBRzEPs3NdXvKhrLR7wrzbVFuIhV9MZ5ymzEZw4XQ95h
eG3NAtZGxbLIg5g17aQAejug8WeTESskCKQxqSi6ng/7oJM2vq+5RT93CZgabGWNYYTMQdnXieJSDIZ3o9c2ZD3DcFDHONinxTDOPxO/eO8lPH7sCTx94mkUdUitQV0zR4l9CeSyxs8nOWE9KEwBaFWFgyNc2R3JqJHeMn7eMGlpc5ypAC8tdrVQ8stNuUQWh+J5SbmjBKAWoEyCaZpQJsXOagd39m9DITj72DmITMAq2jkuxkFyJ5m9F6RyEEAlcmotYfgmig8P7h7KnSefFcM4/0zsrHbxj2//E/7tEy/i7GOncP7IGSx0O5YHkYierQ1IUOiyTbJuIEFVVUAMlWFjF6fuDf++izqMoSDIC22SJcNnahrZmmZIm59hpTgnTPpgdDznneUd/MNr/w2Xbr2PogX/9unv4dtPfBdHt4518kNNr84JFsqPZNGpkd8RxTIpgovX3sD+au+hfTZfdAzj/Bxw5e4VXNu5hiKKF859A//Hc3+PRdlGFYPVKIhkpGsCeGF7oaLR6sILGSZQ78ci5A0mjjVR6hwZozIucuolIllro2fZ/3T+niOHo7Na1Qs2qTULi52d7928iPdvXoS5YbYZL737L3jv5kX87y/8R5zaOkOmU1ALPC6mLXAqKgBV77UUoBRgq+Dq3mW88sGvB4f2T8BopXxOMDesbMYbN97CreWduDGbhkgY0ywaJALJyY7I8WLWswQPdZqwIP91odxfIkEPnBRY5Ewp0DxeeqrYZBbXEsrskfMWjpE5yQyCYPzAPA4PY1unRrX4yOIxLMqivTZ3x+U7l/GTC/+Ela+wtZgwFYFwwHrihE5ozSrKoqBMBWUSLBYFLobfvf9L3BoqB38ShnF+zgjFOgsj1CB7owic/Nlmr1ogpYT4VVFMJXm6goUqtlSwoKJ8zH9SIUEUU5LXk2wQTwxIbqBO+h3nMHPGs9dvWYBipXiNNugGPHHsKZw4cuoPXtvlDy/hd5f/Fa4V00KxKFE8WkwSRaTFBCkTdJowLSZsLRaAVrz0zk9x4eqrD/FT+HJgGOfnjNlmvHrt9VjpDrTVgAtOZxQNYyyTokxTV0FIrm3hDV5ydT0wFe78zNzQ+1B26PcEsycMkd60KJUXNCq0HqJdsxkqHDO8MZHMQjF+NoFXx5Yu8L1n/grb0/Y9r61axUvv/gt+/cEv4WrQRcG0KNjamsJAU8FeFYqCWmf8/O2f4rfv/xLV5kfzgXyBMXLOzxkOx6vXX8OJ7WP4N+e/g+3pKL/hUDMUjeqPWYhe5SylaPQtk3wgEkLOakFQ8Fwq652cIN6LPBkmu0SuqhItDVe0CZRcX2+ZYyLyVIVAi8R0SjVMVfDcma/h7dNv4MK1N+55fbPNeOniSzAA33nyL7DQ7ShROeCpvSeCaku8dPH/x28v/Xr0NT8jxjznA4KK4rnTz+Evv/KXePrE05ikQCxT0BwFc4gHMyc0iSLnyzDTuGPF3VArGbISfdEUlhZZ36QtJCgF2d2qwazC5spRtW6Us/frdIBeXbA1TZi2JpStCR/cfQ//8Or/i92P2VUy6YTzx5/Ac+e+jmdPfxXHFsfhHo/x4f5NvPzBb/D61VeHYX4K3M8Eh3E+YGyXbTx7+hk8e/JZfPPs13FscaxVOAVGIeqJ4s+s6FpMYMZyoPB8cw2922qGFdD6msGtTXWC3rqp7D9aTIDDzCgOjWhx5JyoAECEwSohubLYWmBaTCiLgl9/8Av8y9s/u/8NBMFjW0cx6ZSsRazqEnujZfKpMYzzEUNFcerISXz38W/je0+9iCP6WKizAzAtqGT6BJuotqmPeQ7VAZuZG7o1T6up30P2Ua3WSkRztjs8tIOaOoIHN6matakVQIBSGqNnsZiwtSiYFhN2bAf/z8v/J27tjhGvB4WhhPCIYW64uXcLP3v3n/Hjt36CvboH0QKokpBOrR+nOgK94qKEcl+KiS0mxWIq1M4N0nrkfLm+Lz/sMFynYXqGx26YrbLlEl8z
9O+711Bj8ApYxfHFcTx/7puP8q07tBjG+ZBhbvjd1Vfxk3f+J5ZeAa5pcKPCOknqxokW974qQQqoggcqFihbJ2gDzhIVn6a4J0glBC7HbXYbciWzC8PnGmsLabBuabzAkyefxtZHKrcDDx7DOB8B3B1v3ngT79x+By5OT5n9zyS8A1FqRVNJbwp+FoWjzptleJojXyQmmBsFosFVgoYUGEm1hTwYrNa24tAs2ixRUTacPXr2HlLCwMPBMM5HhL3VHv7hzX/AhVsXgjxA5o97bMC2ai0krW6hjmdBHBB3qMX8qHEhUOVK99QWavtL6Imzn9rJ72GUMAsRawEcEfa61dC4rQZ1w9GyhSdPPvUo365DiWGcjxB3l3fxi3d/jr3lXW56BnKNu7gwxHRIzGHHMqGwmUamd8v2C43OKbzAruOk0lQSQDFnIyE9NWpTMaEPRUt7fpijyISTH8MYGniwGMb5iHF15xrevvU2nG0VR5Dj5+iAwKuv9T6zWRGhajWsTZUoyeyk+kXC2SruuSsU8KZDCyTdsI+S9e1lIZdZ3VFzrmzgoWK8448Y5oZ/fvfnsW4QKVsZHm1GLOtdGTWDOFDtToJBdarmUelOCzVGou85aa/gRohrzdNKGm5w/lAl922GioNLypEAe/Me3r895jAfNoZxbgB2Vju4sXM1ck5IbM8GkLXWWFYbu1E8uX2prBfyBuHwFLHmQPpmsPSICiF1MESmK4KMH8wkqiXIvaLRDsBVUH2FnYM7D/dNGRjGuQlwd/z+6ivYW+12iRIkDY9znDmjySFmKQophTlk37Up8JhKyeVGuQwTOdFSsNAgxad+rapiayqYpr42Ir4Ojn7el6cy8AAxjHNDcPnOZfz47R9jr+7GugWlakLhlIcK80Agx8Tarswkz/OxRLSNn02cipEShpcjaQthsagojkwF21MY6GLBOcwJ6as5bjbwsDGmUjYEDseb199AdcNfPv3XeOL4U1BX2FwbBdYt2US9oNMGrgFyZuPxUtArt2+vnEbKTFIhMTCNMGA09T3A3eh9gUkdF6+9jb3l7iN5Xw4zhnFuEByOt268iUt3PsBzZ57Ds6efx9MnnsHR6QiKxOyXwWEmLZmMbfbOVX+x8t1r9CuhGjKd6pjYIwXzysJRFpXYzwnuXNHQQ2mLbqHA/rw31sE/Agzj3EDsr/bw6pVX8MbV13F8+wSeOvEUnjzxJLbKAs+c/ioWegQLLexRhtHEbsxKPR+DeJLfsSZlEo+feajCMRUKhoWwLQp7pNYmVnCPgt/Aw8Mwzg1G9Yrb+7dwe/8WXr36ClQUx7aP4ZlTX8W5o2fhAJ46+RWcPHIKxRVTWYT8iQHADFiEp5QL4jY0FhokluIuqM7n4TIBBMe3pCG7o3BZ09gI9nAxRsa+oMhVCkcWRzCVBU5sncBXz3wVp46cwjfOPAc1oK4q5rlSxdLBLfUoCJaQlL5JrFWH1wl+7tCiuLm8jf/y2/+C3ZF3PhDczwSH5/yCIr3Y3moPWO3hzv6HeP/D97BdtnD5iW/jL558ESfKcWjR2JfCcFU4XF1Uc8kKw+Oej8KDHCHmqJKyKAMPG8NzfgkhEDxz6mn86Lm/xdmjZ9tECsSxqob9uofd1T7eufkOqtdeARbg9GMn8ZWTX4kCkAhWVvHSuy/hnetvj7D2AWEoIRxCHN06iudPP9vGvVyAg3mJ9269j5WtsJyXf/A7RQoWpUuOOIDlvByG+QAxjHNgYEMxZEoGBr5gGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5
MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChGMY5MLChmD7pm+7+sK5jYGDgIxiec2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQzGMc2BgQ/G/APDEFiPMgQSgAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# NOTE(review): hardcoded absolute local path -- update to your own data location\n",
    "img_path=r'C:\\Users\\Vinay Wadhwa\\Downloads\\archive\\C-NMC_Leukemia\\training_data\\fold_0\\all\\UID_11_10_1_all.bmp'\n",
    "img=plt.imread(img_path)\n",
    "print ('Input image shape is ',img.shape)\n",
    "plt.axis('off')  # hide axis ticks for the image display\n",
    "imshow(img)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "ed426a89",
   "metadata": {},
   "outputs": [],
   "source": [
    "def preprocess (sdir, trsplit, vsplit):\n",
    "    '''Build stratified train/test/valid dataframes of (filepaths, labels).\n",
    "\n",
    "    sdir is expected to contain fold subdirectories, each holding one\n",
    "    subdirectory per class of image files.  trsplit is the fraction of\n",
    "    samples used for training and vsplit the fraction used for validation;\n",
    "    the remainder becomes the test set.\n",
    "    '''\n",
    "    filepaths=[]\n",
    "    labels=[]    \n",
    "    folds=os.listdir(sdir)\n",
    "    for fold in folds:\n",
    "        foldpath=os.path.join(sdir,fold)\n",
    "        classlist=os.listdir(foldpath)\n",
    "        for klass in classlist:\n",
    "            classpath=os.path.join(foldpath,klass)\n",
    "            flist=os.listdir(classpath)\n",
    "            for f in flist:\n",
    "                fpath=os.path.join(classpath,f)\n",
    "                filepaths.append(fpath)\n",
    "                labels.append(klass)\n",
    "    Fseries=pd.Series(filepaths, name='filepaths')\n",
    "    Lseries=pd.Series(labels, name='labels')\n",
    "    df=pd.concat([Fseries, Lseries], axis=1)            \n",
    "    # dsplit is the share of the non-training remainder given to validation,\n",
    "    # so valid_df ends up holding vsplit of the full dataset\n",
    "    dsplit=vsplit/(1-trsplit)\n",
    "    strat=df['labels']\n",
    "    train_df, dummy_df=train_test_split(df, train_size=trsplit, shuffle=True, random_state=123, stratify=strat)\n",
    "    strat=dummy_df['labels']\n",
    "    valid_df, test_df= train_test_split(dummy_df, train_size=dsplit, shuffle=True, random_state=123, stratify=strat)\n",
    "    print('train_df length: ', len(train_df), '  test_df length: ',len(test_df), '  valid_df length: ', len(valid_df))\n",
    "     # check that each dataframe has the same number of classes to prevent model.fit errors\n",
    "    trcount=len(train_df['labels'].unique())\n",
    "    tecount=len(test_df['labels'].unique())\n",
    "    vcount=len(valid_df['labels'].unique())\n",
    "    if trcount < tecount :         \n",
    "        # NOTE(review): print_in_color appears to be defined elsewhere in this notebook\n",
    "        msg='** WARNING ** number of classes in training set is less than the number of classes in test set'\n",
    "        print_in_color(msg, (255,0,0), (55,65,80))\n",
    "        msg='This will throw an error in either model.evaluate or model.predict'\n",
    "        print_in_color(msg, (255,0,0), (55,65,80))\n",
    "    if trcount != vcount:\n",
    "        msg='** WARNING ** number of classes in training set not equal to number of classes in validation set' \n",
    "        print_in_color(msg, (255,0,0), (55,65,80))\n",
    "        msg=' this will throw an error in model.fit'\n",
    "        print_in_color(msg, (255,0,0), (55,65,80))\n",
    "        print ('train df class count: ', trcount, 'test df class count: ', tecount, ' valid df class count: ', vcount) \n",
    "        # interactive guard: let the user abort before a doomed model.fit\n",
    "        ans=input('Enter C to continue execution or H to halt execution')\n",
    "        if ans =='H' or ans == 'h':\n",
    "            print_in_color('Halting Execution', (255,0,0), (55,65,80))\n",
    "            import sys\n",
    "            sys.exit('program halted by user')            \n",
    "    print(list(train_df['labels'].value_counts()))\n",
    "    return train_df, test_df, valid_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "beb2abf0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# build the train/test/valid dataframes\n",
    "# NOTE(review): hardcoded absolute local path -- update to your own data location\n",
    "sdir=r'C:\\Users\\Vinay Wadhwa\\Downloads\\archive\\C-NMC_Leukemia\\training_data'\n",
    "trsplit=.9   # 90% of images for training\n",
    "vsplit=.05   # 5% for validation; the remaining 5% becomes the test set\n",
    "train_df, test_df, valid_df= preprocess(sdir,trsplit, vsplit)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6c3cd360",
   "metadata": {},
   "outputs": [],
   "source": [
    "# cap each class at max_samples images; min_samples=0 means no class is dropped\n",
    "max_samples= 3050\n",
    "min_samples=0\n",
    "column='labels'\n",
    "working_dir = r'./'  # directory used by balance() to store augmented images\n",
    "img_size=(300,300)  # target image size for the generators below\n",
    "train_df=trim(train_df, max_samples, min_samples, column)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e337f302",
   "metadata": {},
   "outputs": [],
   "source": [
    "channels=3\n",
    "batch_size=10\n",
    "img_shape=(img_size[0], img_size[1], channels)\n",
    "length=len(test_df)\n",
    "# pick the largest divisor of the test-set length that is <= 80, so the test\n",
    "# set is covered exactly by an integer number of equally sized batches\n",
    "test_batch_size=sorted([int(length/n) for n in range(1,length+1) if length % n ==0 and length/n<=80],reverse=True)[0]  \n",
    "test_steps=int(length/test_batch_size)\n",
    "print ( 'test batch size: ' ,test_batch_size, '  test steps: ', test_steps)\n",
    "def scalar(img):    \n",
    "    return img  # identity: EfficientNet expects pixels in range 0 to 255 so no scaling is required\n",
    "trgen=ImageDataGenerator(preprocessing_function=scalar, horizontal_flip=True)\n",
    "tvgen=ImageDataGenerator(preprocessing_function=scalar)  # no augmentation for test/validation\n",
    "msg='                                                              for the train generator'\n",
    "print(msg, '\\r', end='') \n",
    "train_gen=trgen.flow_from_dataframe( train_df, x_col='filepaths', y_col='labels', target_size=img_size, class_mode='categorical',\n",
    "                                    color_mode='rgb', shuffle=True, batch_size=batch_size)\n",
    "msg='                                                              for the test generator'\n",
    "print(msg, '\\r', end='') \n",
    "# shuffle=False keeps the test generator in file order so labels below stay aligned\n",
    "test_gen=tvgen.flow_from_dataframe( test_df, x_col='filepaths', y_col='labels', target_size=img_size, class_mode='categorical',\n",
    "                                    color_mode='rgb', shuffle=False, batch_size=test_batch_size)\n",
    "msg='                                                             for the validation generator'\n",
    "print(msg, '\\r', end='')\n",
    "valid_gen=tvgen.flow_from_dataframe( valid_df, x_col='filepaths', y_col='labels', target_size=img_size, class_mode='categorical',\n",
    "                                    color_mode='rgb', shuffle=True, batch_size=batch_size)\n",
    "classes=list(train_gen.class_indices.keys())\n",
    "class_count=len(classes)\n",
    "train_steps=int(np.ceil(len(train_gen.labels)/batch_size))\n",
    "labels=test_gen.labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45192270",
   "metadata": {},
   "outputs": [],
   "source": [
    "# display one sample batch of training images with their class labels\n",
    "show_image_samples(train_gen)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "be76b54c",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_name='EfficientNetB3'\n",
    "base_model=tf.keras.applications.efficientnet.EfficientNetB3(include_top=False, weights=\"imagenet\",input_shape=img_shape, pooling='max') \n",
    "x=base_model.output\n",
    "x=keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001 )(x)\n",
    "x = Dense(32, kernel_regularizer = regularizers.l2(l = 0.016),activity_regularizer=regularizers.l1(0.006),\n",
    "                bias_regularizer=regularizers.l1(0.006) ,activation='relu')(x)\n",
    "x=Dropout(rate=.45, seed=123)(x)        \n",
    "output=Dense(class_count, activation='softmax')(x)\n",
    "model=Model(inputs=base_model.input, outputs=output)\n",
    "model.compile(Adamax(learning_rate=.001), loss='categorical_crossentropy', metrics=['accuracy']) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3a18c5d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "epochs =20\n",
    "patience= 1 # number of epochs to wait to adjust lr if monitored value does not improve\n",
    "stop_patience =3 # number of epochs to wait before stopping training if monitored value does not improve\n",
    "threshold=.9 # if train accuracy is < threshhold adjust monitor accuracy, else monitor validation loss\n",
    "factor=.5 # factor to reduce lr by\n",
    "dwell=True # experimental, if True and monitored metric does not improve on current epoch set  modelweights back to weights of previous epoch\n",
    "freeze=False # if true free weights of  the base model\n",
    "ask_epoch=5 # number of epochs to run before asking if you want to halt training\n",
    "batches=train_steps\n",
    "callbacks=[LRA(model=model,base_model= base_model,patience=patience,stop_patience=stop_patience, threshold=threshold,\n",
    "                   factor=factor,dwell=dwell, batches=batches,initial_epoch=0,epochs=epochs, ask_epoch=ask_epoch )]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "af65bfd1",
   "metadata": {},
   "outputs": [],
   "source": [
    "history=model.fit(x=train_gen,  epochs=epochs, verbose=0, callbacks=callbacks,  validation_data=valid_gen,\n",
    "               validation_steps=None,  shuffle=False,  initial_epoch=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf5aff88",
   "metadata": {},
   "outputs": [],
   "source": [
    "subject='leukemia'\n",
    "print_code=0\n",
    "preds=model.predict(test_gen) \n",
    "acc=print_info( test_gen, preds, print_code, working_dir, subject ) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "09616c28",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_save_loc, csv_save_loc=saver(working_dir, model, model_name, subject, acc, img_size, 1,  train_gen)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4926bf84",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c14e1862",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}