[367703]: / .ipynb_checkpoints / EEGLearn-checkpoint.ipynb

Download this file

3238 lines (3237 with data), 276.5 kB

{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np \n",
    "import scipy.io as sio\n",
    "import matplotlib.pyplot as plt \n",
    "import seaborn as sn\n",
    "import pandas as pd\n",
    "\n",
    "import torch\n",
    "import os \n",
    "\n",
    "import torch.optim as optim\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "from torch.autograd import Variable\n",
    "from torch.utils.data.dataset import Dataset\n",
    "from torch.utils.data import DataLoader,random_split\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "torch.manual_seed(1234)\n",
    "np.random.seed(1234)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Instancing on the GPU"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "device = torch.device(\"cuda\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Loading the images"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(2670, 3, 32, 32)\n",
      "(2670, 7, 3, 32, 32)\n",
      "(2670,)\n",
      "(2670,)\n"
     ]
    }
   ],
   "source": [
    "Mean_Images = sio.loadmat(\"images.mat\")[\"img\"] #corresponding to the images mean for all the seven windows\n",
    "print(np.shape(Mean_Images)) \n",
    "\n",
    "Images = sio.loadmat(\"images_time.mat\")[\"img\"] #corresponding to the images mean for all the seven windows\n",
    "print(np.shape(Images)) \n",
    "\n",
    "\n",
    "Label = (sio.loadmat(\"FeatureMat_timeWin\")[\"features\"][:,-1]-1).astype(int)\n",
    "print(np.shape(Label)) \n",
    "\n",
    "Patient_id = sio.loadmat(\"trials_subNums.mat\")['subjectNum'][0]\n",
    "print(np.shape(Patient_id))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Dataloader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class EEGImagesDataset(Dataset):\n",
    "    \"\"\"EEGLearn Images Dataset from EEG.\"\"\"\n",
    "    \n",
    "    def __init__(self, label, image):\n",
    "        self.label = Label\n",
    "        self.Images = image\n",
    "        \n",
    "    def __len__(self):\n",
    "        return len(self.label)\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        if torch.is_tensor(idx):\n",
    "            idx = idx.tolist()\n",
    "        image = self.Images[idx]\n",
    "        label = self.label[idx]\n",
    "        sample = (image, label)\n",
    "        \n",
    "        return sample"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### K-Fold Validation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def kfold(length, n_fold):\n",
    "    tot_id = np.arange(length)\n",
    "    np.random.shuffle(tot_id)\n",
    "    len_fold = int(length/n_fold)\n",
    "    train_id = []\n",
    "    test_id = []\n",
    "    for i in range(n_fold):\n",
    "        test_id.append(tot_id[i*len_fold:(i+1)*len_fold])\n",
    "        train_id.append(np.hstack([tot_id[0:i*len_fold],tot_id[(i+1)*len_fold:-1]]))\n",
    "    return train_id, test_id"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Mean Image"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Basic Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BasicCNN(nn.Module):\n",
    "    '''\n",
    "    Build the  Mean Basic model performing a classification with CNN \n",
    "\n",
    "    param input_image: list of EEG image [batch_size, n_window, n_channel, h, w]\n",
    "    param kernel: kernel size used for the convolutional layers\n",
    "    param stride: stride apply during the convolutions\n",
    "    param padding: padding used during the convolutions\n",
    "    param max_kernel: kernel used for the maxpooling steps\n",
    "    param n_classes: number of classes\n",
    "    return x: output of the last layers after the log softmax\n",
    "    '''\n",
    "    def __init__(self, input_image=torch.zeros(1, 3, 32, 32), kernel=(3,3), stride=1, padding=1,max_kernel=(2,2), n_classes=4):\n",
    "        super(BasicCNN, self).__init__()\n",
    "\n",
    "        n_window = input_image.shape[1]\n",
    "        n_channel = input_image.shape[2]\n",
    "\n",
    "        self.conv1 = nn.Conv2d(3,32,kernel,stride=stride, padding=padding)\n",
    "        self.conv2 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n",
    "        self.conv3 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n",
    "        self.conv4 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n",
    "        self.pool1 = nn.MaxPool2d(max_kernel)\n",
    "        self.conv5 = nn.Conv2d(32,64,kernel,stride=stride,padding=padding)\n",
    "        self.conv6 = nn.Conv2d(64,64,kernel,stride=stride,padding=padding)\n",
    "        self.conv7 = nn.Conv2d(64,128,kernel,stride=stride,padding=padding)\n",
    "\n",
    "        self.pool = nn.MaxPool2d((1,1))\n",
    "        self.drop = nn.Dropout(p=0.5)\n",
    "\n",
    "        self.fc1 = nn.Linear(2048,512)\n",
    "        self.fc2 = nn.Linear(512,n_classes)\n",
    "        self.max = nn.LogSoftmax()\n",
    "    \n",
    "    def forward(self, x):\n",
    "        batch_size = x.shape[0]\n",
    "        x = F.relu(self.conv1(x))\n",
    "        x = F.relu(self.conv2(x))\n",
    "        x = F.relu(self.conv3(x))\n",
    "        x = F.relu(self.conv4(x))\n",
    "        x = self.pool1(x)\n",
    "        x = F.relu(self.conv5(x))\n",
    "        x = F.relu(self.conv6(x))\n",
    "        x = self.pool1(x)\n",
    "        x = F.relu(self.conv7(x))\n",
    "        x = self.pool1(x)\n",
    "        x = x.reshape(x.shape[0],x.shape[1], -1)\n",
    "        x = self.pool(x)\n",
    "        x = x.reshape(x.shape[0],-1)\n",
    "        x = self.fc1(x)\n",
    "        x = self.fc2(x)\n",
    "        x = self.max(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "EEG = EEGImagesDataset(label=Label, image=Mean_Images)\n",
    "\n",
    "lengths = [int(2670*0.8), int(2670*0.2)]\n",
    "Train, Test = random_split(EEG, lengths)\n",
    "\n",
    "Trainloader = DataLoader(Train,batch_size=32)\n",
    "Testloader = DataLoader(Test, batch_size=32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 140,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/vdelv/anaconda3/envs/Pytorch_EEG/lib/python3.7/site-packages/ipykernel_launcher.py:52: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Finished Training\n"
     ]
    }
   ],
   "source": [
    "net = BasicCNN().cuda()\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n",
    "\n",
    "for epoch in range(30):  # loop over the dataset multiple times\n",
    "    running_loss = 0.0\n",
    "    evaluation = []\n",
    "    for i, data in enumerate(Trainloader, 0):\n",
    "        # get the inputs; data is a list of [inputs, labels]\n",
    "        inputs, labels = data\n",
    "        # zero the parameter gradients\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        # forward + backward + optimize\n",
    "        outputs = net(inputs.to(torch.float32).cuda())\n",
    "        _, predicted = torch.max(outputs.cpu().data, 1)\n",
    "        evaluation.append((predicted==labels).tolist())\n",
    "        loss = criterion(outputs, labels.cuda())\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        running_loss += loss.item()\n",
    "    running_loss = running_loss/(i+1)\n",
    "    evaluation = [item for sublist in evaluation for item in sublist]\n",
    "    running_acc = sum(evaluation)/len(evaluation)\n",
    "    validation_loss, validation_acc = Test_Model(net, Testloader,True)\n",
    "\n",
    "print('Finished Training')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 139,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/vdelv/anaconda3/envs/Pytorch_EEG/lib/python3.7/site-packages/ipykernel_launcher.py:52: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[5,  10]\tloss: 1.384\tAccuracy : 0.277\t\tval-loss: 1.383\tval-Accuracy : 0.311\n",
      "[10,  10]\tloss: 1.383\tAccuracy : 0.277\t\tval-loss: 1.382\tval-Accuracy : 0.311\n",
      "Finished Training \n",
      " loss: 1.383\tAccuracy : 0.277\t\tval-loss: 1.382\tval-Accuracy : 0.311\n"
     ]
    }
   ],
   "source": [
    "res = TrainTest_Model(BasicCNN, Trainloader, Testloader, n_epoch=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 111,
   "metadata": {},
   "outputs": [],
   "source": [
    "def Test_Model(net, Testloader, is_cuda=True):\n",
    "    running_loss = 0.0 \n",
    "    evaluation = []\n",
    "    for i, data in enumerate(Testloader, 0):\n",
    "        input_img, labels = data\n",
    "        optimizer.zero_grad()\n",
    "        input_img = input_img.to(torch.float32)\n",
    "        if is_cuda:\n",
    "            input_img = input_img.cuda()\n",
    "        outputs = net(input_img)\n",
    "        _, predicted = torch.max(outputs.cpu().data, 1)\n",
    "        evaluation.append((predicted==labels).tolist())\n",
    "        loss = criterion(outputs, labels.cuda())\n",
    "        running_loss += loss.item()\n",
    "    running_loss = running_loss/(i+1)\n",
    "    evaluation = [item for sublist in evaluation for item in sublist]\n",
    "    running_acc = sum(evaluation)/len(evaluation)\n",
    "    return running_loss, running_acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 138,
   "metadata": {},
   "outputs": [],
   "source": [
    "def TrainTest_Model(model, trainloader, testloader, n_epoch=30, opti='SGD', learning_rate=0.0001, is_cuda=True, print_epoch =5):\n",
    "    if is_cuda:\n",
    "        net = model().cuda()\n",
    "    else :\n",
    "        net = model()\n",
    "        \n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    \n",
    "    if opti=='SGD':\n",
    "        optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)\n",
    "    elif opti =='Adam':\n",
    "        optimizer = optim.Adam(CNN.parameters(), lr=learning_rate)\n",
    "    else: \n",
    "        print(\"Optimizer: \"+optim+\" not implemented.\")\n",
    "    \n",
    "    for epoch in range(n_epoch):\n",
    "        running_loss = 0.0\n",
    "        evaluation = []\n",
    "        for i, data in enumerate(Trainloader, 0):\n",
    "            # get the inputs; data is a list of [inputs, labels]\n",
    "            inputs, labels = data\n",
    "            # zero the parameter gradients\n",
    "            optimizer.zero_grad()\n",
    "\n",
    "            # forward + backward + optimize\n",
    "            outputs = net(inputs.to(torch.float32).cuda())\n",
    "            _, predicted = torch.max(outputs.cpu().data, 1)\n",
    "            evaluation.append((predicted==labels).tolist())\n",
    "            loss = criterion(outputs, labels.cuda())\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            running_loss += loss.item()\n",
    "\n",
    "        running_loss = running_loss/(i+1)\n",
    "        evaluation = [item for sublist in evaluation for item in sublist]\n",
    "        running_acc = sum(evaluation)/len(evaluation)\n",
    "        validation_loss, validation_acc = Test_Model(net, Testloader,True)\n",
    "        \n",
    "        if epoch%print_epoch==(print_epoch-1):\n",
    "            print('[%d, %3d]\\tloss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epoch, running_loss, running_acc, validation_loss, validation_acc))\n",
    "    \n",
    "    print('Finished Training \\n loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (running_loss, running_acc, validation_loss,validation_acc))\n",
    "    \n",
    "    return (running_loss, running_acc, validation_loss,validation_acc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 477,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training Fold 1/5\t of Patient 1\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:19: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Finish Training Fold 1/5\t of Patient 1\n",
      "Begin Training Fold 2/5\t of Patient 1\n",
      "Finish Training Fold 2/5\t of Patient 1\n",
      "Begin Training Fold 3/5\t of Patient 1\n",
      "Finish Training Fold 3/5\t of Patient 1\n",
      "Begin Training Fold 4/5\t of Patient 1\n",
      "Finish Training Fold 4/5\t of Patient 1\n",
      "Begin Training Fold 5/5\t of Patient 1\n",
      "Finish Training Fold 5/5\t of Patient 1\n",
      "loss: 0.854\tAccuracy : 0.855\t\tval-loss: 0.956\tval-Accuracy : 0.805\n",
      "Begin Training Fold 1/5\t of Patient 2\n",
      "Finish Training Fold 1/5\t of Patient 2\n",
      "Begin Training Fold 2/5\t of Patient 2\n",
      "Finish Training Fold 2/5\t of Patient 2\n",
      "Begin Training Fold 3/5\t of Patient 2\n",
      "Finish Training Fold 3/5\t of Patient 2\n",
      "Begin Training Fold 4/5\t of Patient 2\n",
      "Finish Training Fold 4/5\t of Patient 2\n",
      "Begin Training Fold 5/5\t of Patient 2\n",
      "Finish Training Fold 5/5\t of Patient 2\n",
      "loss: 0.824\tAccuracy : 0.872\t\tval-loss: 0.897\tval-Accuracy : 0.852\n",
      "Begin Training Fold 1/5\t of Patient 3\n",
      "Finish Training Fold 1/5\t of Patient 3\n",
      "Begin Training Fold 2/5\t of Patient 3\n",
      "Finish Training Fold 2/5\t of Patient 3\n",
      "Begin Training Fold 3/5\t of Patient 3\n",
      "Finish Training Fold 3/5\t of Patient 3\n",
      "Begin Training Fold 4/5\t of Patient 3\n",
      "Finish Training Fold 4/5\t of Patient 3\n",
      "Begin Training Fold 5/5\t of Patient 3\n",
      "Finish Training Fold 5/5\t of Patient 3\n",
      "loss: 0.778\tAccuracy : 0.883\t\tval-loss: 0.900\tval-Accuracy : 0.841\n",
      "Begin Training Fold 1/5\t of Patient 4\n",
      "Finish Training Fold 1/5\t of Patient 4\n",
      "Begin Training Fold 2/5\t of Patient 4\n",
      "Finish Training Fold 2/5\t of Patient 4\n",
      "Begin Training Fold 3/5\t of Patient 4\n",
      "Finish Training Fold 3/5\t of Patient 4\n",
      "Begin Training Fold 4/5\t of Patient 4\n",
      "Finish Training Fold 4/5\t of Patient 4\n",
      "Begin Training Fold 5/5\t of Patient 4\n",
      "Finish Training Fold 5/5\t of Patient 4\n",
      "loss: 0.770\tAccuracy : 0.960\t\tval-loss: 0.797\tval-Accuracy : 0.950\n",
      "Begin Training Fold 1/5\t of Patient 6\n",
      "Finish Training Fold 1/5\t of Patient 6\n",
      "Begin Training Fold 2/5\t of Patient 6\n",
      "Finish Training Fold 2/5\t of Patient 6\n",
      "Begin Training Fold 3/5\t of Patient 6\n",
      "Finish Training Fold 3/5\t of Patient 6\n",
      "Begin Training Fold 4/5\t of Patient 6\n",
      "Finish Training Fold 4/5\t of Patient 6\n",
      "Begin Training Fold 5/5\t of Patient 6\n",
      "Finish Training Fold 5/5\t of Patient 6\n",
      "loss: 0.814\tAccuracy : 0.888\t\tval-loss: 0.872\tval-Accuracy : 0.872\n",
      "Begin Training Fold 1/5\t of Patient 7\n",
      "Finish Training Fold 1/5\t of Patient 7\n",
      "Begin Training Fold 2/5\t of Patient 7\n",
      "Finish Training Fold 2/5\t of Patient 7\n",
      "Begin Training Fold 3/5\t of Patient 7\n",
      "Finish Training Fold 3/5\t of Patient 7\n",
      "Begin Training Fold 4/5\t of Patient 7\n",
      "Finish Training Fold 4/5\t of Patient 7\n",
      "Begin Training Fold 5/5\t of Patient 7\n",
      "Finish Training Fold 5/5\t of Patient 7\n",
      "loss: 0.785\tAccuracy : 0.893\t\tval-loss: 0.862\tval-Accuracy : 0.885\n",
      "Begin Training Fold 1/5\t of Patient 8\n",
      "Finish Training Fold 1/5\t of Patient 8\n",
      "Begin Training Fold 2/5\t of Patient 8\n",
      "Finish Training Fold 2/5\t of Patient 8\n",
      "Begin Training Fold 3/5\t of Patient 8\n",
      "Finish Training Fold 3/5\t of Patient 8\n",
      "Begin Training Fold 4/5\t of Patient 8\n",
      "Finish Training Fold 4/5\t of Patient 8\n",
      "Begin Training Fold 5/5\t of Patient 8\n",
      "Finish Training Fold 5/5\t of Patient 8\n",
      "loss: 0.780\tAccuracy : 0.951\t\tval-loss: 0.795\tval-Accuracy : 0.953\n",
      "Begin Training Fold 1/5\t of Patient 9\n",
      "Finish Training Fold 1/5\t of Patient 9\n",
      "Begin Training Fold 2/5\t of Patient 9\n",
      "Finish Training Fold 2/5\t of Patient 9\n",
      "Begin Training Fold 3/5\t of Patient 9\n",
      "Finish Training Fold 3/5\t of Patient 9\n",
      "Begin Training Fold 4/5\t of Patient 9\n",
      "Finish Training Fold 4/5\t of Patient 9\n",
      "Begin Training Fold 5/5\t of Patient 9\n",
      "Finish Training Fold 5/5\t of Patient 9\n",
      "loss: 0.769\tAccuracy : 0.965\t\tval-loss: 0.808\tval-Accuracy : 0.930\n",
      "Begin Training Fold 1/5\t of Patient 10\n",
      "Finish Training Fold 1/5\t of Patient 10\n",
      "Begin Training Fold 2/5\t of Patient 10\n",
      "Finish Training Fold 2/5\t of Patient 10\n",
      "Begin Training Fold 3/5\t of Patient 10\n",
      "Finish Training Fold 3/5\t of Patient 10\n",
      "Begin Training Fold 4/5\t of Patient 10\n",
      "Finish Training Fold 4/5\t of Patient 10\n",
      "Begin Training Fold 5/5\t of Patient 10\n",
      "Finish Training Fold 5/5\t of Patient 10\n",
      "loss: 0.774\tAccuracy : 0.939\t\tval-loss: 0.803\tval-Accuracy : 0.943\n",
      "Begin Training Fold 1/5\t of Patient 11\n",
      "Finish Training Fold 1/5\t of Patient 11\n",
      "Begin Training Fold 2/5\t of Patient 11\n",
      "Finish Training Fold 2/5\t of Patient 11\n",
      "Begin Training Fold 3/5\t of Patient 11\n",
      "Finish Training Fold 3/5\t of Patient 11\n",
      "Begin Training Fold 4/5\t of Patient 11\n",
      "Finish Training Fold 4/5\t of Patient 11\n",
      "Begin Training Fold 5/5\t of Patient 11\n",
      "Finish Training Fold 5/5\t of Patient 11\n",
      "loss: 0.768\tAccuracy : 0.944\t\tval-loss: 0.832\tval-Accuracy : 0.920\n",
      "Begin Training Fold 1/5\t of Patient 12\n",
      "Finish Training Fold 1/5\t of Patient 12\n",
      "Begin Training Fold 2/5\t of Patient 12\n",
      "Finish Training Fold 2/5\t of Patient 12\n",
      "Begin Training Fold 3/5\t of Patient 12\n",
      "Finish Training Fold 3/5\t of Patient 12\n",
      "Begin Training Fold 4/5\t of Patient 12\n",
      "Finish Training Fold 4/5\t of Patient 12\n",
      "Begin Training Fold 5/5\t of Patient 12\n",
      "Finish Training Fold 5/5\t of Patient 12\n",
      "loss: 0.767\tAccuracy : 0.924\t\tval-loss: 0.810\tval-Accuracy : 0.944\n",
      "Begin Training Fold 1/5\t of Patient 14\n",
      "Finish Training Fold 1/5\t of Patient 14\n",
      "Begin Training Fold 2/5\t of Patient 14\n",
      "Finish Training Fold 2/5\t of Patient 14\n",
      "Begin Training Fold 3/5\t of Patient 14\n",
      "Finish Training Fold 3/5\t of Patient 14\n",
      "Begin Training Fold 4/5\t of Patient 14\n",
      "Finish Training Fold 4/5\t of Patient 14\n",
      "Begin Training Fold 5/5\t of Patient 14\n",
      "Finish Training Fold 5/5\t of Patient 14\n",
      "loss: 0.795\tAccuracy : 0.904\t\tval-loss: 0.895\tval-Accuracy : 0.854\n",
      "Begin Training Fold 1/5\t of Patient 15\n",
      "Finish Training Fold 1/5\t of Patient 15\n",
      "Begin Training Fold 2/5\t of Patient 15\n",
      "Finish Training Fold 2/5\t of Patient 15\n",
      "Begin Training Fold 3/5\t of Patient 15\n",
      "Finish Training Fold 3/5\t of Patient 15\n",
      "Begin Training Fold 4/5\t of Patient 15\n",
      "Finish Training Fold 4/5\t of Patient 15\n",
      "Begin Training Fold 5/5\t of Patient 15\n",
      "Finish Training Fold 5/5\t of Patient 15\n",
      "loss: 0.772\tAccuracy : 0.967\t\tval-loss: 0.768\tval-Accuracy : 0.986\n"
     ]
    }
   ],
   "source": [
    "p = 0\n",
    "fold_vloss = np.zeros((n_fold,n_patient))\n",
    "fold_loss = np.zeros((n_fold,n_patient))\n",
    "fold_vacc = np.zeros((n_fold,n_patient))\n",
    "fold_acc = np.zeros((n_fold,n_patient))\n",
    "for patient in np.unique(Patient):\n",
    "    id_patient = np.arange(len(Mean_Images))[Patient==patient]\n",
    "    n_fold = 5\n",
    "    length = len(id_patient)\n",
    "    \n",
    "    n_patient = len(np.unique(Patient))\n",
    "    \n",
    "    train_id, test_id = kfold(length,n_fold)\n",
    "    \n",
    "    for fold in range(n_fold):\n",
    "        X_train = Mean_Images[id_patient[train_id[fold]]]\n",
    "        X_test = Mean_Images[id_patient[test_id[fold]]]\n",
    "        y_train = Label[id_patient[train_id[fold]]]\n",
    "        y_test = Label[id_patient[test_id[fold]]] \n",
    "\n",
    "        print(\"Begin Training Fold %d/%d\\t of Patient %d\" % \n",
    "             (fold+1,n_fold, patient))\n",
    "\n",
    "        CNN = BasicCNN().cuda(0)\n",
    "        criterion = nn.CrossEntropyLoss()\n",
    "        optimizer = optim.SGD(CNN.parameters(), lr=0.001, momentum=0.9)\n",
    "\n",
    "        n_epochs = 50\n",
    "        for epoch in range(n_epochs):\n",
    "            running_loss = 0.0\n",
    "            batchsize = 5\n",
    "            for i in range(int(len(y_train)/batchsize)):\n",
    "                optimizer.zero_grad()\n",
    "\n",
    "                # forward + backward + optimize\n",
    "                outputs = CNN(torch.from_numpy(X_train[i:i+batchsize]).to(torch.float32).cuda())\n",
    "                loss = criterion(outputs, torch.from_numpy(y_train[i:i+batchsize]).to(torch.long).cuda())\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                running_loss += loss.item()\n",
    "\n",
    "            #acc\n",
    "            _, idx = torch.max(CNN(torch.from_numpy(X_train[:]).to(torch.float32).cuda()).data,1)\n",
    "            acc = (idx == torch.from_numpy(y_train).cuda()).sum().item()/len(y_train)\n",
    "\n",
    "            #val Loss\n",
    "            val_outputs = CNN(torch.from_numpy(X_test[:]).to(torch.float32).cuda())\n",
    "            val_loss = criterion(val_outputs, torch.from_numpy(y_test[:]).to(torch.long).cuda())\n",
    "            _, idx = torch.max(val_outputs.data,1)\n",
    "            val_acc = (idx == torch.from_numpy(y_test).cuda()).sum().item()/len(y_test)\n",
    "\n",
    "            #if epoch%10==0:\n",
    "            #    print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "            #     (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "        fold_vloss[fold, p ] = val_loss.item()\n",
    "        fold_loss[fold, p] = running_loss/i\n",
    "        fold_vacc[fold, p] = val_acc\n",
    "        fold_acc[fold, p] = acc\n",
    "        print('Finish Training Fold %d/%d\\t of Patient %d' % \n",
    "             (fold+1,n_fold, patient))\n",
    "    print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n",
    "    \n",
    "    p = p + 1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Peresented Results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 493,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAtcAAAKUCAYAAADPQhSfAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOzdfZild10f/vfHXR4qIWGX4CokkICRDhkrmhTaH2u744pEfq2pIpWxRqCj+dmf2bZAqeBACNFRfKjaKrVFJi0IzooppalNDUjOqFv1MknFBzJFQ3haowLZ5WERhF2+/eOcCbPDzO7M5D7zcPb1uq5z7bmfP585M5P33Pne912ttQAAAA/el2x1AQAAMCqEawAA6IhwDQAAHRGuAQCgI8I1AAB0RLgGAICOCNcAm6yqXlBVR5ZMn6iqJ65l3Q0c639W1fM3uj0A6yNcA9teVX1nVd05CKF/PgiM+7eolodX1ceq6htWWPbTVXXzevfZWjuvtXZvB7XdUFVvWrbvb26tveHB7vssx2xV9bRhHQNgJxGugW2tql6c5GeS/EiSfUken+TfJ7l6lfV3D7Oe1tpnkvxyku9edtxdSSaTDC3IbjdVVUmuSXIsyaaeHa8+/w0Dth2/mIBtq6ouSHJjku9vrb21tfap1trnWmv/vbX20sE6N1TVzVX1pqr6RJIXVNXDqupnquq+wetnquphg/UvrKpfHZx9PlZVv7UY0qrqB6rqz6rqk1X1nqo6uEppb0jynKr60iXznpX+79T/OdjXy6rqvYN93V1V33qGPltVfeXg/aOr6paq+kRV/V6SJy1b999W1YcGy++qqq8fzL8qyQ8m+Y7BGf4/GMyfr6rvGbz/kqp6RVV9oKo+XFVvHHyNU1WXDOp4flV9sKo+WlXTZ/mIvj7JY5P8iyTPq6qHLqv1e6tqYcnX4OsG8y+uqrdW1Ueq6v6q+rnB/NPOvC+pafeSXmaq6n8l+askT6yqFy45xr1V9f8tq+HqqnrX4Ov13qq6qqqeW1V3LVvvJVX1trP0C3BWwjWwnf3dJA9P8l/Pst7VSW5O8qgkb04yneTvJHlqkq9J8rQkrxis+5IkR5M8Jv0z4T+YpFXVk5Ncl+Rvt9YemX5Yfv9KB2ut/XaSP0/ybUtmX5Pkl1prJwfT700/fF6Q5NVJ3lRVX7GGnl+b5DNJviLJPx28lrpj0NfeJL+U5Feq6uGttV9L/+z+Lw+GmXzNCvt+weA1keSJSc5L8nPL1tmf5MlJDia5vqrGzlDr85P89/TP5CfJP1hcUFXPTXJD+mf4z0/yLUnuH5zh/9UkH0hySZLHJTl8hmMsd02Sa5M8crCPDw+Oe36SFyb56SUh/mlJ3pjkpel/b/y99D/TW5Jcuqy370ryi+uoA2BFwjWwnT06yUeXBNbV/E5r7W2ttc+31j6d5J8kubG19uHW2kfSD7fXDNb9XPrB9QmDs+C/1VprSU4leViSp1TVQ1pr72+tvfcMx3xjBkNDqur89AP+A0NCWmu/0lq7b1DTLyf50/RD/qoGwfM5Sa4fnKX/4ywbZtJae1Nr7f7W2snW2r8Z1Pzks3x9Fv2TJD/VWru3tXYiycvTP+O8dCjNq1trn26t/UGSP0j/j5OVav3SJM9N/w+Kz6X/x83SoSHfk+THW2t3tL57WmsfGHwNHpvkpYMeP9NaW88Fm/+5tfbuQf+fa639j9baewfH+I0kb0//j5okmUpyU2vtHYPP4c9aa/+ntfbX6f9B8F2DXi5PP+j/6jrqAFiRcA1sZ/cnuXAN46g/tGz6semf1Vz0gcG8JPmJJPckeftgGMHLkqS1dk+Sf5n+2dYPV9Xhqnps8sDdPBZfjx/s541JJqrqcUm+Pck9rbXfXzxgVX33YDjCx6rqY0nGk1x4lj4ek2T3sn6W9rE4fGGhqj4+2O8Fa9jvopW+LrvTP4O/6C+WvP+r9M9ur+Rbk5xMcutg+s1JvrmqHjOYvjj9s/fLXZzkA2v4g2k1p33WVfXNVfW7gyE+H0vy7Hzh67FaDUn/j5bvrHpg3PhbBqEb4EERroHt7HfSHyLxj86yXls2fV+SJyyZfvxgXlprn2ytvaS19sQk/zDJixfHVrfWfqm1tn+wbUvyY4P55y15fXAw74NJfiv9s8HXpB+2kyRV9YQkv5D+MJNHt9YeleSPk9RZ+vhI+oH14mW1L+7365P8QJJ/nGTPYL8fX7Lf5V+H5Vb6upxM8pdn2W4lz08/eH+wqv4iya8keUj6F3Um/RD8pBW2+1CSx6/yB9Onkiwdx/7lK6zzQI/VH0f/X5L8ZJJ9g6/HrfnC12O1GtJa+90kn03/LPd3xpAQoCPCNbBttdY+nuT6JK+tqn9UVV9aVQ8ZnK388TNsOpfkFVX1mKq6cLCPNyVJVf2DqvrKwRnLT6Q/HORUVT25qr5hENg+k+TTg2Vn8ob0A/Qz0j9zu+gR6YfAjwyO+cL0z1yfrd9TSd6a5IZBr0/J6UMtHpl+GP5Ikt1VdX36Y40X/WWSS2r1u2jMJXlRVV1aVeflC2O013UWeXC2/mD6Y52fmi+Mbf+xJfW+Psm/qqorqu8rB390/F7649VfU1WPqP6tDZ8x2OZdSf5eVT1+cKHly89SykPTHxbzkSQnq+qbk3zTkuWzSV5YVQerfzHn46rqby5Z/sb0x5yfXOfQFIBVCdfAttZa+6kkL07/gsSPpH828rokZ7qzww8nuTPJHyb5oyT/ezAvSS5L8utJTqR/Zvzft9bm0w9pr0ny0fSHRnxZ+hc7nsnNSfYkeWdr7c+X1Hx3kn8z2P9fJvnqJP9rLf0OejtvUMN/TvKfliy7Lf27kfxJ+kM6PpPTh0n8yuDf+6vqf6+w75vSP0P7m0neN9j+0BrrWuqaJO9qrb29tfYXi68k/y7J36qq8dbarySZSf+iy0+m/3ntHfwB8Q+TfGWSD6Z/cel3JElr7R3pj4X+wyR35SxjoFtrn0zyz5O8Jcnx9M9A37Jk+e9lcJFj+mf4fyOnn7n/xfT/6HHWGuhM9a/jAYBzS1X9jfTvNvJ1rbU/3ep6gNHgzDUA56p/luQOwRro0lCfZAYA21FVvT/9Cx/PdrEswLoYFgIAAB0xLAQAADoiXANsgqpqVfWVHe/zB6vq9V3uE4AHR7gG2KFaaz/SWvuejWxbVc+qqt+sqk9W1Ueq6jeq6lsGy14w+GPgpcu2OVpVBwbvbxis89wly3cP5l2y4aYAdjjhGuAcU1Xfnv49sd+Y5KL0H39+ffr3n150LMkPVNX5X7yH09a5sap2DatWgJ1GuAbYPM+uqnur6qNV9ROLT1KsqidV1e1Vdf9g2Zur6lGLG1XVD1TVnw3OMr9n8XHtg7PHb1qy3v6q+u2q+lhVfaiqXrC8gMGTKX8qyQ+11l7fWvt4a+3zrbXfaK1975JVF9J/CM6LztDPr6X/CPHvehBfE4CRIlwDbJ5vTXJlkq9LcnWSfzqYX0l+NMljk4wluTjJDUlSVU9O/6mNf7u19sgkz0ry/uU7rqrHp//0xp9N8pj0H0n+rhVqePJg/zevod5Xpv+49L2rLG+DdV5VVQ9Zw/4ARp5wDbB5fqy1dqy19sEkP5NkMklaa/e01t7RWvvr1tpH0j+z/PcH25xK/9HsT6mqh7TW3t9ae+8K+/4nSX69tTbXWvtca+3+1tpK4frRg3//fIVlpxls//YkP3CGdW5J/7H0Gxr7DTBqhGuAzfOhJe8/kP6Z6lTVl1XV4cHQj08keVOSC5N+8E7yL9M/k/3hwXqPXWHfFydZKXQvd//g369YY83XJ/lnVfXlZ1jnFUmmkzx8jfsEGFnCNcDmuXjJ+8cnuW/w/kfTH2Lxt1pr56c/hrkWV2yt/VJrbX+SJwzW+7EV9v2hJE9aQw3vGaz7nLUU3Fr7P0nemuQHz7DOO5Lck+T/X8s+AUaZcA2weV5aVXuq6uIk/yLJLw/mPzLJiSQfq6rHJXngFnhV9eSq+oaqeliSzyT5dPpDRZZ7c5JvrKp/PLgl3qOr6qnLV2r9x/K+OMkrq+qFVXV+VX3J4GLI161S96uTvDDJo1ZZnvTPXP/rMzUPcC4QrgE2z39Lclf6Fxr+jySzg/mvTv8ix48P5r91yTYPS/KaJB9N8hdJviwrnEUejON+dpKXpH+LvHcl+ZqVimit3ZzkO9K/oPK+JH+Z5IcH9a20/vuS/GKSR6zWWGvtfyX5vdWWA5wrqn8SAwAAeLCcuQYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoyO6tLqArF154YbvkkkuGfpxPfepTecQjHjH042yWUepnlHpJRqufUeolGa1+RqmXZLT6GaVektHqZ5R6SUarn83q5a677vpoa+0xKy0bmXB9ySWX5M477xz6cebn53PgwIGhH2ezjFI/o9RLMlr9jFIvyWj1M0q9JKPVzyj1koxWP6PUSzJa/WxWL1X1gdWWGRYCAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4MLVxX1U1V9eGq+uNVlldV/buquqeq/rCqvm7JsudX1Z8OXs8fVo0A56q5ubmMj4/n4MGDGR8fz9zc3FaXBNuenxvWYvcQ9/2fk/xckjeusvybk1w2eD09yc8neXpV7U3yqiRXJmlJ7qqqW1prx4dYK8A5Y25uLtPT05mdnc2pU6eya9euTE1NJUkmJye3uDrYnvzcsFZDO3PdWvvNJMfOsMrVSd7Y+n43yaOq6iuSPCvJO1prxwaB+h1JrhpWnQDnmpmZmczOzmZiYiK7d+/OxMREZmdnMzMzs9Wlwbbl54a1qtba8HZedUmSX22tja+w7FeTvKa1dmQw/c4kP5DkQJKHt9Z+eDD/lUk+3Vr7yRX2cW2Sa5Nk3759Vxw+fHg4jSxx4sSJnHfeeUM/zmYZpX5GqZdkZ/YzMTGxoe16vV7HlTx4o9TLcgcPHsxtt92W3bt3P/B9dvLkyTzrWc/KO9/5zq0u70HZiT83qxmlXpKd34+fm813YP7qTTvW/IH/tq71JyYm7mqtXbnSsmEOCzmbWmFeO8P8L57Z2uuSvC5JrrzyynbgwIHOilvN/Px8NuM4m2WU+hmlXpKd2c9qf6xX1arLtqsz1bsT+1lqbGwsu3btyoEDBx74Puv1ehkbG9tx33PL7cSfm9WMUi/Jzu/Hz80WOPDxdW+y0V7Wv8XqtvJuIUeTXLxk+qIk951hPgAdmJ6eztTUVHq9Xk6ePJler5epqalMT09vdWmwbfm5Ya228sz1LUmuq6rD6V/Q+PHW2p9X1W1JfqSq9gzW+6YkL9+qIgFGzeLFV4cOHcrCwkLGxsYyMzPjoiw4Az83rNXQwnVVzaV/lv3Cqjqa/h1AHpIkrbX/kOTWJM9Ock+Sv0rywsGyY1X1Q0nuGOzqxtbamS6MBGCdJicnMzk5uX3/dzBsQ35uWIuhhevW2hn/lGv9AYvfv8qym5LcNIy6AABgWDyhEQAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAzN3NxcxsfHc/DgwYyPj2dubm6rS4Kh2r3VBQAAo2lubi7T09OZnZ3NqVOnsmvXrkxNTSVJJicnt7g6GA5nrgGAoZiZmcns7GwmJiaye/fuTExMZHZ2NjMzM1tdGgyNcA0ADMXCwkL2799/2rz9+/dnYWFhiyqC4ROuAYChGBsby5EjR06bd+TIkYyNjW1RRTB8xlwDwJDt3bs3x48fH/px9uzZk2PHjg39OGs1PT2dqampB8Zc93q9TE1NGRbCSBOuAWDIjh8/ntbauraZn5/PgQMH1rVNVa1r/WFbvGjx0KFDWVhYyNjYWGZmZlzMyEgTrmETnKtnrQAmJyczOTm5oT8WYCcy5ho2weJZq/W8er3eurfZjAAPAKxOuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI7s3uoCAAC2zA0XrHuTA0kyv5FjfXwDG7HTDDVcV9VVSf5tkl1JXt9ae82y5U9IclOSxyQ5luS7WmtHB8tOJfmjwaofbK19yzBrBWDnmpuby8zMTBYWFjI2Npbp6elMTk5udVnsAPXqT6S1tq5t5ufnc+DAgfUdpyrthnVtwg41tHBdVbuSvDbJM5McTXJHVd3SWrt7yWo/meSNrbU3VNU3JPnRJNcMln26tfbUYdUHwGiYm5vL9PR0Zmdnc+rUqezatStTU1NJImADm26YY66fluSe1tq9rbXPJjmc5Opl6zwlyTsH73srLAeAM5qZmcns7GwmJiaye/fuTExMZHZ2NjMzM1tdGmwbVbXqa2JiYtVlrN8wh4U8LsmHlkwfTfL0Zev8QZLnpD905FuTPLKqHt1auz/Jw6vqziQnk7ymtfa25QeoqmuTXJsk+/bty/z8fOdNLHfixIlNOc5mGaV+tnMv7VXnr3tc34Fk3WP62qvO37Zfg+1a10aNSj/b+edmrRYWFnLq1KnMz88/0M+pU6eysLCwrXpbby0b/Wy2U89LbefvtXPhs+n1eqsum5iYWHX5dv3MVrMtvs9aa0N5JXlu+uOsF6evSfKzy9Z5bJK3Jvn99AP20SQXLC4b/PvEJO9P8qQzHe+KK65om6HX623KcTbLKPUzSr20Nlr99H/VjI5R6mcUvs8uv/zydvvtt7fWvtDP7bff3i6//PItrOp0G/me2chns52/N7fr95rPZnvXtl6b9X2W5M62SiYd5pnro0kuXjJ9UZL7lq7QWrsvybclSVWdl+Q5rbWPL1mW1tq9VTWf5GuTvHeI9bJDbPR/U7V1XrDCyvbu3Zvjx4+ve7v1fm579uzJsWPH1n0czj3T09OZmpp6YMx1r9fL1NSUYSHAlhhmuL4jyWVVdWmSP0vyvCTfuXSFqrowybHW2ueTvDz9O4ekqvYk+avW2l8P1nlGkh8fYq3sIKuF5KoSoDfB8ePHN+3KeliLxYsWDx069MDdQmZmZlzMCGyJoYXr1trJqrouyW3p34rvptbau6vqxvRPpd+S/rDSH62qluQ3k3z/YPOxJP+xqj6f/kWXr2mn32UEgOXO4fv1Tk5OZnJyckN/yAF0aaj3uW6t3Zrk1mXzrl/y/uYkN6+w3W8n+eph1gYwcjYQeIVRgG55/DkAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAMCONjc3l/Hx8Rw8eDDj4+OZm5vbslp2b9mRAYAdae/evTl+/PjQj7Nnz54cO3Zs6MdhZ5ubm8v09HRmZ2dz6tSp7Nq1K1NTU0mSycnJTa/HmWsAYF2OHz+e1tq6Xr1eb93bbEaAZ+ebmZnJ7OxsJiYmsnv37kxMTGR2djYzMzNbUo9wDQDAjrWwsJD9+/efNm///v1ZWFjYknqEawAAdqyxsbEcOXLktHlHjhzJ2NjYltRjzDUAcE6rqqEfY8+ePUM/xrlqeno6U1NTD4y57vV6mZqa2rJhIcI1AHDOaq2te5v5+fkcOHCg+2LYkMWLFg8dOpSFhYWMjY1lZmZmSy5mTIRrABi69qrzkxsuWNc2B5JkfgPHgXPQ5ORkJicnt8UfPsI1AAzbDR9f9yZVtaGzqsDWckEjAAB0RLgGABgRe/fuTVWt65Vk3dvs3bt3izvdvoRrAIAR4QE/W8+Ya+CcttHHOK/31l0e47wB67wAMNnYRYD9Y61/TDTASoRr4Jy2eJZnPTZyNfpm3Ed35Gwg8G6HOwUA5zbDQgDOQXNzcxkfH8/BgwczPj6eubm5rS4JYCQ4cw1wjpmbm8v09PQDTzPbtWtXpqamkmTLHroAMCqcuQY4x8zMzGR2djYTExPZvXt3JiYmMjs7u2WPCgYYJcI1wDlmYWEh+/fvP23e/v37s7CwsEUVAYwO4RrgHDM2NpYjR46cNu/IkSMZGxvboooARodwDXCOmZ6eztTUVHq9Xk6ePJler5epqalMT09vdWkAO54LGgHOMYsXLR46dCgLCwsZGxvLzMyMixkBOiBcA5yDJicnMzk56b7QAB0zLAQAADoiXAMAQEeEawAA6IhwDQAAHRGuAQCgI8I1AAB0RLgGAICOuM81ALAu7VXnJzdcsK5tDiTJ/AaOAzuMcA0ArM8NH1/3Jh5YxLnCsBAAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA64j7Xo2SdN/RPNnZT//6x1n+PUwC+WFVtaFlrbRjlAA+ScD1C6tWfWPcv243c1L+q0m5Y1yYArGK139seugI7k3ANADAiPJp+6w01XFfVVUn+bZJdSV7fWnvNsuVPSHJTksckOZbku1prRwfLnp/kFYNVf7i19oZh1grbxdzcXGZmZrKwsJCxsbFMT09ncnJyq8sCYAfwf7G33tDCdVXtSvLaJM9McjTJHVV1S2vt7iWr/WSSN7bW3lBV35DkR5NcU1V7k7wqyZVJWpK7BtseH1a9sB3Mzc1leno6s7OzOXXqVHbt2pWpqakkEbABYAcY5t1Cnpbkntbava21zyY5nOTqZes8Jck7B+97S5Y/K8k7WmvHBoH6HUmuGmKtsC3MzMxkdnY2ExMT2b17dyYmJjI7O5uZmZmtLg0AWINhDgt5XJIPLZk+muTpy9b5gyTPSX/oyLcmeWRVPXqVbR+3/ABVdW2Sa5Nk3759mZ+f76r2VZ04cWJTjrNR661to/1s16/Bdq1rrRYWFnLq1KnMz88/8NmcOnUqCwsL26q3Ufs+G7V+1mO7/05br1HqZ5R6SUarn+3ei99p81tbRGttKK8kz01/nPXi9DVJfnbZOo9N8tYkv59+wD6a5IIkL03yiiXrvTLJS850vCuuuKJthl6vtynH2Yj+x7k+G+lnI8fZDNu1rvW4/PLL2+23395a+8Jnc/vtt7fLL798C6s63ah9n41aP+u1nX+nbcQo9TNKvbQ2Wv1s5178TuttynGS3NlWyaTDHBZyNMnFS6YvSnLf0hVaa/e11r6ttfa1SaYH8z6+lm1hFE1PT2dqaiq9Xi8nT55Mr9fL1NRUpqent7o0AGANhjks5I4kl1XVpUn+LMnzknzn0hWq6sIkx1prn0/y8vTvHJIktyX5karaM5j+psFyGGmLFy0eOnTogbuFzMzMbKuLGd3mCQBWN7Rw3Vo7WVXXpR+UdyW5qbX27qq6Mf1T6bek/9/cH62qluQ3k3z/YNtjVfVD6Qf0JLmxtXZsWLXCdjI5OZnJyclt+wAJt3kCgNUN9T7XrbVbk9y6bN71S97fnOTmVba9KV84kw0AANveMMdcAwDAOUW4BgCAjgjXAADQEeEaAAA6MtQLGneyqtrQduu9iwIAAKNDuF7FaiG5qgRoAABWZFgIAAB0RLgGAICOCNcAANAR4RoAADoiXAMAQEeEawCANZibm8v4+HgOHjyY8fHxzM3NbXVJbENuxQcAcBZzc3OZnp7O7OxsTp06lV27dmVqaipJMjk5ucXVsZ04cw0AcBYzMzOZnZ3NxMREdu/enYmJiczOzmZmZmarS2ObEa4BAM5iYWEh+/fvP23e/v37s7CwsEUVsV0J1wAAZzE2NpYjR46cNu/IkSMZGxvboorYroRrAICzmJ6eztTUVHq9Xk6ePJler5epqalMT09vdWlsMy5oBAA4i8WLFg8dOpSFhYWMjY1lZmbGxYx8EeEaAGANJicnMzk5mfn5+Rw4cGCry2GbMiwEAAA6IlwDAEBHDAsBABghVTX0Y+zZs2fox9ipnLkGABgRrbV1v3q93rq3OXbs2Fa3um0J1wAA0BHhGgAAOmLMNXBOa686P7nhgnVtcyBJ5jdwHABGnnANnNPq1Z9Ia21d22zkHrdVlXbDujYBYAcyLAQAADoiXAMAQEeEa7alvXv3pqrW9Uqy7m327t27xZ0CAKNEuGZbOn78+Kbcp/P48eNb3SoAMEKEawAA6IhwDQAAHRGuAYChmZuby/j4eA4ePJjx8fHMzc1tdUkwVO5zDQAMxdzcXKanpzM7O5tTp05l165dmZqaSpJMTk5ucXUwHM5cAwBDMTMzk9nZ2UxMTGT37t2ZmJjI7OxsZmZmtro0GBrhGgAYioWFhezfv/+0efv378/CwsIWVQTDJ1wDAEMxNjaWI0eOnDbvyJEjGRsb26KKYPjO6XDtQSUAMDzT09OZmppKr9fLyZMn0+v1MjU1lenp6a0uDYbmnL6gcfFBJesxPz+fAwcOrGubxVAOAOeSxYsWDx06lIWFhYyNjWVmZsbFjIy0czpcAwDDNTk5mcnJyQ2dnIKd6JweFgIAAF0SrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQkaGG66q6qqreU1X3VNXLVlj++KrqVdXvV9UfVq/LnxQAACAASURBVNWzB/MvqapPV9W7Bq//MMw6AQCgC7uHteOq2pXktUmemeRokjuq6pbW2t1LVntFkre01n6+qp6S5NYklwyWvbe19tRh1QcAAF0b5pnrpyW5p7V2b2vts0kOJ7l62TotyfmD9xckuW+I9QAAwFAN7cx1kscl+dCS6aNJnr5snRuSvL2qDiV5RJJvXLLs0qr6/SSfSPKK1tpvLT9AVV2b5Nok2bdvX+bn59dd5Hq3OXHixKYcZ6NGqZ9R6mUjNtrPZhi1z2bU+lmP7fx9thGj1M8o9ZKMVj+j1EsyWv1si15aa0N5JXluktcvmb4myc8uW+fFSV4yeP93k9yd/tn0hyV59GD+FemH9PPPdLwrrriirVe//fXp9XqbcpyNGKV+RqmXjdpIP5th1D6bUetnvbbr99lGjVI/o9RLa6PVzyj10tpo9bNZvSS5s62SSYc5LORokouXTF+ULx72MZXkLen/V+d3kjw8yYWttb9urd0/mH9Xkvcm+aoh1goAAA/aMMP1HUkuq6pLq+qhSZ6X5JZl63wwycEkqaqx9MP1R6rqMYMLIlNVT0xyWZJ7h1grAAA8aEMbc91aO1lV1yW5LcmuJDe11t5dVTemfyr9liQvSfILVfWi9C9ufEFrrVXV30tyY1WdTHIqyfe11o4Nq1YAAOjCMC9oTGvt1vRvr7d03vVL3t+d5BkrbPdfkvyXYdbG9tZedX5ywwXr2uZAksxv4DisW1UN/Rh79uwZ+jEAoGtDDdewUfXqTyxe9Lpm8/PzOXDgwPqOU5V2w7o2Oeet93NJNvbZAMBO5PHnAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjpzTD5HxFEAAALp0TodrTwEEAKBLhoUAAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQkd1bXQDAVquqoR9jz549Qz8GAFtPuAbOaa21dW9TVRvaDoDRZ1gIAAB0RLgGAICOCNcAANCRs4brqrquqlyJAwAAZ7GWM9dfnuSOqnpLVV1Vm3FZPQAA7EBnDdettVckuSzJbJIXJPnTqvqRqnrSkGsDAIAdZU1jrlv/nlN/MXidTLInyc1V9eNDrA0AAHaUs97nuqr+eZLnJ/loktcneWlr7XNV9SVJ/jTJvx5uiQAAsDOs5SEyFyb5ttbaB5bObK19vqr+wXDKAgCAnWctw0JuTXJscaKqHllVT0+S1trCsAoDAICdZi3h+ueTnFgy/anBPAAAYIm1hOsaXNCYpD8cJGsbTgIAAOeUtYTre6vqn1fVQwavf5Hk3mEXBgAAO81awvX3Jfl/kvxZkqNJnp7k2mEWBQAAO9FZh3e01j6c5HmbUAsAAOxoa7nP9cOTTCW5PMnDF+e31v7pEOsCAIAdZy3DQn4xyZcneVaS30hyUZJPDrMoAADYidYSrr+ytfbKJJ9qrb0hyf+b5KuHWxYAAOw8awnXnxv8+7GqGk9yQZJLhlYRAADsUGu5X/XrqmpPklckuSXJeUleOdSqAABgBzpjuK6qL0nyidba8SS/meSJm1IVAADsQGccFjJ4GuN1m1QLAADsaGsZc/2OqvpXVXVxVe1dfA29MgAA2GHWMuZ68X7W379kXoshIgAAcJq1PKHx0s0oBAAAdrq1PKHxu1ea31p7Y/flAADAzrWWMdd/e8nr65PckORb1rLzqrqqqt5TVfdU1ctWWP74qupV1e9X1R9W1bOXLHv5YLv3VNWz1tQNAABsobUMCzm0dLqqLkj/kehnVFW7krw2yTOTHE1yR1Xd0lq7e8lqr0jyltbaz1fVU5LcmuSSwfvnJbk8yWOT/HpVfVVr7dQa+wIAgE23ljPXy/1VksvWsN7TktzTWru3tfbZJIeTXL1snZbk/MH7C5LcN3h/dZLDrbW/bq29L8k9g/0BAMC2tZYx1/89/RCc9MP4U5K8ZQ37flySDy2ZPprk6cvWuSHJ26vqUJJHJPnGJdv+7rJtH7dCbdcmuTZJ9u3bl/n5+TWUdbr1bnPixIlNOc5GjVI/o9TLRmy0n+1oJ/YyMTFxxuVVteL8Xq83jHKGZid+NmcySv2MUi/JaPUzSr0ko9XPtuiltXbGV5K/v+T1jCQXnW2bwXbPTfL6JdPXJPnZZeu8OMlLBu//bpK70w/wr03yXUvWm03ynDMd74orrmjr1W9/fXq93qYcZyNGqZ9R6mWjNtLPdjVKvbQ2Wv2MUi+tjVY/o9RLa6PVzyj10tpo9bNZvSS5s62SSddyn+sPJvnz1tpnkqSq/kZVXdJae/9Ztjua5OIl0xflC8M+Fk0luWoQ8n+nqh6e5MI1bgsAANvKWsZc/0qSzy+ZPjWYdzZ3JLmsqi6tqoemf4HiLcvW+WCSg0lSVWNJHp7kI4P1nldVD6uqS9Mf4/17azgmAABsmbWcud7d+hckJklaa58dhOUzaq2drKrrktyWZFeSm1pr766qG9M/lX5Lkpck+YWqelH647pfMDjV/u6qekv6w0ROJvn+5k4hAABsc2sJ1x+pqm8ZhOFU1dVJPrqWnbfWbk3/9npL512/5P3d6Y/jXmnbmSQzazkOAABsB2sJ19+X5M1V9XOD6aNJVnxqIwAAnMvW8hCZ9yb5O1V1XpJqrX1y+GXB6rc669KePXuGfgwA4Nxx1gsaq+pHqupRrbUTrbVPVtWeqvrhzSiOc9dqt7c502sj2x07dmyLOwUARsla7hbyza21jy1OtNaOJ3n28EoCAICdaS3heldVPWxxoqr+RpKHnWF9AAA4J63lgsY3JXlnVf2nwfQLk7xheCUBAMDOtJYLGn+8qv4wyTcmqSS/luQJwy4MAAB2mrUMC0mSv0j/KY3PSf+JigtDqwgAAHaoVc9cV9VXpf/I8skk9yf55fRvxTexSbUBAMCOcqZhIf8nyW8l+YettXuSZPCYcgAAYAVnGhbynPSHg/Sq6heq6mD6Y64BAIAVrBquW2v/tbX2HUn+ZpL5JC9Ksq+qfr6qvmmT6gMAgB1jLXcL+VSSNyd5c1XtTfLcJC9L8vYh18YGeGQ4AMDWWct9rh/QWjuW5D8OXmwzi48AX4+q2tB2AAB8sbXeig8AADiLdZ25HkWGUQAA0JVz+sx1a23dr16vt+5tjh07ttWtAgCwCc7pcA0AAF0SrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB3ZvdUFMHxVtaHlrbVhlAMAMLKcuT4HtNZWffV6vVWXAQCwPsI1AAB0RLgGAICOCNcAANAR4RoAADoiXAMAQEeEawAA6IhwDQAAHRGuAQCgI8I1AAB0RLgGAICODDVcV9VVVfWeqrqnql62wvKfrqp3DV5/UlUfW7Ls1JJltwyzTgAA6MLuYe24qnYleW2SZyY5muSOqrqltXb34jqttRctWf9Qkq9dsotPt9aeOqz6AACga8M8c/20JPe01u5trX02yeEkV59h/ckkc0OsBwAAhqpaa8PZcdW3J7mqtfY9g+lrkjy9tXbdCus+IcnvJrmotXZqMO9kknclOZnkNa21t62w3bVJrk2Sffv2XXH48OGh9LLUiRMnct555w39OJtllPqZmJhIr9fb6jI6M0qfzSj1koxWP6PUSzJa/YxSL8lo9TNKvSSj1c9m9TIxMXFXa+3KlZYNbVhIklph3mpJ/nlJbl4M1gOPb63dV1VPTHJ7Vf1Ra+29p+2stdcleV2SXHnlle3AgQMdlH1m8/Pz2YzjbJZR62eUehmlz2aUeklGq59R6iUZrX5GqZdktPoZpV6S0epnO/QyzGEhR5NcvGT6oiT3rbLu87JsSEhr7b7Bv/cmmc/p47EBAGDbGWa4viPJZVV1aVU9NP0A/UV3/aiqJyfZk+R3lszbU1UPG7y/MMkzkty9fFsAANhOhjYspLV2sqquS3Jbkl1JbmqtvbuqbkxyZ2ttMWhPJjncTh/8PZbkP1bV59P/A+A1S+8yAgAA29Ewx1yntXZrkluXzbt+2fQNK2z320m+epi1AQBA1zyhEQAAOiJcAwBAR4RrAADoyFDHXMMwVK10C/WzLxvWA5MAABY5c82O01pb8dXr9VZdJlgDAJtBuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0JGhhuuquqqq3lNV91TVy1ZY/tNV9a7B60+q6mNLlj2/qv508Hr+MOsEAIAu7B7WjqtqV5LXJnlmkqNJ7qiqW1prdy+u01p70ZL1DyX52sH7vUleleTKJC3JXYNtjw+rXgAAeLCGeeb6aUnuaa3d21r7bJLDSa4+w/qTSeYG75+V5B2ttWODQP2OJFcNsVYAAHjQqrU2nB1XfXuSq1pr3zOYvibJ01tr162w7hOS/G6Si1prp6rqXyV5eGvthwfLX5nk0621n1y23bVJrk2Sffv2XXH48OGh9LLUiRMnct555w39OJtllPoZpV6S0epnlHpJRqufUeolGa1+RqmXZLT6GaVektHqZ7N6mZiYuKu1duVKy4Y2LCRJrTBvtST/vCQ3t9ZOrWfb1trrkrwuSa688sp24MCBDZS5PvPz89mM42yWUepnlHpJRqufUeolGa1+RqmXZLT6GaVektHqZ5R6SUarn+3QyzCHhRxNcvGS6YuS3LfKus/LF4aErHdbAADYFoYZru9IcllVXVpVD00/QN+yfKWqenKSPUl+Z8ns25J8U1Xtqao9Sb5pMA8AALatoQ0Laa2drKrr0g/Fu5Lc1Fp7d1XdmOTO1tpi0J5McrgtGfzdWjtWVT+UfkBPkhtba8eGVSsAAHRhmGOu01q7Ncmty+Zdv2z6hlW2vSnJTUMrDgAAOuYJjQAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGhiaubm5jI+P5+DBgxkfH8/c3NxWlwQAQ7V7qwsARtPc3Fymp6czOzubU6dOZdeuXZmamkqSTE5ObnF1ADAczlwDQzEzM5PZ2dlMTExk9+7dmZiYyOzsbGZmZra6NAAYGuEaGIqFhYXs37//tHn79+/PwsLCFlUEAMMnXANDMTY2liNHjpw278iRIxkbG9uiigBg+IRrYCimp6czNTWVXq+XkydPptfrZWpqKtPT01tdGgAMjQsagaFYvGjx0KFDWVhYyNjYWGZmZlzMCMBIE66BoZmcnMzk5GTm5+dz4MCBrS4HAIbOsBAAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgw1XFfVVVX1nqq6p6petso6/7iq7q6qd1fVLy2Zf6qq3jV43TLMOgEAoAu7h7XjqtqV5LVJnpnkaJI7quqW1trdS9a5LMnLkzyjtXa8qr5syS4+3Vp76rDqAwCArg3zzPXTktzTWru3tfbZJIeTXL1sne9N8trW2vEkaa19eIj1PChzc3MZHx/PwYMHMz4+nrm5ua0uCQCAbaZaa8PZcdW3J7mqtfY9g+lrkjy9tXbdknXeluRPkjwjya4kN7TWfm2w7GSSdyU5meQ1rbW3rXCMa5NcmyT79u274vDhw0Pp5Z3vfGdmZ2fz0pe+NJdeemne97735Sd+4icyNTWVgwcPDuWYm+XEiRM577zztrqMToxSL8lo9TNKvSSj1c8o9ZKMVj+j1EsyWv2MUi/JaPWzWb1MTEzc1Vq7csWFrbWhvJI8N8nrl0xfk+Rnl63zq0n+a5KHJLk0/eEjjxose+zg3ycmeX+SJ53peFdccUUblssvv7zdfvvtrbXWer1ea62122+/vV1++eVDO+ZmWexnFIxSL62NVj+j1Etro9XPKPXS2mj1M0q9tDZa/YxSL62NVj+b1UuSO9sqmXSYw0KOJrl4yfRFSe5bYZ3/1lr7XGvtfUnek+SyJGmt3Tf4994k80m+doi1ntHCwkL2799/2rz9+/dnYWFhiyoCAGA7Gma4viPJZVV1aVU9NMnzkiy/68fbkkwkSVVdmOSrktxbVXuq6mFL5j8jyd3ZImNjYzly5Mhp844cOZKxsbEtqggAgO1oaOG6tXYyyXVJbkuykOQtrbV3V9WNVfUtg9VuS3J/Vd2dpJfkpa21+5OMJbmzqv5gMP81bcldRjbb9PR0pqam0uv1cvLkyfR6vUxNTWV6enqrSgIAYBsa2q34kqS1dmuSW5fNu37J+5bkxYPX0nV+O8lXD7O29ZicnEySHDp0KAsLCxkbG8vMzMwD8wEAIBlyuB4lk5OTmZyczPz8fA4cOLDV5QAAsA15/DkAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrgP/b3t0H2VXQZxz/PpPFJhBbkZdUSNrQNpVtdzRCJsUmzWQNxmAp1NdhhzqxbE07Iy/2xRbcKVboTu3oOO10mCq6NMwIm9EoyCjGpDapZlosBAIkLIgFwRRqaMFqChUSnv5xzg7LZtds9p69J/fs85nZ2XvPPffe55mb7P723HPuiYiIqEiG64iIiIiIimS4joiIiIioSIbriIiIiIiKZLiOiIiImIWGh4fp6elhzZo19PT0MDw8XHekRuiqO0BEREREtNfw8DADAwMMDQ1x6NAh5syZQ39/PwB9fX01p+ts2XIdERERMcsMDg4yNDREb28vXV1d9Pb2MjQ0xODgYN3ROl6G64iIiIhZZmRkhJUrV75s2cqVKxkZGakpUXNkuI6IiIiYZbq7u9m5c+fLlu3cuZPu7u6aEjVHhuuIiIiIWWZgYID+/n62b9/OwYMH2b59O/39/QwMDNQdrePlgMaIiIiIWWb0oMXLLruMkZERuru7GRwczMGMFchwHRERETEL9fX10dfXx44dO1i9enXdcRoju4VERERERFQkw3VEREREREUyXEdEREREVCTDdURERERERWZ0uJa0TtJDkr4j6cpJ1nm3pAck7ZV085jl6yU9XH6tn8mcERERERFVmLFPC5E0B7gOeDOwD7hT0m22HxizzhLgKmCF7WcknVoufzXwYWAZYGBXed9nZipvRERERESrZnLL9XLgO7Yfsf08sAm4cNw67wOuGx2abe8vl78F2Gb76fK2bcC6GcwaEREREdEy2Z6ZB5beCayz/Xvl9fcAv2b70jHr3Ap8G1gBzAH+wvYWSX8CzLX9l+V6fw48Z/vj455jA7ABYMGCBWdv2rRpRrqMdeDAAebPnz/jz9MuTerTpC7QrD5N6gLN6tOkLtCsPk3qAs3q06Qu0Kw+7erS29u7y/ayiW6byZPIaIJl4yf5LmAJsBpYCHxTUs8U74vt64HrAZYtW+Z2fAB60z5ovUl9mtQFmtWnSV2gWX2a1AWa1adJXaBZfZrUBZrV51joMpO7hewDFo25vhB4YoJ1vmT7BduPAg9RDNtTuW9ERERExDFlJofrO4Elks6Q9ArgIuC2cevcCvQCSDoZ+GXgEeBrwFpJJ0o6EVhbLouIiIiIOGbN2G4htg9KupRiKJ4D3GB7r6RrgLts38ZLQ/QDwCHgg7b/G0DStRQDOsA1tp+eqawREREREVWYyX2usX07cPu4ZVePuWzgj8qv8fe9AbhhJvNFRERERFQpZ2iMiIiIiKhIhuuIiIiIiIpkuI6IiIiIqEiG64iIiIiIimS4joiIiIioSIbriIiIiIiKZLiOiIiIiKiIio+a7nySngIea8NTnQz8Vxuep12a1KdJXaBZfZrUBZrVp0ldoFl9mtQFmtWnSV2gWX3a1eXnbZ8y0Q2NGa7bRdJdtpfVnaMqTerTpC7QrD5N6gLN6tOkLtCsPk3qAs3q06Qu0Kw+x0KX7BYSEREREVGRDNcRERERERXJcH30rq87QMWa1KdJXaBZfZrUBZrVp0ldoFl9mtQFmtWnSV2gWX1q75J9riMiIiIiKpIt1xERERERFclwHRERERFRkQzXUyTpBkn7Je2pO0urJC2StF3SiKS9kq6oO1MrJM2V9G+S7i37fKTuTK2SNEfSPZK+XHeWVkn6rqT7Je2WdFfdeVoh6VWSNkt6sPz/88a6M02XpNeWr8no1w8lfaDuXNMl6Q/L//97JA1Lmlt3plZIuqLssrcTX5eJfmdKerWkbZIeLr+fWGfGqZqky7vK1+ZFSR31EXaT9PlY+XPtPkm3SHpVnRmnapIu15Y9dkvaKum0dufKcD11G4F1dYeoyEHgj213A+cA75f0KzVnasWPgTfZfj2wFFgn6ZyaM7XqCmCk7hAV6rW9tO7PHq3A3wJbbJ8JvJ4Ofo1sP1S+JkuBs4FngVtqjjUtkk4HLgeW2e4B5gAX1Ztq+iT1AO8DllP8Oztf0pJ6Ux21jRz+O/NK4Ou2lwBfL693go0c3mUP8HbgG21P07qNHN5nG9Bj+3XAt4Gr2h1qmjZyeJeP2X5d+bPty8DV7Q6V4XqKbH8DeLruHFWw/aTtu8vLP6IYEE6vN9X0uXCgvHpc+dWxR+pKWgj8JvCZurPESyT9NLAKGAKw/bztH9SbqjJrgH+33Y6z3M6ULmCepC7geOCJmvO0ohu4w/aztg8C/wy8reZMR2WS35kXAjeWl28EfrutoaZpoi62R2w/VFOklkzSZ2v5bw3gDmBh24NNwyRdfjjm6gnUMA9kuJ7lJC0G3gB8q94krSl3o9gN7Ae22e7kPn8D/CnwYt1BKmJgq6RdkjbUHaYFvwA8BfxDucvOZySdUHeoilwEDNcdYrps/wfwceBx4Engf2xvrTdVS/YAqySdJOl44K3AopozVWGB7Seh2MgDnFpznpjYJcBX6w7RCkmDkr4HXEy2XEc7SZoPfAH4wLi/9DqO7UPlW0ALgeXl26odR9L5wH7bu+rOUqEVts8CzqPYBWlV3YGmqQs4C/h7228A/pfOeVt7UpJeAVwAfL7uLNNV7rt7IXAGcBpwgqTfqTfV9NkeAf6a4q36LcC9FLvzRcwoSQMU/9ZuqjtLK2wP2F5E0ePSdj9/hutZStJxFIP1Tba/WHeeqpRv0++gc/ePXwFcIOm7wCbgTZI+W2+k1th+ovy+n2Kf3uX1Jpq2fcC+Me+KbKYYtjvdecDdtr9fd5AWnAs8avsp2y8AXwR+veZMLbE9ZPss26so3vZ+uO5MFfi+pNcAlN/315wnxpC0HjgfuNjNOQnKzcA72v2kGa5nIUmi2G90xPYn6s7TKkmnjB7ZLGkexS/aB+tNNT22r7K90PZiirfq/8l2x26Bk3SCpFeOXgbWUrzl3XFs/yfwPUmvLRetAR6oMVJV+ujgXUJKjwPnSDq+/Pm2hg4+2BRA0qnl95+jOHCu018jgNuA9eXl9cCXaswSY0haB/wZcIHtZ+vO04pxB/9eQA3zQFe7n7BTSRoGVgMnS9oHfNj2UL2ppm0F8B7g/nI/ZYAP2b69xkyteA1wo6Q5FH8wfs52x3+EXUMsAG4p5h26gJttb6k3UksuA24qd6V4BPjdmvO0pNyf983A79edpRW2vyVpM3A3xVva93AMnAK5RV+QdBLwAvB+28/UHehoTPQ7E/go8DlJ/RR/EL2rvoRTN0mXp4G/A04BviJpt+231Jdy6ibpcxXwU8C28uf1Hbb/oLaQUzRJl7eWG0FeBB4D2t4jpz+PiIiIiKhIdguJiIiIiKhIhuuIiIiIiIpkuI6IiIiIqEiG64iIiIiIimS4joiIiIioSIbriIhjnKRDknZL2iPp8+VH6P2k9T807vq/tPDc75V02iS3XSPp3AmWr5aUj8OMiFkpw3VExLHvOdtLbfcAz3Pkz2192XBtu5WzFb6X4pTih7F9te1/bOGxIyIaJ8N1RERn+SbwSwCSbpW0S9JeSRvKZR8F5pVbum8qlx0YvbOkD0q6U9J9kj5SLlssaUTSp8vH2ippnqR3AssoTpyzuzwDKmMea2O5DpLWSXpQ0k6KMwpGRMxKGa4jIjqEpC7gPOD+ctElts+mGIAvl3SS7St5aUv3xePuvxZYoYLxRwAAAXNJREFUAiwHlgJnS1pV3rwEuM72rwI/AN5hezNwF3Bx+XjPTZJrLvBp4LeA3wB+trrWERGdJcN1RMSxb56k3RSD7uPAULn8ckn3AncAiygG5J9kbfl1D8Wpws8cc59Hbe8uL+8CFh9FvjPL+z/s4rS/nz2K+0ZENEpX3QEiIuKInrO9dOwCSauBc4E32n5W0g5g7hEeR8Bf2f7UuMdaDPx4zKJDwMt2AZkCH+X6ERGNlC3XERGd6WeAZ8rB+kzgnDG3vSDpuAnu8zXgEknzASSdLunUIzzPj4BXHmGdB4EzJP1ieb3vyPEjIpopw3VERGfaAnRJug+4lmLXkFHXA/eNHtA4yvZW4GbgXyXdD2zmyIPzRuCTEx3QOOZx/w/YAHylPKDxsWn0iYhoBBW7x0VERERERKuy5ToiIiIioiIZriMiIiIiKpLhOiIiIiKiIhmuIyIiIiIqkuE6IiIiIqIiGa4jIiIiIiqS4ToiIiIioiL/D0+/g1YpUHEKAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 864x720 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "fig = plt.figure(figsize=(12,10))\n",
    "plt.grid()\n",
    "plt.boxplot(fold_vacc)\n",
    "plt.suptitle('Cross-Validation Accuracy\\n basic CNN')\n",
    "ax = plt.gca()\n",
    "plt.xlabel('Patient id')\n",
    "plt.ylabel('Accuracy')\n",
    "plt.savefig('fig.png')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Image Time Window"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## CNN Parallel Model (A)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ParallelCNN(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(ParallelCNN, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(3,3,3, padding=1)\n",
    "        self.conv2 = nn.Conv2d(3,3,5, padding=2)\n",
    "        self.conv3 = nn.Conv2d(3,3,3, padding=1)\n",
    "        self.conv4 = nn.Conv2d(3,3,5, padding=2)\n",
    "        self.conv5 = nn.Conv2d(3,3,3, padding=1)\n",
    "        self.conv6 = nn.Conv2d(3,3,5, padding=2)\n",
    "        self.conv7 = nn.Conv2d(3,3,3, padding=1)\n",
    "        self.conv8 = nn.Conv2d(3,3,5, padding=2)\n",
    "        self.conv9 = nn.Conv2d(3,3,3, padding=1)\n",
    "        self.conv10 = nn.Conv2d(3,3,5, padding=2)\n",
    "        self.conv11 = nn.Conv2d(3,3,3, padding=1)\n",
    "        self.conv12 = nn.Conv2d(3,3,5, padding=2)\n",
    "        self.conv13 = nn.Conv2d(3,3,3, padding=1)\n",
    "        self.conv14 = nn.Conv2d(3,3,5, padding=2)\n",
    "        self.pool = nn.MaxPool2d(2,2)\n",
    "        self.fc1 = nn.Linear(3549,512)\n",
    "        self.fc2 = nn.Linear(512,4)\n",
    "        self.max = nn.Softmax()\n",
    "    \n",
    "    def forward(self, x):\n",
    "        batch_size = x.shape[0]\n",
    "        x[:,0] = F.relu(self.conv1(x[:,0]))\n",
    "        x[:,1] = F.relu(self.conv3(x[:,1]))\n",
    "        x[:,2] = F.relu(self.conv5(x[:,2]))\n",
    "        x[:,3] = F.relu(self.conv7(x[:,3]))\n",
    "        x[:,4] = F.relu(self.conv9(x[:,4]))\n",
    "        x[:,5] = F.relu(self.conv11(x[:,5]))\n",
    "        x[:,6] = F.relu(self.conv13(x[:,6]))\n",
    "        x[:,0] = F.relu(self.conv2(x[:,0]))\n",
    "        x[:,1] = F.relu(self.conv4(x[:,1]))\n",
    "        x[:,2] = F.relu(self.conv6(x[:,2]))\n",
    "        x[:,3] = F.relu(self.conv8(x[:,3]))\n",
    "        x[:,4] = F.relu(self.conv10(x[:,4]))\n",
    "        x[:,5] = F.relu(self.conv12(x[:,5]))\n",
    "        x[:,6] = F.relu(self.conv14(x[:,6]))\n",
    "        x = x[:,:,:,3:29,3:29]\n",
    "        x = x.reshape(batch_size, x.shape[2], x.shape[1]*x.shape[3],-1) # img reshape\n",
    "        x = self.pool(x)\n",
    "        x = x.view(batch_size,-1)\n",
    "        x = self.fc1(x)\n",
    "        x = self.fc2(x)\n",
    "        x = self.max(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:45: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training Fold 1/5\t of Patient 1\n",
      "[1,  30] loss: 1.414\tAccuracy : 0.517\t\tval-loss: 1.359\tval-Accuracy : 0.459\n",
      "[6,  30] loss: 1.117\tAccuracy : 0.748\t\tval-loss: 1.088\tval-Accuracy : 0.757\n",
      "[11,  30] loss: 0.913\tAccuracy : 0.830\t\tval-loss: 0.968\tval-Accuracy : 0.784\n",
      "[16,  30] loss: 0.862\tAccuracy : 0.830\t\tval-loss: 0.925\tval-Accuracy : 0.811\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-10-a0847679b8ae>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     36\u001b[0m                 \u001b[0moutputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mCNN\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     37\u001b[0m                 \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 38\u001b[1;33m                 \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     39\u001b[0m                 \u001b[0moptimizer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     40\u001b[0m                 \u001b[0mrunning_loss\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(self, gradient, retain_graph, create_graph)\u001b[0m\n\u001b[0;32m    164\u001b[0m                 \u001b[0mproducts\u001b[0m\u001b[1;33m.\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    165\u001b[0m         \"\"\"\n\u001b[1;32m--> 166\u001b[1;33m         \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    167\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    168\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\autograd\\__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables)\u001b[0m\n\u001b[0;32m     97\u001b[0m     Variable._execution_engine.run_backward(\n\u001b[0;32m     98\u001b[0m         \u001b[0mtensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgrad_tensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 99\u001b[1;33m         allow_unreachable=True)  # allow_unreachable flag\n\u001b[0m\u001b[0;32m    100\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    101\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "p = 0\n",
    "n_fold = 5    \n",
    "n_patient = len(np.unique(Patient))\n",
    "fold_vloss = np.zeros((n_fold,n_patient))\n",
    "fold_loss = np.zeros((n_fold,n_patient))\n",
    "fold_vacc = np.zeros((n_fold,n_patient))\n",
    "fold_acc = np.zeros((n_fold,n_patient))\n",
    "for patient in np.unique(Patient):\n",
    "    id_patient = np.arange(len(tmp))[Patient==patient]\n",
    "\n",
    "    length = len(id_patient)\n",
    "    \n",
    "    train_id, test_id = kfold(length,n_fold)\n",
    "    \n",
    "    for fold in range(n_fold):\n",
    "        X_train = tmp[id_patient[train_id[fold]]]\n",
    "        X_test = tmp[id_patient[test_id[fold]]]\n",
    "        y_train = Label[id_patient[train_id[fold]]]\n",
    "        y_test = Label[id_patient[test_id[fold]]] \n",
    "\n",
    "        print(\"Begin Training Fold %d/%d\\t of Patient %d\" % \n",
    "             (fold+1,n_fold, patient))\n",
    "\n",
    "        CNN = ParallelCNN().cuda(0)\n",
    "        criterion = nn.CrossEntropyLoss()\n",
    "        optimizer = optim.SGD(CNN.parameters(), lr=0.001, momentum=0.9)\n",
    "\n",
    "        n_epochs = 30\n",
    "        for epoch in range(n_epochs):\n",
    "            running_loss = 0.0\n",
    "            batchsize = 4\n",
    "            for i in range(int(len(y_train)/batchsize)):\n",
    "                optimizer.zero_grad()\n",
    "\n",
    "                # forward + backward + optimize\n",
    "                outputs = CNN(torch.from_numpy(X_train[i:i+batchsize]).to(torch.float32).cuda())\n",
    "                loss = criterion(outputs, torch.from_numpy(y_train[i:i+batchsize]).to(torch.long).cuda())\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                running_loss += loss.item()\n",
    "\n",
    "            #acc\n",
    "            _, idx = torch.max(CNN(torch.from_numpy(X_train[:]).to(torch.float32).cuda()).data,1)\n",
    "            acc = (idx == torch.from_numpy(y_train).cuda()).sum().item()/len(y_train)\n",
    "\n",
    "            #val Loss\n",
    "            val_outputs = CNN(torch.from_numpy(X_test[:]).to(torch.float32).cuda())\n",
    "            val_loss = criterion(val_outputs, torch.from_numpy(y_test[:]).to(torch.long).cuda())\n",
    "            _, idx = torch.max(val_outputs.data,1)\n",
    "            val_acc = (idx == torch.from_numpy(y_test).cuda()).sum().item()/len(y_test)\n",
    "\n",
    "            if epoch%5==0:\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "        fold_vloss[fold, p ] = val_loss.item()\n",
    "        fold_loss[fold, p] = running_loss/i\n",
    "        fold_vacc[fold, p] = val_acc\n",
    "        fold_acc[fold, p] = acc\n",
    "        print('Finish Training Fold %d/%d\\t of Patient %d' % \n",
    "             (fold+1,n_fold, patient))\n",
    "    print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n",
    "    \n",
    "    p = p + 1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Peresented Results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAtAAAAKUCAYAAAAtng/mAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOzdfZyld10f/M/XXR6UhLBLMAIBEgXpyNYHSBFl9Z5xfQjcCFVKyyAIdJT2fkm0YqngeMNCO61PVVul9kZGAQMTAdFGTAVKZtRVqTwINGREQ8pDBCWQJSGghV1+9x/nLEwm+zC/2TlzZs6+36/Xee2c61zXub7fOTNnP/M7v+u6qrUWAABgY75k3AUAAMBuIkADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABRqCqnlFVR9bcv72qvnIj625iX/+9qp6+2e0B6CNAAztCVT2lqt4+DJofHYbCg2Oq5e5V9cmq+raTPPYLVfW63udsrZ3XWrtxC2o7XFVXrnvux7TWXnG2z32GfbaqeuSo9gGwmwjQwNhV1XOS/GKSf5/koiQPTPJfkjzhFOvvHWU9rbW/T/KbSb5/3X73JJlNMrKwutNUVSV5WpJbkmzrKHcN+H8K2HG8MQFjVVUXJHlxkh9qrb2+tfbp1trnWmu/21p77nCdw1X1uqq6sqpuS/KMqrpbVf1iVX1kePvFqrrbcP0Lq+oNw1HkW6rqj04Esar68ar666r6VFW9r6oOnaK0VyR5YlV92Zpl35XB++Z/Hz7X86rq/cPnur6qvuc0fbaqevDw63tX1dVVdVtV/VmSr1q37n+qqg8PH39HVX3LcPnlSX4iyT8bjtS/e7h8pap+YPj1l1TVT1bVB6vqY1X1yuH3OFV1ybCOp1fVh6rq41U1f4aX6FuS3C/JjyR5clXddV2tP1hVq2u+Bw8fLn9AVb2+qm6uqk9U1S8Pl99hBH1NTXvX9LJQVX+c5DNJvrKqnrlmHzdW1b9YV8MTqupdw+/X+6vq8qp6UlW9Y916P1ZVv3OGfgHOSIAGxu2bktw9yW+fYb0nJHldknsleVWS+SSPSvL1Sb4uySOT/ORw3R9LclOS+2Qwov0TSVpVPTTJs5P8o9ba+RkE4g+cbGettT9J8tEk37tm8dOSvLq1dmx4//0ZBMwLkrwoyZVVdd8N9PySJH+f5L5J/vnwttbbhn3tT/LqJK+tqru31n4/g1H63xxOCfm6kzz3M4a3mSRfmeS8JL+8bp2DSR6a5FCSF1TV1GlqfXqS381gRD5JHnfigap6UpLDGYzU3zPJ45N8YjhS/4YkH0xySZL7J7nqNPtY72lJnpXk/OFzfGy433smeWaSX1gT1B+Z5JVJnpvBz8a3ZvCaXp3k0nW9PTXJb3TUAXBSAjQwbvdO8vE1ofRU/rS19juttc+31v4uyfcleXFr7WOttZszCLBPG677uQzC6YOGo9l/1FprSY4nuVuSr6mqu7TWPtBae/9p9vnKDKdxVNU9MwjxX5i+0Vp7bWvtI8OafjPJX2UQ5E9pGC6fmOQFw9H267JuSkhr7crW2idaa8daa/9xWPNDz/D9OeH7kvx8a+3G1trtSZ6fwcjx2mkvL2qt/V1r7d1J3p3BHyAnq/XLkjwpgz8aPpfBHzBrp3H8QJKfaa29rQ3c0Fr74PB7cL8kzx32+PettZ6DJF/eWnvvsP/PtdZ+r7X2/uE+/iDJmzL4wyVJ5pL8WmvtzcPX4a9ba3/RWvs/GYT+pw57eVgGYf4NHXUAnJQADYzbJ5JcuIF5zR9ed/9+GYxOnvDB4bIk+dkkNyR50/Aj/+clSWvthiT/KoNR049V1VVVdb/kC2fJOHF74PB5Xplkpqrun+SfJLmhtfbnJ3ZYVd8/nDrwyar6ZJIDSS48Qx/3SbJ3XT9r+zgx1WC1qm4dPu8FG3jeE072fdmbwUj8CX+z5uvPZDBKfTLfk+RYkmuG91+V5DFVdZ/h/QdkMAq/3gOSfHADfxSdyh1e66p6TFW9dTgd55NJHpsvfj9OVUMy+MPkKVVfmMf9mmGwBjgrAjQwbn+awXSGf3yG9dq6+x9J8qA19x84XJbW2qdaaz/WWvvKJN+d5Dkn5jq31l7dWjs43LYl+enh8vPW3D40XPahJH+Uwaju0zII1EmSqnpQkl/NYErIvVtr90pyXZI6Qx83ZxBKH7Cu9hPP+y1JfjzJP02yb/i8t6553vXfh/VO9n05luRvz7DdyTw9g3D9oar6mySvTXKXDA6kTAZB96tOst2HkzzwFH8UfTrJ2nnlX3GSdb7QYw3mtf9Wkp9LctHw+3FNvvj9OFUNaa29NclnMxitfkpM3wC2iAANjFVr7dYkL0jykqr6x1X1ZVV1l+Go48+cZtOlJD9ZVfepqguHz3FlklTV46rqwcORx9symLpxvKoeWlXfNgxlf5/k74aPnc4rMgjJj85gBPaEe2QQ9G4e7vOZGYxAn6nf40len+TwsNevyR2nRZyfQeC9OcneqnpBBnN/T/jbJJfUqc9OsZTkR6vq0qo6L1+cM901GjwcdT+Uwdzjr88X55r/9Jp6X5bkX1fVI2rgwcM/LP4sg/njP1VV96jBaQEfPdzmXUm+taoeODy48flnKOWuGUxhuTnJsap6TJLvXPP4YpJnVtWhGhxAef+q+gdrHn9lBnPAj3VOIwE4JQEaGLvW2s8neU4GBwHenMGo4rOTnO6MCf8uyduTvCfJ/0ryzuGyJHlIkv+R5PYMRrj/S2ttJYMg9lNJPp7BNIYvz+AAw9N5XZJ9Sd7SWvvompqvT/Ifh8//t0n+YZI/3ki/w97OG9bw8iS/vuaxN2Zwlo+/zGD6xd/njlMaXjv89xNV9c6TPPevZTDS+odJ/vdw+ys2WNdaT0vyrtbam1prf3PiluQ/J/naqjrQWnttkoUMDnT8VAav1/7hHwnfneTBST6UwQGd/yxJWmtvzmBu8nuSvCNnmJPcWvtUkh9O8pokRzMYSb56zeN/luGBhRmM1P9B7jgC/xsZ/GFj9BnYMjU4rgYAJk9VfWkGZ/F4eGvtr8ZdDzAZjEADMMn+nyRvE56BrTTSq3kBwLhU1QcyONjwTAeoAnQxhQMAADqYwgEAAB0EaIAdrKo+UFXfPvz6cFVducHtVqrqB0ZbHcC5SYAG6FRVrao+Pbxq4V9X1c8PL9G9a1TVV1fVa6vq48MrHr6nqp5TVXuq6pJhj7+3bpsrq+rw8Ovp4TovWbfOkap6xvZ1ArD9BGiAzfm61tp5GVxs5ClJfrD3CTZw+fKRqKqvSvI/Mzi/9D9srV2Q5ElJLsvgQi4nPGrNBVBO5tNJvr+qLhlRqQA7kgANcBZaa3+RweW+DyRJVT2vqt5fVZ+qquur6ntOrFtVz6iqP66qX6iqWzK4GuFXVdW1VfWJ4Wjwq6rqXhvZd1U9qqr+pKo+WVXvrqrpDZb9oiR/0lp7zomLw7TW3tdae0pr7ZNr1vuZfPHiNCfzyQwuBPPCDe4XYCII0ABnYXgp7m9J8ufDRe8f3r8gg6B6ZVXdd80m35jkxgyugriQwWnW/kOS+yWZSvKAJIc3sN/7J/m9DALu/iT/OslvVdV9NlD2t2dwhcUzeUmSrz4xB/sUFpI8saoeuoHnA5gIAjTA5ryzqo4m+d0kL8vwctyttde21j7SWvt8a+03k/xVkkeu2e4jrbVfaq0da639XWvthtbam1tr/6e1dnOSn0/yf21g/09Nck1r7Zrhvt6cwaXNH7uBbe+d5KNnXGtwGfCFnGYUenh57/+a5MUbeD6AieBCKgCb8/DW2g3rF1bV9yd5TpJLhovOS3LhmlU+vG79L0/ynzMYtT4/g4GNoxvY/4OSPKmqvnvNsrskWd7Atp9Ict8zrjXwq0meu24/6/10kvdX1ddt8DkBdjUj0ABbpKoelEHgfHaSe7fW7pXkugymaZyw/upV/2G47Gtba/fMYGS5cmYfTvIbrbV7rbndo7X2UxvY9n8keeIG1ktr7XMZTEX5t6eqq7X2iSS/OFwHYOIJ0ABb5x4ZhOGbk6SqnpnhwYWncX6S25N8cjiv+bkb3NeVSb67qr5reOq5uw9PLXfxBrZ9YZJvrqqfraqvGNb64OFp6k52AONvJLlbkstP85w/n+SbM5jHDTDRBGiALdJauz7Jf0zyp0n+Nsk/TPLHZ9jsRUkenuTWDA4KfP0G9/XhJE9I8hMZBPYPZxC+z/i+3lp7f5JvymCayXur6tYkv5XBHOpPnWT94xmE7v2nec7bMjhrxynXAZgU1dr6TxMBAIBTMQINAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBh77gL6HXhhRe2Sy65ZOT7+fSnP5173OMeI9/PdpikXpLJ6meSekkmq59J6iWZrH4mqZdksvqZpF6SyepnknpJtq+fd7zjHR9vrd1n/fJdF6AvueSSvP3tbx/5flZWVjI9PT3y/WyHSeolmax+JqmXZLL6maReksnqZ5J6SSarn0nqJZmsfiapl2T7+qmqD55suSkcAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCgw8gCdFX9WlV9rKquO8XjVVX/uapuqKr3VNXDR1ULAABslVGOQL88yeWnefwxSR4yvD0rya+MsBYAANgSIwvQrbU/THLLaVZ5QpJXtoG3JrlXVd13VPUAAMBWqNba6J686pIkb2itHTjJY29I8lOttSPD+29J8uOttbefZN1nZTBKnYsuuugRV1111chqPuH222/PeeedN/L9bIdJ6iXZnf3MzMxsarvl5eUtruTsTVIvp7Mbf85OZ5L6maReksnqZ5J6SSarn0nqJdm+fmZmZt7RWrts/fK9I9/zqdVJlp00zbfWXprkpUly2WWXtenp6RGWNbCyspLt2M92mKRekt3Zz6n+UK2qUz62U52u3t3Yz6nsxp+z05mkfiapl2Sy+pmkXpLJ6meSeknG3884z8JxU5IHrLl/cZKPjKkWAADYkHEG6KuTfP/wbByPSnJra+2jY6wHAADOaGRTOKpqKcl0kgur6qYkL0xylyRprf3XJNckeWySG5J8JskzR1ULAABslZEF6Nba7Bkeb0l+aFT7BwCAUXAlQgAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADnvHXQAAAKxXVZvarrW2xZXcmQANAMCOc7ogXFXbEpRPxRQOAADoIEADAEAHARo4qf3796equm5JurfZv3//mDsFgD4CNHBSR48eTWut67a8vNy9zdGjR8fdKgB0EaABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgBgbPbv35+q6rol6d5m//79W1azAA0AwNgcPXo0rbWu2/Lycvc2R48e3bKa927ZMzF2J/4i69Va2+JKAFjL+zNMFgF6gpzqjbaqvAkDjNHp3oO9R8PuYwoHAAB0MAINW2T//v2bml/V+9Huvn37csstt3TvB3Yb0x6AncoINGyR3XgQBOxkp/odON1jwjOwHYxAAyfVXnjP5PAFXdtMJ8nKJvYDALuIAA2c3OFbuzdZWVnJ9PT01tcCADuIKRwAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADnvHXQAAAOeu9sJ7Jocv6NpmOklWNrGfLSJAAwAwPodv7d5kZWUl09PTW1/LBpnCAQAAHQRoAADoIEADwBbZv39/qqrrlqR7m/3794+5Uzi3CdAAsEWOHj2a1lrXbXl5uXubo0ePjrtVOKc5iBAAYJfZv3//yP+Q2rdvX2655ZaR7mO3MgINALDL9H7a4ZOOrWUEGrbIbjyPJfnCHNRerbUtruTctZmRtN7XzUgasJUEaNgi9aLbukPVZs5jWVVph7s24TRO9ZpVlZC8TU6MpG3UZn9vALaKKRwAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBAh73jLoA++/fvz9GjR7u3q6qu9fft25dbbrmlez8A57L2wnsmhy/o2mY6SVY2sR9gbAToXebo0aNprXVts7Kykunp6a5tegM3AEm96LZte49uh7s2AbbQSAN0VV2e5D8l2ZPkZa21n1r3+IOS/FqS+yS5JclTW2s3jbImAIDdrvfTjunEJx1baGQBuqr2JHlJku9IclOSt1XV1a2169es9nNJXtlae0VVfVuS/5DkaaOqCQBgIhy+tWv1qur+dIRTG+VBhI9MckNr7cbW2meTXJXkCevW+Zokbxl+vXySxwEAYEcZ5RSO+yf58Jr7NyX5xnXrvDvJEzOY5vE9Sc6vqnu31j6xdqWqelaSZyXJRRddlJWVlVHV/AW33377tuxnM3rr2mwvO7X/nfzabMfc8fPPP3/H9f+Wt7wlV155ZT70oQ/lgQ98YJ761Kfm0KFD4y7rCx7/+MfnU5/6VPd2va/n+eefn6uvvrp7P9thJ//eJH3vNzv9Pe1cfo/e6T9nvSatn0nqZeyvTWttJLckT8pg3vOJ+09L8kvr1rlfktcn+fMMQvRNSS443fM+4hGPaNtheXl5W/bTa/CS9dlML5vZz3bZqa/NZkxCL69+9avbpZde2q699tr25je/uV177bXt0ksvba9+9avHXdoX+L3Z2T9rvd+3nfzanOs/azv552wzJqmfnfozs1nb9dokeXs7SR4d5RSOm5I8YM39i5N8ZO0KrbWPtNa+t7X2DUnmh8v6JvUAY7WwsJDFxcXMzMxk7969mZmZyeLiYhYWFsZdGgCMxCgD9NuSPKSqLq2quyZ5cpI7fLZZVRdW1Ykanp/BGTmAXWR1dTUHDx68w7KDBw9mdXV1TBUBwGiNLEC31o4leXaSNyZZTfKa1tp7q+rFVfX44WrTSd5XVX+Z5KIkhqxgl5mamsqRI0fusOzIkSOZmpoaU0UAMFojPQ90a+2aJNesW/aCNV+/LsnrRlkDMFrz8/OZm5vL4uJijh8/nuXl5czNzZnCAcDEciVC4KzMzs4mSa644oqsrq5mamoqCwsLX1gOAJNGgAbO2uzsbGZnZzd1SWK2zmZPo9hcXAGgiwANMCFOF4RdhQxg64zyLBwAADBxzukRaB93AgDQ65wegT7ZlWVO3E73OADwRUtLSzlw4EAOHTqUAwcOZGlpadwlwUid0yPQAMDZWVpayvz8/BdOZblnz57Mzc0libPxMLHO6RFoAODsLCwsZHFxMTMzM9m7d29mZmayuLjoXPBMNAEaANi01dXVHDx48A7LDh48mNXV1TFVBKMnQAMAmzY1NZUjR47cYdmRI0cyNTU1popg9ARoAGDT5ufnMzc3l+Xl5Rw7dizLy8uZm5vL/Pz8uEuDkXEQIQCwaScOFLziiiuyurqaqampLCwsOICQiSZAAwBnZXZ2NrOzs1lZWcn09PS4y4GRM4UDAAA6CNAAANBBgAYAgA4CNAAAdHAQ4S7TXnjP5PAFXdtMJ8nKJvYDAMCdCNC7TL3otrTWurbZzFHRVZV2uGsTAIBzgikcAADQQYAGAIAOpnAAAEyIqtrUY73TQ891RqABACZEa+2kt+Xl5VM+Jjz3E6ABAKCDKRwAjFXv6TmnE6fmBMZKgGbXWFpaysLCQlZXVzM1NZX5+fnMzs6OuyzgbB2+tWv1qvKRMzBWAjS7wtLSUubn57O4uJjjx49nz549mZubSxIhGgDYVuZAsyssLCxkcXExMzMz2bt3b2ZmZrK4uJiFhYVxlwYAnGMEaHaF1dXVHDx48A7LDh48mNXV1TFVBACcqwRodoWpqakcOXLkDsuOHDmSqampMVUEAJyrBGh2hfn5+czNzWV5eTnHjh3L8vJy5ubmMj8/P+7SAIBzjIMI2RVOHCh4xRVXfOEsHAsLCw4gBAC2nQDNrjE7O5vZ2dmsrKxkenp63OXA+HScM/mE3nMtf3FffaeYAzgXCNAAu0y96Lbu8yBv5g/Pqko73LUJwDnBHGgAAOggQAMAQAcBGgAAOpwTAXr//v2pqq5bku5t9u/fP+ZOAQAYtXMiQB89ejStta7b8vJy9zZHjx4dd6sAAIzYORGgAQBgqwjQAADQQYAGAIAOLqQCAJyzTpw4oFfvxYyYLAI0AHDOOl0QripBmZMyhQMAADoI0AAA0MEUjl1os/O1euzbt2/k+wCYRN6jYfIJ0LvMZuZimcMFsD028167srKS6enprS8GGBlTOAAAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADs4DDUy89sJ7Jocv6NpmOklWNrEfACaeAA1MvHrRbd0XuNjMxS2qKu1w1yYA7EKmcAAAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA67B13AWydqtrUY621UZQDADCRjEBPkNbaSW/Ly8unfEx4BgDoI0ADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKBhDJaWlnLgwIEcOnQoBw4cyNLS0rhLAgA2yHmgYZstLS1lfn4+i4uLOX78ePbs2ZO5ubkkyezs7JirAwDOxAg0bLOFhYUsLi5mZmYme/fuzczMTBYXF7OwsDDu0gCADTACDdtsdXU1Bw8evMOygwcPZnV1dUwVwc7k6qrATmUEGrbZ1NRUjhw5codlR44cydTU1Jgqgp3J1VWBnUqAhm02Pz+fubm5LC8v59ixY1leXs7c3Fzm5+fHXRoAsAGmcMA2O3Gg4BVXXJHV1dVMTU1lYWHBAYTAznP4gq7Vp5NkZTP7uXUTG8H4CNAwBrOzs5mdnc3Kykqmp6fHXQ7AyXUGW+9pnCtM4QAAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGACbe/v37U1VdtyTd2+zfv3/MnbIdBGgAYOIdPXo0rbWu2/Lycvc2R48eHXerbAMBGgAAOpwxQFfVs6tq33YUAwAAO93eDazzFUneVlXvTPJrSd7YWmujLWtrtRfeMzl8Qdc200myson9AAAw0c4YoFtrP1lV/2+S70zyzCS/XFWvSbLYWnv/qAvcEodv7d6kqrLL/k4AAGAbbGgO9HDE+W+Gt2NJ9iV5XVX9zAhrAwCAHeeMI9BV9cNJnp7k40leluS5rbXPVdWXJPmrJP9mtCUCAMDOsZE50Bcm+d7W2gfXLmytfb6qHjeasgAAYGfayBSOa5LccuJOVZ1fVd+YJK211VEVBgAAO9FGAvSvJLl9zf1PD5cBAMA5ZyMButaetq619vlsbOoHAABMnI0E6Bur6oer6i7D248kuXHUhQEAwE60kQD9L5N8c5K/TnJTkm9M8qxRFgUAADvVGQN0a+1jrbUnt9a+vLV2UWvtKa21j23kyavq8qp6X1XdUFXPO8njD6yq5ar686p6T1U9djNNAADjs7S0lAMHDuTQoUM5cOBAlpaWxl0SjNRGzgN99yRzSR6W5O4nlrfW/vkZttuT5CVJviODkeu3VdXVrbXr16z2k0le01r7lar6mgzO+HFJbxMAwHgsLS1lfn4+i4uLOX78ePbs2ZO5ubkkyezs7Jirg9HYyBSO30jyFUm+K8kfJLk4yac2sN0jk9zQWruxtfbZJFclecK6dVqSew6/viDJRzZSNACwMywsLGRxcTEzMzPZu3dvZmZmsri4mIWFhXGXBiOzkbNpPLi19qSqekJr7RVV9eokb9zAdvdP8uE190/Mn17rcJI3VdUVSe6R5NtP9kRV9awM511fdNFFWVlZ2cDuz9527WfUbr/99onpJZmsfiapl2Rn99Nb12Z72anvTzu9nx47+edsM3Z7P6urqzl+/HhWVla+0Mvx48ezurq64/rye7My7jK2zNj7aa2d9pbkz4b//mGSAxlcmfDGDWz3pCQvW3P/aUl+ad06z0nyY8OvvynJ9Um+5HTP+4hHPKJth8G3ZjIsLy+Pu4QtNUn9TFIvre3cfjbz+7yZXrbrfWPS+um1U3/ONmu39/Owhz2sXXvtta21L/Zy7bXXtoc97GFjrOrO/N4sj7uELbVd/SR5eztJHt3IFI6XVtW+DOYrXz0MuT+9ge1uSvKANfcvzp2naMwlec0wyP9pBnOsL9zAcwMAO8D8/Hzm5uayvLycY8eOZXl5OXNzc5mfnx93aTAyp53CUVVfkuS21trRDEagv7Ljud+W5CFVdWkGp8B7cpKnrFvnQ0kOJXl5VU1lEKBv7tgHADBGJw4UvOKKK7K6upqpqaksLCw4gJCJdtoR6C7o5GAAABZxSURBVDa46uCzN/PErbVjw23fmGQ1g7NtvLeqXlxVjx+u9mNJfrCq3p1kKckzhsPlAMAuMTs7m+uuuy5vectbct111wnPTLyNHET45qr610l+M8mnTyxsrd1ypg1ba9dkcGq6tctesObr65M8esPVAgDAmG0kQJ843/MPrVnW0jedAwAAJsIZA3Rr7dLtKAQAAHaDjVyJ8PtPtry19sqtLwcAAHa2jUzh+Edrvr57BmfNeGcSARoAgHPORqZwXLH2flVdkMHlvQEA4JyzkRHo9T6T5CFbXQgAwKi0F94zOXxB1zbTSbKyif0w8TYyB/p3MzjrRjI4b/TXZHj1QACA3aBedFt6LzWxsrKS6enpvv1UpR3u2oRdaCMj0D+35utjST7YWrtpRPUAAMCOtpEA/aEkH22t/X2SVNWXVtUlrbUPjLQyAADYgU57Ke+h1yb5/Jr7x4fLAADgnLORAL23tfbZE3eGX991dCUBAMDOtZEAfXNVPf7Enap6QpKPj64kAADYuTYyB/pfJnlVVf3y8P5NSU56dUIAAJh0G7mQyvuTPKqqzktSrbVPjb6s7VFVm3q89zQ4AABMjjNO4aiqf19V92qt3d5a+1RV7auqf7cdxY1aa+2Ut+Xl5VM+BgDAuWsjc6Af01r75Ik7rbWjSR47upIAAGDn2kiA3lNVdztxp6q+NMndTrM+AABMrI0cRHhlkrdU1a8P7z8zyStGVxIAAOxcGzmI8Geq6j1Jvj1JJfn9JA8adWEAALATbWQKR5L8TQZXI3xikkNJVkdWEQAA7GCnHIGuqq9O8uQks0k+keQ3MziN3cw21QYAADvO6aZw/EWSP0ry3a21G5Kkqn50W6oCAIAd6nRTOJ6YwdSN5ar61ao6lMEcaAAAOGedMkC31n67tfbPkvyDJCtJfjTJRVX1K1X1ndtUHwAA7ChnPIiwtfbp1tqrWmuPS3Jxkncled7IKwMAgDWWlpZy4MCBHDp0KAcOHMjS0tJY6tjIeaC/oLV2S5L/b3gDAIBtsbS0lPn5+SwuLub48ePZs2dP5ubmkiSzs7PbWstGT2MHAABjs7CwkMXFxczMzGTv3r2ZmZnJ4uJiFhYWtr0WARoAgB1vdXU1Bw8evMOygwcPZnV1+y9PIkADALDjTU1N5ciRI3dYduTIkUxNTW17LQI0AAA73vz8fObm5rK8vJxjx45leXk5c3NzmZ+f3/Zaug4iBACAcThxoOAVV1yR1dXVTE1NZWFhYdsPIEwEaAAAdonZ2dnMzs5mZWUl09PTY6vDFA4AAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHRwGjuAXaiqRr6Pffv2jXwfALuRAA2wy7TWurepqk1tB8CdmcIBAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBh77gLAADYDlU18n3s27dv5Ptg/ARoxuvwBd2bTCfJymb2desmNgJgErTWurepqk1tx+QToBmretFt3W9OKysrmZ6e7ttPVdrhrk0AAE7KHGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHRwGjuACXGmi0Sc6nHnuQXoYwQaYEK01k55W15ePuVjAPQRoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCgw95xFwAAMC5VtanHW2ujKIddwgg0AHDOaq2d8ra8vHzKxzi3CdAAANBBgAYAgA7mQDN2Z5p/thX27ds38n0AAOcGI9CM1enmnm1mTtqpbrfccsu4WwUAJoQADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCgw0gDdFVdXlXvq6obqup5J3n8F6rqXcPbX1bVJ0dZDwAAnK29o3riqtqT5CVJviPJTUneVlVXt9auP7FOa+1H16x/RZJvGFU9AACwFUY5Av3IJDe01m5srX02yVVJnnCa9WeTLI2wHgAAOGvVWhvNE1f9kySXt9Z+YHj/aUm+sbX27JOs+6Akb01ycWvt+Ekef1aSZyXJRRdd9IirrrpqJDWvdfvtt+e8884b+X62wyT1kkxWP5PUS7Jz+5mZmdmW/Zx//vm5+uqrt2VfvXbqa7MZk9RLMln9TFIvyWT1M0m9JNvXz8zMzDtaa5etXz6yKRxJ6iTLTpXWn5zkdScLz0nSWntpkpcmyWWXXdamp6e3pMDTWVlZyXbsZztMUi/JZPUzSb0kO7efzQwU7NReNmuS+pmkXpLJ6meSekkmq59J6iUZfz+jnMJxU5IHrLl/cZKPnGLdJ8f0DQAAdoFRBui3JXlIVV1aVXfNICTf6bPNqnpokn1J/nSEtQAAwJYYWYBurR1L8uwkb0yymuQ1rbX3VtWLq+rxa1adTXJVG9VkbAAA2EKjnAOd1to1Sa5Zt+wF6+4fHmUNAACwlVyJEAAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBAh5EG6Kq6vKreV1U3VNXzTrHOP62q66vqvVX16lHWAwAAZ2vvqJ64qvYkeUmS70hyU5K3VdXVrbXr16zzkCTPT/Lo1trRqvryUdUDAABbYZQj0I9MckNr7cbW2meTXJXkCevW+cEkL2mtHU2S1trHRlgPAACctWqtjeaJq/5Jkstbaz8wvP+0JN/YWnv2mnV+J8lfJnl0kj1JDrfWfv8kz/WsJM9KkosuuugRV1111UhqXuv222/PeeedN/L9bIdJ6iWZrH4mqZdksvqZpF6SyepnknpJJqufSeolmax+JqmXZPv6mZmZeUdr7bL1y0c2hSNJnWTZ+rS+N8lDkkwnuTjJH1XVgdbaJ++wUWsvTfLSJLnsssva9PT0lhe73srKSrZjP9thknpJJqufSeolmax+JqmXZLL6maReksnqZ5J6SSarn0nqJRl/P6OcwnFTkgesuX9xko+cZJ3/1lr7XGvtfyd5XwaBGgAAdqRRBui3JXlIVV1aVXdN8uQkV69b53eSzCRJVV2Y5KuT3DjCmgAA4KyMLEC31o4leXaSNyZZTfKa1tp7q+rFVfX44WpvTPKJqro+yXKS57bWPjGqmgAA4GyNcg50WmvXJLlm3bIXrPm6JXnO8AYAADueKxECAEAHARoAADoI0AAA0EGAZtdYWlrKgQMHcujQoRw4cCBLS0vjLgkAOAeN9CBC2CpLS0uZn5/P4uJijh8/nj179mRubi5JMjs7O+bqAIBziRFodoWFhYUsLi5mZmYme/fuzczMTBYXF7OwsDDu0gCAc4wAza6wurqagwcP3mHZwYMHs7q6OqaKAIBzlQDNrjA1NZUjR47cYdmRI0cyNTU1pooAgHOVAM2uMD8/n7m5uSwvL+fYsWNZXl7O3Nxc5ufnx10aAHCOcRAhu8KJAwWvuOKKrK6uZmpqKgsLCw4gBAC2nQDNrjE7O5vZ2dmsrKxkenp63OUAAOcoUzgAAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAECHaq2Nu4YuVXVzkg9uw64uTPLxbdjPdpikXpLJ6meSekkmq59J6iWZrH4mqZdksvqZpF6SyepnknpJtq+fB7XW7rN+4a4L0Nulqt7eWrts3HVshUnqJZmsfiapl2Sy+pmkXpLJ6meSekkmq59J6iWZrH4mqZdk/P2YwgEAAB0EaAAA6CBAn9pLx13AFpqkXpLJ6meSekkmq59J6iWZrH4mqZdksvqZpF6SyepnknpJxtyPOdAAANDBCDQAAHQQoAEAoIMAvU5V/VpVfayqrht3LWerqh5QVctVtVpV762qHxl3TZtVVXevqj+rqncPe3nRuGvaClW1p6r+vKreMO5azkZVfaCq/ldVvauq3j7ues5WVd2rql5XVX8x/P35pnHXtBlV9dDha3LidltV/atx13U2qupHh+8B11XVUlXdfdw1bVZV/ciwj/fuxtflZP9fVtX+qnpzVf3V8N9946yxxyn6edLw9fl8Ve2aU8CdopefHb6nvaeqfruq7jXOGnucop9/O+zlXVX1pqq633bWJEDf2cuTXD7uIrbIsSQ/1lqbSvKoJD9UVV8z5po26/8k+bbW2tcl+fokl1fVo8Zc01b4kSSr4y5ii8y01r5+Qs4z+p+S/H5r7R8k+brs0teotfa+4Wvy9UkekeQzSX57zGVtWlXdP8kPJ7mstXYgyZ4kTx5vVZtTVQeS/GCSR2bwM/a4qnrIeKvq9vLc+f/L5yV5S2vtIUneMry/W7w8d+7nuiTfm+QPt72as/Py3LmXNyc50Fr72iR/meT5213UWXh57tzPz7bWvnb4/vaGJC/YzoIE6HVaa3+Y5JZx17EVWmsfba29c/j1pzIIAfcfb1Wb0wZuH969y/C2q4+AraqLk/zfSV427lr4oqq6Z5JvTbKYJK21z7bWPjneqrbEoSTvb61tx5VcR2lvki+tqr1JvizJR8Zcz2ZNJXlra+0zrbVjSf4gyfeMuaYup/j/8glJXjH8+hVJ/vG2FnUWTtZPa221tfa+MZW0aafo5U3Dn7UkeWuSi7e9sE06RT+3rbl7j2xzJhCgzxFVdUmSb0jyP8dbyeYNpzu8K8nHkry5tbZrexn6xST/Jsnnx13IFmhJ3lRV76iqZ427mLP0lUluTvLrw+k1L6uqe4y7qC3w5CRL4y7ibLTW/jrJzyX5UJKPJrm1tfam8Va1adcl+daqundVfVmSxyZ5wJhr2goXtdY+mgwGcZJ8+Zjr4eT+eZL/Pu4izlZVLVTVh5N8X4xAs9Wq6rwkv5XkX637i21Xaa0dH35Uc3GSRw4/At2VqupxST7WWnvHuGvZIo9urT08yWMymCr0reMu6CzsTfLwJL/SWvuGJJ/O7voY+k6q6q5JHp/kteOu5WwM59M+IcmlSe6X5B5V9dTxVrU5rbXVJD+dwcfqv5/k3RlMu4ORqqr5DH7WXjXuWs5Wa22+tfaADHp59nbuW4CecFV1lwzC86taa68fdz1bYfhx+kp291z1Ryd5fFV9IMlVSb6tqq4cb0mb11r7yPDfj2Uwx/aR463orNyU5KY1n3C8LoNAvZs9Jsk7W2t/O+5CztK3J/nfrbWbW2ufS/L6JN885po2rbW22Fp7eGvtWzP4ePqvxl3TFvjbqrpvkgz//diY62GNqnp6kscl+b42WRcCeXWSJ27nDgXoCVZVlcE8ztXW2s+Pu56zUVX3OXHEcFV9aQb/kf7FeKvavNba81trF7fWLsngo/VrW2u7ciStqu5RVeef+DrJd2bw8fSu1Fr7myQfrqqHDhcdSnL9GEvaCrPZ5dM3hj6U5FFV9WXD97dD2aUHeCZJVX358N8HZnCg2iS8Rlcnefrw66cn+W9jrIU1quryJD+e5PGttc+Mu56zte6g28dnmzPB3u3c2W5QVUtJppNcWFU3JXlha21xvFVt2qOTPC3J/xrOHU6Sn2itXTPGmjbrvkleUVV7MvjD7zWttV196rcJclGS3x7kmexN8urW2u+Pt6SzdkWSVw2nPtyY5JljrmfThvNrvyPJvxh3LWertfY/q+p1Sd6ZwUfQf57dfXni36qqeyf5XJIfaq0dHXdBPU72/2WSn0rymqqay+APnieNr8I+p+jnliS/lOQ+SX6vqt7VWvuu8VW5Mafo5flJ7pbkzcP367e21v7l2IrscIp+Hjsc6Ph8kg8m2dZeXMobAAA6mMIBAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAG2CGq6nhVvauqrquq1w5PQXe69X9i3f0/OYt9P6Oq7neKx15cVd9+kuXTVeV0ksA5R4AG2Dn+rrX29a21A0k+mzOf1/QOAbq1djZX5XtGBpfHvpPW2gtaa//jLJ4bYKII0AA70x8leXCSVNXvVNU7quq9/397d8waVRBGYfg9ECEBxUIQQYSIBAI2gYhooVhI0MJKG0mXImU6wVYs9A8IahBSqFVaQVNZCAoqxqQwYCH6BxQUo4iMxZ3VNUQ3VwST+D6wsPvdmdm53WGYYZJM1tplYKCuWN+qtQ+dzknOJXmcZCHJhVobTPIiyXQday7JQJIzwAGay2Pm622fdI01U9uQ5ESSpSQPaG7Pk6T/jgFaktaZJH3ASWCxliZKKaM0IXcqyY5Synl+rFiPr+g/BgwBB4ERYDTJ0fp4CLhSStkPvANOl1JmgSfAeB1v+Rfz6gemgVPAEWDX33trSdo4DNCStH4MJJmnCbNvgBu1PpXkOfAI2EMTgn9nrH6e0Vx7PdzV51UpZb5+fwoMtpjfcO3/sjTX2N5s0VeSNo2+fz0BSdJ3y6WUke5CkmPAceBwKeVjkvtAf49xAlwqpVxbMdYg8Lmr9BX4abvGGpSW7SVp03EFWpLWt+3A2xqeh4FDXc++JNmySp97wESSrQBJdifZ2eN/3gPberRZAvYm2Vd/n+09fUnafAzQkrS+3QX6kiwAF2m2cXRcBxY6hwg7SilzwG3gYZJFYJbe4XgGuLraIcKucT8Bk8Cdeojw9R+8jyRteGm2sUmSJElaC1egJUmSpBYM0JIkSVILBmhJkiSpBQO0JEmS1IIBWpIkSWrBAC1JkiS1YICWJEmSWvgGED8z1eXyZtQAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<Figure size 864x720 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "sio.savemat('Result/Res_ParallelCNN.mat',{\"loss\":fold_loss,\"acc\":fold_acc,\"val loss\":fold_vloss,\"val acc\":fold_vacc})\n",
    "\n",
    "fig = plt.figure(figsize=(12,10))\n",
    "plt.grid()\n",
    "plt.boxplot(fold_vacc)\n",
    "plt.suptitle('Cross-Validation Accuracy\\n Parallel CNN')\n",
    "ax = plt.gca()\n",
    "plt.xlabel('Patient id')\n",
    "plt.ylabel('Accuracy')\n",
    "plt.savefig('Result/ParallelCNN.png')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## CNN Temporal Model (B)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 276,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TemporalCNN(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(TemporalCNN, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(3,3,3)\n",
    "        self.conv2 = nn.Conv2d(3,3,5)\n",
    "        self.conv3 = nn.Conv2d(3,3,3)\n",
    "        self.conv4 = nn.Conv2d(3,3,5)\n",
    "        self.conv5 = nn.Conv2d(3,3,3)\n",
    "        self.conv6 = nn.Conv2d(3,3,5)\n",
    "        self.conv7 = nn.Conv2d(3,3,3)\n",
    "        self.conv8 = nn.Conv2d(3,3,5)\n",
    "        self.conv9 = nn.Conv2d(3,3,3)\n",
    "        self.conv10 = nn.Conv2d(3,3,5)\n",
    "        self.conv11 = nn.Conv2d(3,3,3)\n",
    "        self.conv12 = nn.Conv2d(3,3,5)\n",
    "        self.conv13 = nn.Conv2d(3,3,3)\n",
    "        self.conv14 = nn.Conv2d(3,3,5)     \n",
    "        self.pool1 = nn.MaxPool2d(2)\n",
    "        self.pool2 = nn.MaxPool2d(2)   \n",
    "        self.conv15 = nn.Conv2d(3,3,5)\n",
    "        self.conv16 = nn.Conv2d(3,3,7)\n",
    "        self.fc1 = nn.Linear(120,512)\n",
    "        self.fc2 = nn.Linear(512,4)\n",
    "        self.max = nn.Softmax()\n",
    "    \n",
    "    def forward(self, x):\n",
    "        batch_size = x.shape[0]\n",
    "        tmp = torch.zeros(batch_size, x.shape[1], x.shape[2],26,26).cuda()\n",
    "        tmp[:,0] = F.relu(self.conv2(F.relu(self.conv1(x[:,0]))))\n",
    "        tmp[:,1] = F.relu(self.conv4(F.relu(self.conv3(x[:,1]))))\n",
    "        tmp[:,2] = F.relu(self.conv6(F.relu(self.conv5(x[:,2]))))\n",
    "        tmp[:,3] = F.relu(self.conv8(F.relu(self.conv7(x[:,3]))))\n",
    "        tmp[:,4] = F.relu(self.conv10(F.relu(self.conv9(x[:,4]))))\n",
    "        tmp[:,5] = F.relu(self.conv12(F.relu(self.conv11(x[:,5]))))\n",
    "        tmp[:,6] = F.relu(self.conv14(F.relu(self.conv13(x[:,6]))))\n",
    "        x = torch.zeros(batch_size, x.shape[1], x.shape[2],26,26).cuda()\n",
    "        for i in range(7):\n",
    "            x[:,i] = tmp[:,i]\n",
    "        #x[:,0] = F.relu(self.conv1(x[:,0]))\n",
    "        #x[:,1] = F.relu(self.conv3(x[:,1]))\n",
    "        #x[:,2] = F.relu(self.conv5(x[:,2]))\n",
    "        #x[:,3] = F.relu(self.conv7(x[:,3]))\n",
    "        #x[:,4] = F.relu(self.conv9(x[:,4]))\n",
    "        #x[:,5] = F.relu(self.conv11(x[:,5]))\n",
    "        #x[:,6] = F.relu(self.conv13(x[:,6]))\n",
    "        #x[:,0] = F.relu(self.conv2(x[:,0]))\n",
    "        #x[:,1] = F.relu(self.conv4(x[:,1]))\n",
    "        #x[:,2] = F.relu(self.conv6(x[:,2]))\n",
    "        #x[:,3] = F.relu(self.conv8(x[:,3]))\n",
    "        #x[:,4] = F.relu(self.conv10(x[:,4]))\n",
    "        #x[:,5] = F.relu(self.conv12(x[:,5]))\n",
    "        #x[:,6] = F.relu(self.conv14(x[:,6]))\n",
    "        #x = x[:,:,:,3:29,3:29]\n",
    "        #tmp = torch.zeros(batch_size, x.shape[2], x.shape[1]*x.shape[3],x.shape[4]).cuda()\n",
    "        #for i in range(x.shape[1]):\n",
    "        #    tmp[:,:,i*x.shape[3]:(i+1)*x.shape[3], :] = x[:,i]\n",
    "        x = x.reshape(batch_size, x.shape[2], x.shape[1]*x.shape[3],-1) # img reshape\n",
    "        x = self.pool1(x)\n",
    "        x = F.relu(self.conv15(x))\n",
    "        x = F.relu(self.conv16(x))\n",
    "        x = self.pool2(x)\n",
    "        x = x.view(batch_size,-1)\n",
    "        x = self.fc1(x)\n",
    "        x = self.fc2(x)\n",
    "        x = self.max(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 277,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:65: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[0.2572, 0.2522, 0.2505, 0.2401],\n",
       "        [0.2591, 0.2499, 0.2525, 0.2385]], device='cuda:0',\n",
       "       grad_fn=<SoftmaxBackward>)"
      ]
     },
     "execution_count": 277,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "net = TemporalCNN().cuda()\n",
    "net(torch.from_numpy(tmp[0:2]).to(torch.float32).cuda())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 280,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training Fold 1/5\t of Patient 1\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:65: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1, 100] loss: 1.424\tAccuracy : 0.327\t\tval-loss: 1.384\tval-Accuracy : 0.243\n",
      "[11, 100] loss: 1.422\tAccuracy : 0.320\t\tval-loss: 1.384\tval-Accuracy : 0.270\n",
      "[21, 100] loss: 1.420\tAccuracy : 0.320\t\tval-loss: 1.384\tval-Accuracy : 0.270\n",
      "[31, 100] loss: 1.418\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n",
      "[41, 100] loss: 1.417\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n",
      "[51, 100] loss: 1.415\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n",
      "[61, 100] loss: 1.413\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n",
      "[71, 100] loss: 1.411\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n",
      "[81, 100] loss: 1.409\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n",
      "[91, 100] loss: 1.408\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n",
      "Finish Training Fold 1/5\t of Patient 1\n",
      "Begin Training Fold 2/5\t of Patient 1\n",
      "[1, 100] loss: 1.427\tAccuracy : 0.265\t\tval-loss: 1.390\tval-Accuracy : 0.216\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-280-7232a97298b5>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     37\u001b[0m                 \u001b[0moutputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mCNN\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     38\u001b[0m                 \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 39\u001b[1;33m                 \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     40\u001b[0m                 \u001b[0moptimizer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     41\u001b[0m                 \u001b[0mrunning_loss\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(self, gradient, retain_graph, create_graph)\u001b[0m\n\u001b[0;32m    164\u001b[0m                 \u001b[0mproducts\u001b[0m\u001b[1;33m.\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    165\u001b[0m         \"\"\"\n\u001b[1;32m--> 166\u001b[1;33m         \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    167\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    168\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\autograd\\__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables)\u001b[0m\n\u001b[0;32m     97\u001b[0m     Variable._execution_engine.run_backward(\n\u001b[0;32m     98\u001b[0m         \u001b[0mtensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgrad_tensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 99\u001b[1;33m         allow_unreachable=True)  # allow_unreachable flag\n\u001b[0m\u001b[0;32m    100\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    101\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "    if opti=='SGD':\n",
    "        optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)\n",
    "    elif opti =='Adam':\n",
    "        optimizer = optip = 0\n",
    "n_fold = 5    \n",
    "n_patient = len(np.unique(Patient))\n",
    "fold_vloss = np.zeros((n_fold,n_patient))\n",
    "fold_loss = np.zeros((n_fold,n_patient))\n",
    "fold_vacc = np.zeros((n_fold,n_patient))\n",
    "fold_acc = np.zeros((n_fold,n_patient))\n",
    "for patient in np.unique(Patient):\n",
    "    id_patient = np.arange(len(tmp))[Patient==patient]\n",
    "\n",
    "    length = len(id_patient)\n",
    "    \n",
    "    train_id, test_id = kfold(length,n_fold)\n",
    "    \n",
    "    for fold in range(n_fold):\n",
    "        X_train = tmp[id_patient[train_id[fold]]]\n",
    "        X_test = tmp[id_patient[test_id[fold]]]\n",
    "        y_train = Label[id_patient[train_id[fold]]]\n",
    "        y_test = Label[id_patient[test_id[fold]]] \n",
    "\n",
    "        print(\"Begin Training Fold %d/%d\\t of Patient %d\" % \n",
    "             (fold+1,n_fold, patient))\n",
    "\n",
    "        CNN = TemporalCNN().cuda(0)\n",
    "        criterion = nn.CrossEntropyLoss()\n",
    "        optimizer = optim.SGD(CNN.parameters(), lr=0.001)\n",
    "#        optimizer = optim.SGD(CNN.parameters(), lr=0.001, momentum=0.9)\n",
    "\n",
    "        n_epochs = 100\n",
    "        for epoch in range(n_epochs):\n",
    "            running_loss = 0.0\n",
    "            batchsize = 4\n",
    "            for i in range(int(len(y_train)/batchsize)):\n",
    "                optimizer.zero_grad()\n",
    "\n",
    "                # forward + backward + optimize\n",
    "                outputs = CNN(torch.from_numpy(X_train[i:i+batchsize]).to(torch.float32).cuda())\n",
    "                loss = criterion(outputs, torch.from_numpy(y_train[i:i+batchsize]).to(torch.long).cuda())\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                running_loss += loss.item()\n",
    "\n",
    "            #acc\n",
    "            _, idx = torch.max(CNN(torch.from_numpy(X_train[:]).to(torch.float32).cuda()).data,1)\n",
    "            acc = (idx == torch.from_numpy(y_train).cuda()).sum().item()/len(y_train)\n",
    "\n",
    "            #val Loss\n",
    "            val_outputs = CNN(torch.from_numpy(X_test[:]).to(torch.float32).cuda())\n",
    "            val_loss = criterion(val_outputs, torch.from_numpy(y_test[:]).to(torch.long).cuda())\n",
    "            _, idx = torch.max(val_outputs.data,1)\n",
    "            val_acc = (idx == torch.from_numpy(y_test).cuda()).sum().item()/len(y_test)\n",
    "\n",
    "            if epoch%10==0:\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "        fold_vloss[fold, p ] = val_loss.item()\n",
    "        fold_loss[fold, p] = running_loss/i\n",
    "        fold_vacc[fold, p] = val_acc\n",
    "        fold_acc[fold, p] = acc\n",
    "        print('Finish Training Fold %d/%d\\t of Patient %d' % \n",
    "             (fold+1,n_fold, patient))\n",
    "    print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n",
    "    \n",
    "    p = p + 1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Peresented Results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAtAAAAKUCAYAAAAtng/mAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOzde5hdeVkn+u9rIqI0NIlgq9DSqOiEiTfoabxET2I7Y+MoqIhDVBQn2Mfz2K0j6gxOHGhwMuP9MiMz56hhvIBpAS9P6/QIjFOlxvFCo+gIEWmQSwvKpQNN4wXSvOePvQPVRSWpX6V27aqdz+d59pNaa6+11/vWrqp861e/tVZ1dwAAgPX5sHkXAAAAO4kADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABpiBqnpKVZ1csXx3VX3ierbdwLH+R1V9w0b3B2CMAA1sC1X1NVV12zRovmUaCg/MqZb7VtU7q+oL13juR6vqRaOv2d2XdffrNqG2m6rqeate+7Hd/bMX+9oXOGZX1TWzOgbATiJAA3NXVU9L8mNJ/kOSK5J8QpL/kuTx59h+9yzr6e6/T/KLSb5+1XF3JTmcZGZhdbupqkry5CR3JtnSUe6a8P8UsO34wQTMVVVdnuTZSb6lu3+5u9/T3e/r7l/r7u+abnNTVb2oqp5XVXcleUpVfURV/VhVvXn6+LGq+ojp9g+qql+fjiLfWVW/czaIVdW/qaq/qqp3V9Wrq+rac5T2s0meUFUftWLdF2fyc/N/TF/r6VX12ulrvaqqvuI8fXZVffL044+uqluq6q6q+sMkn7Rq2x+vqjdNn395VX3+dP11Sf5tkn8xHan/k+n65ap66vTjD6uq76mqN1TVW6vq56af41TVVdM6vqGq3lhVb6+qoxd4iz4/yccn+bYkT6qq+6yq9Zuq6tSKz8GjpuuvrKpfrqq3VdU7quonpuvvNYK+oqbdK3o5VlW/m+Rvk3xiVX3jimO8rqr+71U1PL6qXjH9fL22qq6rqidW1ctXbfcdVfWrF+gX4IIEaGDePifJfZP8ygW2e3ySFyV5YJLnJzma5LOTfGaSz0hyTZLvmW77HUnuSPLgTEa0/22SrqpPTXJDkn/S3ffPJBC/fq2Ddff/TvKWJF+5YvWTk/xCd5+ZLr82k4B5eZJnJXleVX3cOnp+TpK/T/JxSf7l9LHSy6Z97U3yC0leWFX37e7fyGSU/henU0I+Y43Xfsr0cSjJJya5LMlPrNrmQJJPTXJtkmdU1b7z1PoNSX4tkxH5JPnSs09U1ROT3JTJSP0DkjwuyTumI/W/nuQNSa5K8pAkN5/nGKs9Ocn1Se4/fY23To/7gCTfmORHVwT1a5L8XJLvyuRr4wsyeU9vSfLwVb19XZKfH6gDYE0CNDBvH53k7StC6bn8Xnf/ane/v7v/LsnXJnl2d7+1u9+WSYB98nTb92USTh82Hc3+ne7uJPck+Ygkj6yqD+/u13f3a89zzJ/LdBpHVT0gkxD/gekb3f3C7n7ztKZfTPKaTIL8OU3D5ROSPGM62v5nWTUlpLuf193v6O4z3f3D05o/9QKfn7O+NsmPdPfruvvuJN+dycjxymkvz+ruv+vuP0nyJ5n8ArJWrR+V5ImZ/NLwvkx+gVk5jeOpSX6gu1/WE7d39xumn4OPT/Jd0x7/vrtHTpL8me5+5bT/93X3f+/u106P8VtJXpLJLy5JciTJc7v7pdP34a+6+8+7+x8yCf1fN+3lH2cS5n99oA6ANQnQwLy9I8mD1jGv+U2rlj8+k9HJs94wXZckP5jk9iQvmf7J/+lJ0t23J/lXmYyavrWqbq6qj08+cJWMs49PmL7OzyU5VFUPSfJVSW7v7j8+e8Cq+vrp1IF3VtU7k+xP8qAL9PHgJLtX9bOyj7NTDU5V1bumr3v5Ol73rLU+L7szGYk/669XfPy3mYxSr+UrkpxJcut0+flJHltVD54uX5nJKPxqVyZ5wzp+KTqXe73XVfXYqvr96XScdyb5knzw83GuGpLJLyZfU/WBedwvmAZrgIsiQAPz9nuZTGf48gts16uW35zkYSuWP2G6Lt397u7+ju7+xCRfluRpZ+c6d/cvdPeB6b6d5Pun6y9b8XjjdN0bk/xOJqO6T84kUCdJquphSX4qkykhH93dD0zyZ0nqAn28LZNQeuWq2s++7ucn+TdJvjrJnunrvmvF667+PKy21uflTJK/ucB+a/mGTML1G6vqr5O8MMmHZ3IiZTIJup+0xn5vSvIJ5/il6D1JVs4r/9g1tvlAjzWZ1/5LSX4oyRXTz8et+eDn41w1pLt/P8l7Mxmt/pqYvgFsEgEamKvufleSZyR5TlV9eVV9VFV9+HTU8QfOs+uJJN9TVQ+uqgdNX+N5SVJVX1pVnzwdebwrk6kb91TVp1bVF05D2d8n+bvpc+fzs5mE5M/LZAT2rPtlEvTeNj3mN2YyAn2hfu9J8stJbpr2+sjce1rE/TMJvG9LsruqnpHJ3N+z/ibJVXXuq1OcSPLtVfXwqrosH5wzPTQaPB11vzaTucefmQ/ONf/+FfX+dJLvrKpH18QnT3+x+MNM5o9/X1XdryaXBfy86T6vSPIFVfUJ05Mbv/sCpdwnkyksb0typqoem+SfrXj+eJJvrKpra3IC5UOq6h+teP7nMpkDfmZwGgnAOQnQwNx1948keVomJwG+LZNRxRuSnO+KCf8+yW1J/jTJ/0nyR9N1SfKIJP8zyd2ZjHD/l+5eziSIfV+St2cyjeFjMjnB8HxelGRPkt/s7resqPlVSX54+vp/k+TTkvzuevqd9nbZtIafSfLfVjz34kyu8vEXmUy/+Pvce0rDC6f/vqOq/miN135uJiOtv53kL6f737jOulZ6cpJXdPdLuvuvzz6S/Kckn15V+7v7hUmOZXKi47szeb/2Tn9J+LIkn5zkjZmc0PkvkqS7X5rJ3OQ/TfLyXGBOcne/O8m3JnlBktOZjCTfsuL5P8z0xMJMRup/K/cegf/5TH6xMfoMbJqanFcDAIunqj4yk6t4PKq7XzPveoDFYAQagEX2/yR5mfAMbKaZ3s0LAOalql6fycmGFzpBFWCIKRwAADDAFA4AABggQANsY1X1+qr6ounHN1XV89a533JVPXW21QFcmgRogEFV1VX1nuldC/+qqn5keovuHaOqPqWqXlhVb5/e8fBPq+ppVbWrqq6a9vjfV+3zvKq6afrxwek2z1m1zcmqesrWdQKw9QRogI35jO6+LJObjXxNkm8afYF13L58Jqrqk5L8QSbXl/607r48yROTXJ3JjVzO+uwVN0BZy3uSfH1VXTWjUgG2JQEa4CJ0959ncrvv/UlSVU+vqtdW1bur6lVV9RVnt62qp1TV71bVj1bVnZncjfCTqup/VdU7pqPBz6+qB67n2FX12VX1v6vqnVX1J1V1cJ1lPyvJ/+7up529OUx3v7q7v6a737liux/IB29Os5Z3ZnIjmGeu87gAC0GABrgI01txf36SP56ueu10+fJMgurzqurjVuzymCSvy+QuiMcyuczaf0zy8Un2JbkyyU3rOO5Dkvz3TALu3iTfmeSXqurB6yj7izK5w+KFPCfJp5ydg30Ox5I8oao+dR2vB7AQBGiAjfmjqjqd5NeS/HSmt+Pu7hd295u7+/3d/YtJXpPkmhX7vbm7/3N3n+nuv+vu27v7pd39D939tiQ/kuT/Wsfxvy7Jrd196/RYL83k1uZfso59PzrJWy641eQ24MdynlHo6e29/98kz17H6wEsBDdSAdiYR3X37atXVtXXJ3lakqumqy5L8qAVm7xp1fYfk+Q/ZTJqff9MBjZOr+P4D0vyxKr6shXrPjzJ0jr2fUeSj7vgVhM/leS7Vh1nte9P8tqq+ox1vibAjmYEGmCTVNXDMgmcNyT56O5+YJI/y2Saxlmr7171H6frPr27H5DJyHLlwt6U5Oe7+4ErHvfr7u9bx77/M8kT1rFduvt9mUxF+d5z1dXd70jyY9NtABaeAA2wee6XSRh+W5JU1TdmenLhedw/yd1J3jmd1/xd6zzW85J8WVV98fTSc/edXlruoevY95lJPreqfrCqPnZa6ydPL1O31gmMP5/kI5Jcd57X/JEkn5vJPG6AhSZAA2yS7n5Vkh9O8ntJ/ibJpyX53Qvs9qwkj0ryrkxOCvzldR7rTUken+TfZhLY35RJ+L7gz/Xufm2Sz8lkmskrq+pdSX4pkznU715j+3syCd17z/Oad2Vy1Y5zbgOwKKp79V8TAQCAczECDQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwYPe8Cxj1oAc9qK+66qqZH+c973lP7ne/+838OFthkXpJFqufReolWax+FqmXZLH6WaReksXqZ5F6SRarn0XqJdm6fl7+8pe/vbsfvHr9jgvQV111VW677baZH2d5eTkHDx6c+XG2wiL1kixWP4vUS7JY/SxSL8li9bNIvSSL1c8i9ZIsVj+L1Euydf1U1RvWWm8KBwAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBg97wLYPNU1Yb26+5NrgQAYHHNdAS6qq6rqldX1e1V9fQ1nn9YVf1mVf1pVS1X1UNnWc+i6+41H+d7TngGABgzswBdVbuSPCfJY5M8Msnhqnrkqs1+KMnPdfenJ3l2kv84q3oAAGAzzHIE+pokt3f367r7vUluTvL4Vds8MslvTj9eWuN5AADYVmpWf8Kvqq9Kcl13P3W6/OQkj+nuG1Zs8wtJ/qC7f7yqvjLJLyV5UHe/Y9VrXZ/k+iS54oorHn3zzTfPpOaV7r777lx22WUzP85WOHToUJaWluZdxqZZpPdmkXpJFqufReolWax+FqmXZLH6WaReksXqZ5F6Sbaun0OHDr28u69evX6WJxGudUbb6rT+nUl+oqqekuS3k/xVkjMfslP3Tyb5ySS5+uqr++DBg5ta6FqWl5ezFcfZKovUyyK9Nzuxl0vlZNWd+N6czyL1s0i9JIvVzyL1kixWP4vUSzL/fmYZoO9IcuWK5YcmefPKDbr7zUm+Mkmq6rIkT+jud82wJuAinS8IV9WOC8oAMGqWc6BfluQRVfXwqrpPkicluWXlBlX1oKo6W8N3J3nuDOsBAICLNrMA3d1nktyQ5MVJTiV5QXe/sqqeXVWPm252MMmrq+ovklyR5Nis6gEAgM0w0xupdPetSW5dte4ZKz5+UZIXzbIGAADYTG7lDQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGDDTW3kDALB1qmpD+3X3Jley2ARoAIAFca4gXFVC8iYyhQMAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAgEv6Riru1gMAwKhLOkCfLwi7Yw8AwPxs54HOSzpAAwCwPW3ngU5zoAEAYIAADQAAAwRoAAAYYA70TnPT5cO79DMfsKH9ctO7xvcBAFhwAvQOU8+6a3jS/PLycg4ePDh2nKr0TUO7AABcEkzhAACAAQI0AAAMEKABAGCAOdAAbEvb+S5kwKVNgAZgWzpXEJ73HcgATOEAAIABAjSwpr1796aqhh5JhvfZu3fvnDsFgDECNLCm06dPp7uHHktLS8P7nD59et6tAsAQARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNMzBiRMnsn///lx77bXZv39/Tpw4Me+SAIB12j3vAuBSc+LEiRw9ejTHjx/PPffck127duXIkSNJksOHD8+5OgDgQoxAwxY7duxYjh8/nkOHDmX37t05dOhQjh8/nmPHjs27NABgHQRo2GKnTp3KgQMH7rXuwIEDOXXq1Jwqgvnau3dvqmrdjyRD21dV9u7dO+cugUUiQMMW27dvX06ePHmvdSdPnsy+ffvmVBHM1+nTp9Pd634sLS0Nbd/dOX369LzbBBaIAA1b7OjRozly5EiWlpZy5syZLC0t5ciRIzl69Oi8SwMA1sFJhLDFzp4oeOONN+bUqVPZt29fjh075gRCANghBGiYg8OHD+fw4cNZXl7OwYMH510OADDAFA4AABggQAMAwABTOAAWxNlLvI3q7k2uBGCxCdAAC+J8QbiqBGWATWIKBwAADBCgAQBgwCURoEdvE+tWsQAAnMslMQf67G1iR2zk+rwbPYFn1FYcZ8+ePTM/BgDATnRJjEAvku4efiwtLQ3vc+edd867VQCAbUmABgCAAQI0AMAOM3p+V+Lcrs000wBdVddV1aur6vaqevoaz39CVS1V1R9X1Z9W1ZfMsh4AgEVw9vyuWU7nPH369Lzb3LZmFqCraleS5yR5bJJHJjlcVY9ctdn3JHlBd39Wkicl+S+zqgcAADbDLEegr0lye3e/rrvfm+TmJI9ftU0necD048uTvHmG9QAAwEWb5WXsHpLkTSuW70jymFXb3JTkJVV1Y5L7JfmitV6oqq5Pcn2SXHHFFVleXh4uZnSfu+++e0uOsxU22st2tUj9bPdefN8sz7uMTbWd+xmpbZG+zpLF+lpbpF6S7d/Ppfx9k8y3thq9PvK6X7jqiUm+uLufOl1+cpJruvvGFds8bVrDD1fV5yQ5nmR/d7//XK979dVX92233TZay5ZdB3pWn8+LsZFetrNF6mc79+L7Zvu+NxuxXT/PyXhti/R1lizW19oi9ZJs734u9e+braqtql7e3VevXj/LKRx3JLlyxfJD86FTNI4keUGSdPfvJblvkgfNsCYAALgoswzQL0vyiKp6eFXdJ5OTBG9Ztc0bk1ybJFW1L5MA/bYZ1gQAABdlZnOgu/tMVd2Q5MVJdiV5bne/sqqeneS27r4lyXck+amq+vZMTih8Sm/XvxWwpTZ6u3JfPgDArM3yJMJ0961Jbl217hkrPn5Vks+bZQ3sTOcLwtt5ThYAsPjciRAAAAYI0AAAMECABgCAAQI0AAAMEKABAGDATK/CAQC4NCcsGgEaAGbMpTlhsZjCAQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AwNzs3bs3VTX0SDK8z969ezetZgEaAIC5OX36dLp76LG0tDS8z+nTpzetZgGaudqJv3UCAJc2AZq52om/dQIAl7bd8y4AAGBezv5lc1R3b3Il7CQCNABwyTpfEK4qQZk1mcIBAAADBGiAHcbJtwDzJUAD7DBOvt2+/HIDlwYBGgA2iV9u4NIgQAMAwAABGgAABriMHQDADtPPfEBy0+Xr3v5gkixv4BisSYAGAIa4+cj81bPuGvp8Li8v5+DBg2PHqErfNFbXpUKABgCGnCu4ufEIlwpzoAEAYIAADQAAA0zhAGCunAwF7DQCNADzddO7hjbfyMlQAJvJFA4AABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIDrQAPAJhm9KUzixjCwEwnQADuMkLZ91bPuSncP7bORG8NUVfqmoV2ATSRAA+wwQhrAfJkDDQCsae/evamqdT+SDG1fVdm7d++cu4RxAjQAsKbTp0+nu9f9WFpaGtq+u3P69Ol5twnDBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABrgMNm2Tv3r1bcjb5nj17cuedd878OADA2oxAwyYZvdyTSz4BwM4kQAMAwAABGgBYeKN3VXRnRc5HgAYAFp5pdmwmJxEusBMnTuTYsWM5depU9u3bl6NHj+bw4cPzLosdop/5gOSmy4f2OZgkyxs4DgDsIAL0gjpx4kSOHj2a48eP55577smuXbty5MiRJBGiWZd61l3p7qF9lpeXc/DgwbHjVKVvGtoFAObKFI4FdezYsRw/fjyHDh3K7t27c+jQoRw/fjzHjh2bd2kAADuaEegFderUqRw4cOBe6w4cOJBTp07NqSKAS8PZk89mac+ePTM/BnBuRqAX1L59+3Ly5Ml7rTt58mT27ds3p4oAFt/oCWcbPVHNzZRgvgToBXX06NEcOXIkS0tLOXPmTJaWlnLkyJEcPXp03qUBAOxopnAsqLMnCt54440fuArHsWPHnEAIAHCRBOgFdvjw4Rw+fHhDV0YAAGBtpnAAAMAAARoAAAbMdApHVV2X5MeT7Ery0939faue/9Ekh6aLH5XkY7r7gbOsCQCA7WMn3vl2ZgG6qnYleU6Sf5rkjiQvq6pbuvtVZ7fp7m9fsf2NST5rVvUAALD97MQ7385yCsc1SW7v7td193uT3Jzk8efZ/nCSEzOsBwAALtosp3A8JMmbVizfkeQxa21YVQ9L8vAk/+scz1+f5PokueKKK7K8vDxczOg+d99995YcZytstJetskjvzSL1spHjbPd+Rvi+2dhxtsJ2f29Gbfd+Rmrb7l9ni/Z9470Z22cjxzmnjdw1aT2PJE/MZN7z2eUnJ/nP59j235zrudWPRz/60T1q0uaYpaWlLTnOVthIL1tlkd6bReplo8fZzv2M8n3jvdkq27mf0a+B7fx1tmjfN96bpS05TpLbeo08OsspHHckuXLF8kOTvPkc2z4ppm8AALADzDJAvyzJI6rq4VV1n0xC8i2rN6qqT02yJ8nvzbAWAADYFDML0N19JskNSV6c5FSSF3T3K6vq2VX1uBWbHk5y83SYHAAAtrWZXge6u29Ncuuqdc9YtXzTLGsAAIDNNNMADReyEy+eDgBc2gRo5uumdw3vspGLpwMAbJZZnkQIAAALR4AGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAM2D3vAmBR9DMfkNx0+dA+B5NkeQPHYdNU1Yb26+5NrgSAnUKAhs1y07uGd1leXs7Bgwc3vxbW7VxBuKqEZADWZAoHAAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADds+7AAAAxlXVTF9/z549M339nUyABgDYYbp7aPuqGt6HczOFAwAABgjQAAAwQIAGAIAB5kADAAuvn/mA5KbLh/Y5mCTLGzgOC0+ABgAWXj3rruGT6JaXl3Pw4MGx41SlbxrahR3IFA4AABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAbvnXQAA46pq5sfYs2fPzI8BsBMJ0AA7THcP71NVG9oPgA9lCgcAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAV+EAANbUz3xActPl697+YJIsb+AYsMMI0ADAmupZdw1d/nB5eTkHDx4cO0ZV+qaxumDeTOEAAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGDA7nkXAACwFapq5sfYs2fPzI/B/F1wBLqqbqiqDX01VNV1VfXqqrq9qp5+jm2+uqpeVVWvrKpf2MhxAADOp7uHHxvZ784775xzp2yF9YxAf2ySl1XVHyV5bpIX99mvqvOoql1JnpPknya5Y/oat3T3q1Zs84gk353k87r7dFV9zEaaAACArXLBEeju/p4kj0hyPMlTkrymqv5DVX3SBXa9Jsnt3f267n5vkpuTPH7VNt+U5DndfXp6rLcO1g8AAFtqXXOgu7ur6q+T/HWSM0n2JHlRVb20u//1OXZ7SJI3rVi+I8ljVm3zKUlSVb+bZFeSm7r7N1a/UFVdn+T6JLniiiuyvLy8nrLvZXSfu+++e0uOsxU22st2tUj9bPdeLuXvm2T71rVRi9LPdv++GbXd+xmpbdF+BiTbu7ZR27mXnfb/TV1oNkZVfWuSb0jy9iQ/neRXu/t9VfVhSV7T3WuORFfVE5N8cXc/dbr85CTXdPeNK7b59STvS/LVSR6a5HeS7O/ud56rnquvvrpvu+22gRYnJw2sY9bJvSwvL+fgwYMzP85W2Egv29ki9bOde7nUv2+2a10btUj9bOfvm43Yzv2Mft0s0s+AZHvXNmo797Kd/7+pqpd399Wr169nBPpBSb6yu9+wcmV3v7+qvvQ8+92R5MoVyw9N8uY1tvn97n5fkr+sqldnMl3kZeuoCwAAttx6rgN9a5IPnFJaVfevqsckSXefOs9+L0vyiKp6eFXdJ8mTktyyaptfTXJo+roPymRKx+vWXz4AAGyt9YxA/9ckj1qx/J411n2I7j5TVTckeXEm85uf292vrKpnJ7mtu2+ZPvfPqupVSe5J8l3d/Y4N9HFe/cwHJDddPrTPwSRZ3sBxAABYaOsJ0LXysnXTqRvrPfnw1kxGsFeue8aKjzvJ06aPmaln3bV1c2tuGtoFAIAdZj1TOF5XVd9aVR8+fXxbTLMAAOAStZ4A/c1JPjfJX+WDl6K7fpZFAQDAdnXBqRjTm5s8aQtqAbaZqpr5Mfbs2TPzYwDAZrpggK6q+yY5kuQfJ7nv2fXd/S9nWBcwZxu5Xuh2vs4oAGyW9Uzh+PkkH5vki5P8VibXc373LIsCAIDtaj0B+pO7+98leU93/2ySf57k02ZbFgAAo6pqzcf5ntuK6XqLZj0B+n3Tf99ZVfuTXJ7kqplVBADAhnT3mo+lpaVzPmfq3bj1XM/5J6tqT5LvyVKoQsEAABjzSURBVOROgpcl+XczrQoAALap8wboqvqwJHd19+kkv53kE7ekKgCALXCh6Qvnet6o7aXtvFM4uvv9SW7YoloAALbU+aY1nG/aA5e29cyBfmlVfWdVXVlVe88+Zl4ZAABsQ+uZA332es/fsmJdx3QOAAAuQeu5E+HDt6IQAADYCdZzJ8KvX2t9d//c5pcDsPn27t2b06dPD+83em3UPXv25M477xw+DgA7y3qmcPyTFR/fN8m1Sf4oiQAN7AinT58ePulneXk5Bw8eHNrHzQgALg3rmcJx48rlqro8k9t7AwDAJWc9V+FY7W+TPGKzCwEAgJ1gPXOgfy2Tq24kk8D9yCQvmGVRAACwXa1nDvQPrfj4TJI3dPcdM6oHAAC2tfUE6DcmeUt3/32SVNVHVtVV3f36mVYGAADb0HrmQL8wyftXLN8zXQcAAJec9QTo3d393rML04/vM7uSAABg+1pPgH5bVT3u7EJVPT7J22dXEgAAbF/rmQP9zUmeX1U/MV2+I8madycEAIBFt54bqbw2yWdX1WVJqrvfPfuyAABge7rgFI6q+g9V9cDuvru7311Ve6rq329FcQAAsN2sZw70Y7v7nWcXuvt0ki+ZXUkAALB9rSdA76qqjzi7UFUfmeQjzrM9AAAsrPWcRPi8JL9ZVf9tuvyNSX52diUBAMD2dcER6O7+gST/Psm+JI9M8htJHjbjugDgXk6cOJH9+/fn2muvzf79+3PixIl5lwRcotYzAp0kf53J3Qi/OslfJvmlmVUEAKucOHEiR48ezfHjx3PPPfdk165dOXLkSJLk8OHDc64OuNSccwS6qj6lqp5RVaeS/ESSN2VyGbtD3f0T59oPADbbsWPHcvz48Rw6dCi7d+/OoUOHcvz48Rw7dmzepQGXoPONQP95kt9J8mXdfXuSVNW3b0lVAJuon/mA5KbLh/Y5mCTLGzgOM3Hq1KkcOHDgXusOHDiQU6dOzaki4FJ2vgD9hCRPSrJUVb+R5OYktSVVAWyietZd6e6hfZaXl3Pw4MGx41SlbxrahXXat29fTp48mUOHDn1g3cmTJ7Nv3745VgVcqs45haO7f6W7/0WSf5TJOMy3J7miqv5rVf2zLaoPAHL06NEcOXIkS0tLOXPmTJaWlnLkyJEcPXp03qUBl6D13Mr7PUmen+T5VbU3yROTPD3JS2ZcGwAk+eCJgjfeeGNOnTqVffv25dixY04gBOZivVfhSJJ0951J/r/pAwC2zOHDh3P48OENTa8B2EzruRMhAAAwJUADAMCAoSkcO1nV7C8gsmfPnpkfAwCA+bokAvTo5auS6eWoNrAfAACLzRQOAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0Ac3DixIns378/1157bfbv358TJ07MuyRgnS6JG6kAwHZy4sSJHD16NMePH88999yTXbt25ciRI0mSw4cPz7k64EKMQAPAFjt27FiOHz+eQ4cOZffu3Tl06FCOHz+eY8eOzbs0YB0EaADYYqdOncqBAwfute7AgQM5derUnCoCRgjQALDF9u3bl5MnT95r3cmTJ7Nv3745VQSMEKABYIsdPXo0R44cydLSUs6cOZOlpaUcOXIkR48enXdpwDo4iRAAttjZEwVvvPHGnDp1Kvv27cuxY8ecQAg7hAANAHNw+PDhHD58OMvLyzl48OC8ywEGmMIBAAADBGgAABggQAMAwIBLeg50VW3o+e6eRTkAAOwAl/QIdHef87G0tHTO5wAAuHRd0gEaAABGCdAAADBAgAYAgAECNAAADBCgAQBgwEwDdFVdV1Wvrqrbq+rpazz/lKp6W1W9Yvp46izrAQCAizWz60BX1a4kz0nyT5PckeRlVXVLd79q1aa/2N03zKoOAADYTLMcgb4mye3d/brufm+Sm5M8fobHAwCAmZvlnQgfkuRNK5bvSPKYNbZ7QlV9QZK/SPLt3f2m1RtU1fVJrk+SK664IsvLy5tf7Sp33333lhxnKyxSL8li9bNIvZy1XfsZrWuj7808+z906NB5nz/X3VWXlpZmUc7MLNr3zXbvZ6S2nfh9cz7b/b0Zsd172XE/o893N76LeSR5YpKfXrH85CT/edU2H53kI6Yff3OS/3Wh1330ox/dW2FpaWlLjrMVFqmX7sXqZ5F66e6e/EjZfjZS10bem+3af/difa0tUi/d27uf0a9p3zfb13buZTv/jE5yW6+RR2c5heOOJFeuWH5okjev3KC739Hd/zBd/Kkkj55hPQAAcNFmGaBfluQRVfXwqrpPkicluWXlBlX1cSsWH5fk1AzrAQCAizazOdDdfaaqbkjy4iS7kjy3u19ZVc/OZDj8liTfWlWPS3ImyZ1JnjKregAAYDPM8iTCdPetSW5dte4ZKz7+7iTfPcsaAABgM7kTIQAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBg97wLAHaWqtrQ8909i3IAYMsZgQaGdPc5H0tLS+d8DgAWhQANAAADBGgAABggQAMAwAABGgAABrgKBwAAc3WhKzxthj179mzaawnQAADMzUau1FRVc73CkykcAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAA1yFAwA4p1lfXmwzLy0GW0WABgDWNHqZsHlfWgy2iikcAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAa4kQoAANvOhe6Cea7nt+JmPkagAQDYdrr7nI+lpaVzPrcVBGgAABggQAMAwAABGgAABjiJELgkXOhklM2wZ8+emR8DgPkToIGFt5GTSqpqy05GAWBnMYUDAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAA2YaoKvquqp6dVXdXlVPP892X1VVXVVXz7IeAAC4WDML0FW1K8lzkjw2ySOTHK6qR66x3f2TfGuSP5hVLQAAsFlmOQJ9TZLbu/t13f3eJDcnefwa231vkh9I8vczrAUAADZFdfdsXrjqq5Jc191PnS4/OcljuvuGFdt8VpLv6e4nVNVyku/s7tvWeK3rk1yfJFdcccWjb7755pnUvNLdd9+dyy67bObH2QqL1EuyWP0sUi/JYvVz6NChLC0tzbuMTbNI780i9ZIsVj++b7avReol2bp+Dh069PLu/pApxrtneMxaY90H0npVfViSH03ylAu9UHf/ZJKfTJKrr766Dx48uDkVnsfy8nK24jhbYZF6SRarn0XqJVm8fhapl0V6bxapl2Tx+lmkXhbpvVmkXpL59zPLKRx3JLlyxfJDk7x5xfL9k+xPslxVr0/y2UlucSIhAADb2SwD9MuSPKKqHl5V90nypCS3nH2yu9/V3Q/q7qu6+6okv5/kcWtN4QAAgO1iZgG6u88kuSHJi5OcSvKC7n5lVT27qh43q+MCAMAszXIOdLr71iS3rlr3jHNse3CWtQAAwGZwJ0IAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABuyedwEAwM5SVRt6rrtnUQ5sOSPQAMCQ7l7zsbS0dM7nhGcWiQANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMGCmAbqqrquqV1fV7VX19DWe/+aq+j9V9YqqOllVj5xlPQAAcLFmFqCraleS5yR5bJJHJjm8RkD+he7+tO7+zCQ/kORHZlUPAABshlmOQF+T5Pbufl13vzfJzUkev3KD7r5rxeL9kvQM6wEAgItW3bPJrFX1VUmu6+6nTpefnOQx3X3Dqu2+JcnTktwnyRd292vWeK3rk1yfJFdcccWjb7755pnUvNLdd9+dyy67bObH2QqL1EuyWP0sUi/JYvVz6NChLC0tzbuMTbNI780i9ZIsVj+L1EuyWP0sUi/J1vVz6NChl3f31avXzzJAPzHJF68K0Nd0943n2P5rptt/w/le9+qrr+7bbrtt0+tdbXl5OQcPHpz5cbbCIvWSLFY/i9RLslj9VFVm9fNxHhbpvVmkXpLF6meRekkWq59F6iXZun6qas0APcspHHckuXLF8kOTvPk829+c5MtnWA8AAFy0WQbolyV5RFU9vKruk+RJSW5ZuUFVPWLF4j9P8iHTNwAAYDvZPasX7u4zVXVDkhcn2ZXkud39yqp6dpLbuvuWJDdU1RcleV+S00nOO30DAADmbWYBOkm6+9Ykt65a94wVH3/bLI8PAACbzZ0IAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQMAcnTpzI/v37c+2112b//v05ceLEvEsCANZp97wLgEvNiRMncvTo0Rw/fjz33HNPdu3alSNHjiRJDh8+POfqAIALMQINW+zYsWM5fvx4Dh06lN27d+fQoUM5fvx4jh07Nu/SAIB1MAINW+zUqVM5cODAvdYdOHAgp06dmlNFl7aq2tBz3T2LcgDYAYxAwxbbt29fTp48ea91J0+ezL59++ZU0aWtu9d8LC0tnfM54Rng0iZAwxY7evRojhw5kqWlpZw5cyZLS0s5cuRIjh49Ou/SAIB1MIUDttjZEwVvvPHGnDp1Kvv27cuxY8ecQAgAO4QADXNw+PDhHD58OMvLyzl48OC8ywEABpjCAQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABlR3z7uGIVX1tiRv2IJDPSjJ27fgOFthkXpJFqufReolWax+FqmXZLH6WaReksXqZ5F6SRarn0XqJdm6fh7W3Q9evXLHBeitUlW3dffV865jMyxSL8li9bNIvSSL1c8i9ZIsVj+L1EuyWP0sUi/JYvWzSL0k8+/HFA4AABggQAMAwAAB+tx+ct4FbKJF6iVZrH4WqZdksfpZpF6SxepnkXpJFqufReolWax+FqmXZM79mAMNAAADjEADAMAAARoAAAYI0KtU1XOr6q1V9WfzruViVdWVVbVUVaeq6pVV9W3zrmmjquq+VfWHVfUn016eNe+aNkNV7aqqP66qX593LRejql5fVf+nql5RVbfNu56LVVUPrKoXVdWfT79/PmfeNW1EVX3q9D05+7irqv7VvOu6GFX17dOfAX9WVSeq6r7zrmmjqurbpn28cie+L2v9f1lVe6vqpVX1mum/e+ZZ44hz9PPE6fvz/qraMZeAO0cvPzj9mfanVfUrVfXAedY44hz9fO+0l1dU1Uuq6uO3siYB+kP9TJLr5l3EJjmT5Du6e1+Sz07yLVX1yDnXtFH/kOQLu/szknxmkuuq6rPnXNNm+LYkp+ZdxCY51N2fuSDXGf3xJL/R3f8oyWdkh75H3f3q6XvymUkeneRvk/zKnMvasKp6SJJvTXJ1d+9PsivJk+Zb1cZU1f4k35Tkmky+xr60qh4x36qG/Uw+9P/Lpyf5ze5+RJLfnC7vFD+TD+3nz5J8ZZLf3vJqLs7P5EN7eWmS/d396Un+Isl3b3VRF+Fn8qH9/GB3f/r059uvJ3nGVhYkQK/S3b+d5M5517EZuvst3f1H04/fnUkIeMh8q9qYnrh7uvjh08eOPgO2qh6a5J8n+el518IHVdUDknxBkuNJ0t3v7e53zreqTXFtktd291bcyXWWdif5yKraneSjkrx5zvVs1L4kv9/df9vdZ5L8VpKvmHNNQ87x/+Xjk/zs9OOfTfLlW1rURVirn+4+1d2vnlNJG3aOXl4y/VpLkt9P8tAtL2yDztHPXSsW75ctzgQC9CWiqq5K8llJ/mC+lWzcdLrDK5K8NclLu3vH9jL1Y0n+dZL3z7uQTdBJXlJVL6+q6+ddzEX6xCRvS/LfptNrfrqq7jfvojbBk5KcmHcRF6O7/yrJDyV5Y5K3JHlXd79kvlVt2J8l+YKq+uiq+qgkX5LkyjnXtBmu6O63JJNBnCQfM+d6WNu/TPI/5l3ExaqqY1X1piRfGyPQbLaquizJLyX5V6t+Y9tRuvue6Z9qHprkmumfQHekqvrSJG/t7pfPu5ZN8nnd/agkj81kqtAXzLugi7A7yaOS/Nfu/qwk78nO+jP0h6iq+yR5XJIXzruWizGdT/v4JA9P8vFJ7ldVXzffqjamu08l+f5M/qz+G0n+JJNpdzBTVXU0k6+158+7lovV3Ue7+8pMerlhK48tQC+4qvrwTMLz87v7l+ddz2aY/jl9OTt7rvrnJXlcVb0+yc1JvrCqnjffkjauu988/fetmcyxvWa+FV2UO5LcseIvHC/KJFDvZI9N8kfd/TfzLuQifVGSv+zut3X3+5L8cpLPnXNNG9bdx7v7Ud39BZn8efo1865pE/xNVX1ckkz/feuc62GFqvqGJF+a5Gt7sW4E8gtJnrCVBxSgF1hVVSbzOE9194/Mu56LUVUPPnvGcFV9ZCb/kf75fKvauP7/27u3EKuqOI7j3x9NpF3owbJ7TJQk1INghBGFoJkF+VI9RIRWUEHkW5Q+FNVDvkdQiWCQBmZ0gcIsIii6UOakXYSg0HroKYNCK7F/D3tPjdOM43bCM2f8fmCYfdbZa+3/YuDM/6y91l5Vq6rq/KoapLm1/m5V9eVIWpJTkpw2fAwsobk93Zeq6ifghySXtkWLgK97GNL/4Tb6fPpGaw+wIMnJ7efbIvp0gSdAktnt7wtpFqpNh7/R68Dy9ng58FoPY9EISZYCDwHLqmpfr+OZrFGLbpdxjHOCgWN5sX6Q5EVgIXBGkh+BR6tqXW+jOmpXA3cAO9u5wwCrq+rNHsZ0tM4Bnk9yAs0Xv01V1dePfptGzgJeafIZBoCNVbWltyFN2gPAhnbqw3fAnT2O56i182uvA+7tdSyTVVWfJNkMfE5zC3o7/b098ctJZgEHgPuram+vA+pirP+XwBpgU5K7ab7w3Nq7CLsZpz8/A08BZwJvJBmqqut7F+WRGacvq4CTgLfbz+uPq+q+ngXZwTj9ubEd6PgL2A0c0764lbckSZLUgVM4JEmSpA5MoCVJkqQOTKAlSZKkDkygJUmSpA5MoCVJkqQOTKAlaYpIcjDJUJIvk7zUPoLucOevHvX6w0lce0WSc8d57/Eki8coX5jEx0lKOu6YQEvS1LG/quZV1eXAn0z8XNNDEuiqmsyufCtotsf+j6p6pKremUTbkjStmEBL0tT0PnAJQJJXk2xL8lWSe9qyNcDMdsR6Q1v223DlJA8m+TTJjiSPtWWDSb5JsrZta2uSmUluAa6g2TxmqN3tkxFtrW/PIcnSJLuSfECze54kHXdMoCVpikkyANwA7GyL7qqq+TRJ7soks6rqYf4dsb59VP0lwBzgSmAeMD/Jte3bc4Cnq+oy4Bfg5qraDHwG3N62t3+cuGYAa4GbgGuAs/+/XktS/zCBlqSpY2aSIZpkdg+wri1fmeQL4GPgApok+HCWtD/baba9njuizvdVNdQebwMGO8Q3t63/bTXb2L7Qoa4kTRsDvQ5AkvSP/VU1b2RBkoXAYuCqqtqX5D1gxgTtBHiyqp4d1dYg8MeIooPAIdM1jkB1PF+Sph1HoCVpajsd2Nsmz3OBBSPeO5DkxDHqvAXcleRUgCTnJZk9wXV+BU6b4JxdwEVJLm5f3zZx+JI0/ZhAS9LUtgUYSLIDeIJmGsew54Adw4sIh1XVVmAj8FGSncBmJk6O1wPPjLWIcES7vwP3AG+0iwh3H0V/JKnvpZnGJkmSJOlIOAItSZIkdWACLUmSJHVgAi1JkiR1YAItSZIkdWACLUmSJHVgAi1JkiR1YAItSZIkdfA3wy2FYBAw2qsAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<Figure size 864x720 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "sio.savemat('Result/Res_TemporalCNN.mat',{\"loss\":fold_loss,\"acc\":fold_acc,\"val loss\":fold_vloss,\"val acc\":fold_vacc})\n",
    "\n",
    "fig = plt.figure(figsize=(12,10))\n",
    "plt.grid()\n",
    "plt.boxplot(fold_vacc)\n",
    "plt.suptitle('Cross-Validation Accuracy\\n Parallel CNN')\n",
    "ax = plt.gca()\n",
    "plt.xlabel('Patient id')\n",
    "plt.ylabel('Accuracy')\n",
    "plt.savefig('Result/ParallelCNN.png')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sio.savemat('Result/Res_2dTemporalCNN.mat',{\"loss\":fold_loss,\"acc\":fold_acc,\"val loss\":fold_vloss,\"val acc\":fold_vacc})\n",
    "\n",
    "fig = plt.figure(figsize=(12,10))\n",
    "plt.grid()\n",
    "plt.boxplot(fold_vacc)\n",
    "plt.suptitle('Cross-Validation Accuracy\\n Parallel CNN')\n",
    "ax = plt.gca()\n",
    "plt.xlabel('Patient id')\n",
    "plt.ylabel('Accuracy')\n",
    "plt.savefig('Result/2dParallelCNN.png')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## LSTM Model (C)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 238,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LSTM(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(LSTM, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(3,3,3)\n",
    "        self.conv2 = nn.Conv2d(3,3,5)\n",
    "        self.conv3 = nn.Conv2d(3,3,3)\n",
    "        self.conv4 = nn.Conv2d(3,3,5)\n",
    "        self.conv5 = nn.Conv2d(3,3,3)\n",
    "        self.conv6 = nn.Conv2d(3,3,5)\n",
    "        self.conv7 = nn.Conv2d(3,3,3)\n",
    "        self.conv8 = nn.Conv2d(3,3,5)\n",
    "        self.conv9 = nn.Conv2d(3,3,3)\n",
    "        self.conv10 = nn.Conv2d(3,3,5)\n",
    "        self.conv11 = nn.Conv2d(3,3,3)\n",
    "        self.conv12 = nn.Conv2d(3,3,5)\n",
    "        self.conv13 = nn.Conv2d(3,3,3)\n",
    "        self.conv14 = nn.Conv2d(3,3,5)     \n",
    "        self.pool1 = nn.MaxPool2d(2)\n",
    "        self.pool2 = nn.MaxPool2d(2)   \n",
    "        self.rnn1 = nn.LSTMCell(507,2)\n",
    "        self.fc1 = nn.Linear(120,512)\n",
    "        self.fc2 = nn.Linear(512,4)\n",
    "        self.max = nn.Softmax()\n",
    "    \n",
    "    def forward(self, x):\n",
    "        batch_size = x.shape[0]\n",
    "        tmp = torch.zeros(batch_size, x.shape[1], x.shape[2],26,26).cuda()\n",
    "        tmp[:,0] = F.relu(self.conv2(F.relu(self.conv1(x[:,0]))))\n",
    "        tmp[:,1] = F.relu(self.conv4(F.relu(self.conv3(x[:,1]))))\n",
    "        tmp[:,2] = F.relu(self.conv6(F.relu(self.conv5(x[:,2]))))\n",
    "        tmp[:,3] = F.relu(self.conv8(F.relu(self.conv7(x[:,3]))))\n",
    "        tmp[:,4] = F.relu(self.conv10(F.relu(self.conv9(x[:,4]))))\n",
    "        tmp[:,5] = F.relu(self.conv12(F.relu(self.conv11(x[:,5]))))\n",
    "        tmp[:,6] = F.relu(self.conv14(F.relu(self.conv13(x[:,6]))))\n",
    "        x = torch.zeros(batch_size, x.shape[1], x.shape[2],13,13).cuda()\n",
    "        for i in range(7):\n",
    "            x[:,i] = self.pool1(tmp[:,i])\n",
    "        x = x.view(batch_size,x.shape[1],-1)\n",
    "        hx = torch.randn(1,batch_size)\n",
    "        print(hx.size())\n",
    "        print(x[:,0].size())\n",
    "        self.rnn1(x[:,0],hx)\n",
    "        x = x.reshape(batch_size, x.shape[2], x.shape[1]*x.shape[3],-1) # img reshape\n",
    "        x = F.relu(self.conv15(x))\n",
    "        x = F.relu(self.conv16(x))\n",
    "        x = self.pool2(x)\n",
    "        x = x.view(batch_size,-1)\n",
    "        x = self.fc1(x)\n",
    "        x = self.fc2(x)\n",
    "        x = self.max(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 239,
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 2])\n",
      "torch.Size([2, 507])\n"
     ]
    },
    {
     "ename": "IndexError",
     "evalue": "Dimension out of range (expected to be in range of [-1, 0], but got 1)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mIndexError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-239-37f3230988bf>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[0mnet\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mLSTM\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mnet\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtmp\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m    539\u001b[0m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    540\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    542\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    543\u001b[0m             \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m<ipython-input-238-e8a90c20b4cc>\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m     40\u001b[0m         \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mhx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     41\u001b[0m         \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 42\u001b[1;33m         \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrnn1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mhx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     43\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m3\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m# img reshape\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     44\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv15\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m    539\u001b[0m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    540\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    542\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    543\u001b[0m             \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\rnn.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input, hx)\u001b[0m\n\u001b[0;32m    938\u001b[0m             \u001b[0mzeros\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mhidden_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    939\u001b[0m             \u001b[0mhx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mzeros\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 940\u001b[1;33m         \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcheck_forward_hidden\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'[0]'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    941\u001b[0m         \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcheck_forward_hidden\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'[1]'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    942\u001b[0m         return _VF.lstm_cell(\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\rnn.py\u001b[0m in \u001b[0;36mcheck_forward_hidden\u001b[1;34m(self, input, hx, hidden_label)\u001b[0m\n\u001b[0;32m    769\u001b[0m                     input.size(0), hidden_label, hx.size(0)))\n\u001b[0;32m    770\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 771\u001b[1;33m         \u001b[1;32mif\u001b[0m \u001b[0mhx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m!=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mhidden_size\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    772\u001b[0m             raise RuntimeError(\n\u001b[0;32m    773\u001b[0m                 \"hidden{} has inconsistent hidden_size: got {}, expected {}\".format(\n",
      "\u001b[1;31mIndexError\u001b[0m: Dimension out of range (expected to be in range of [-1, 0], but got 1)"
     ]
    }
   ],
   "source": [
    "net = LSTM().cuda()\n",
    "net(torch.from_numpy(tmp[0:2]).to(torch.float32).cuda())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 499,
   "metadata": {},
   "outputs": [],
   "source": [
    "tot_img = Images[:,0,:,:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 393,
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training Fold 1/5\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:19: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1,  50] loss: 1.385\tAccuracy : 0.314\t\tval-loss: 1.352\tval-Accuracy : 0.309\n",
      "[11,  50] loss: 0.841\tAccuracy : 0.834\t\tval-loss: 0.899\tval-Accuracy : 0.845\n",
      "[21,  50] loss: 0.826\tAccuracy : 0.865\t\tval-loss: 0.877\tval-Accuracy : 0.867\n",
      "[31,  50] loss: 0.804\tAccuracy : 0.875\t\tval-loss: 0.876\tval-Accuracy : 0.869\n",
      "[41,  50] loss: 0.797\tAccuracy : 0.883\t\tval-loss: 0.876\tval-Accuracy : 0.867\n",
      "Finish Training Fold 1/5\n",
      "Begin Training Fold 2/5\n",
      "[1,  50] loss: 1.386\tAccuracy : 0.317\t\tval-loss: 1.370\tval-Accuracy : 0.277\n",
      "[11,  50] loss: 0.869\tAccuracy : 0.831\t\tval-loss: 0.918\tval-Accuracy : 0.828\n",
      "[21,  50] loss: 0.842\tAccuracy : 0.827\t\tval-loss: 0.918\tval-Accuracy : 0.824\n",
      "[31,  50] loss: 0.825\tAccuracy : 0.831\t\tval-loss: 0.911\tval-Accuracy : 0.826\n",
      "[41,  50] loss: 0.824\tAccuracy : 0.832\t\tval-loss: 0.907\tval-Accuracy : 0.833\n",
      "Finish Training Fold 2/5\n",
      "Begin Training Fold 3/5\n",
      "[1,  50] loss: 1.384\tAccuracy : 0.482\t\tval-loss: 1.330\tval-Accuracy : 0.485\n",
      "[11,  50] loss: 0.871\tAccuracy : 0.837\t\tval-loss: 0.913\tval-Accuracy : 0.824\n",
      "[21,  50] loss: 0.830\tAccuracy : 0.862\t\tval-loss: 0.906\tval-Accuracy : 0.841\n",
      "[31,  50] loss: 0.815\tAccuracy : 0.881\t\tval-loss: 0.880\tval-Accuracy : 0.861\n",
      "[41,  50] loss: 0.813\tAccuracy : 0.878\t\tval-loss: 0.882\tval-Accuracy : 0.861\n",
      "Finish Training Fold 3/5\n",
      "Begin Training Fold 4/5\n",
      "[1,  50] loss: 1.392\tAccuracy : 0.318\t\tval-loss: 1.384\tval-Accuracy : 0.348\n",
      "[11,  50] loss: 0.927\tAccuracy : 0.791\t\tval-loss: 0.941\tval-Accuracy : 0.800\n",
      "[21,  50] loss: 0.840\tAccuracy : 0.839\t\tval-loss: 0.917\tval-Accuracy : 0.828\n",
      "[31,  50] loss: 0.823\tAccuracy : 0.848\t\tval-loss: 0.899\tval-Accuracy : 0.845\n",
      "[41,  50] loss: 0.813\tAccuracy : 0.845\t\tval-loss: 0.897\tval-Accuracy : 0.846\n",
      "Finish Training Fold 4/5\n",
      "Begin Training Fold 5/5\n",
      "[1,  50] loss: 1.335\tAccuracy : 0.418\t\tval-loss: 1.265\tval-Accuracy : 0.436\n",
      "[11,  50] loss: 0.880\tAccuracy : 0.821\t\tval-loss: 0.935\tval-Accuracy : 0.807\n",
      "[21,  50] loss: 0.832\tAccuracy : 0.850\t\tval-loss: 0.907\tval-Accuracy : 0.835\n",
      "[31,  50] loss: 0.823\tAccuracy : 0.837\t\tval-loss: 0.905\tval-Accuracy : 0.833\n",
      "[41,  50] loss: 0.810\tAccuracy : 0.837\t\tval-loss: 0.906\tval-Accuracy : 0.833\n",
      "Finish Training Fold 5/5\n"
     ]
    }
   ],
   "source": [
    "id_patient = \n",
    "\n",
    "n_fold = 5\n",
    "length = len(Mean_Images)\n",
    "\n",
    "fold_vloss = np.zeros((n_fold,n_patient))\n",
    "fold_loss = np.zeros((n_fold,n_patient))\n",
    "fold_vacc = np.zeros((n_fold,n_patient))\n",
    "fold_acc = np.zeros((n_fold,n_patient))\n",
    "\n",
    "train_id, test_id = kfold(length,n_fold)\n",
    "for fold in range(n_fold):\n",
    "    X_train = Mean_Images[train_id[fold]]\n",
    "    X_test = Mean_Images[test_id[fold]]    \n",
    "    y_train = Label[train_id[fold]]\n",
    "    y_test = Label[test_id[fold]]    \n",
    "    \n",
    "    print(\"Begin Training Fold %d/%d\" % \n",
    "         (fold+1,n_fold))\n",
    "    \n",
    "    CNN = BasicCNN().cuda(0)\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = optim.SGD(CNN.parameters(), lr=0.001, momentum=0.9)\n",
    "    \n",
    "    n_epochs = 50\n",
    "    for epoch in range(n_epochs):\n",
    "        running_loss = 0.0\n",
    "        batchsize = 10\n",
    "        for i in range(int(len(y_train)/batchsize)):\n",
    "            optimizer.zero_grad()\n",
    "\n",
    "            # forward + backward + optimize\n",
    "            outputs = CNN(torch.from_numpy(X_train[i:i+batchsize]).to(torch.float32).cuda())\n",
    "            loss = criterion(outputs, torch.from_numpy(y_train[i:i+batchsize]).to(torch.long).cuda())\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            running_loss += loss.item()\n",
    "\n",
    "        #acc\n",
    "        _, idx = torch.max(CNN(torch.from_numpy(X_train[:]).to(torch.float32).cuda()).data,1)\n",
    "        acc = (idx == torch.from_numpy(y_train).cuda()).sum().item()/len(y_train)\n",
    "\n",
    "        #val Loss\n",
    "        val_outputs = CNN(torch.from_numpy(X_test[:]).to(torch.float32).cuda())\n",
    "        val_loss = criterion(val_outputs, torch.from_numpy(y_test[:]).to(torch.long).cuda())\n",
    "        _, idx = torch.max(val_outputs.data,1)\n",
    "        val_acc = (idx == torch.from_numpy(y_test).cuda()).sum().item()/len(y_test)\n",
    "\n",
    "        if epoch%10==0:\n",
    "            print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "    fold_vloss.append(val_loss)\n",
    "    fold_loss.append(running_loss/i)\n",
    "    fold_vacc.append(val_acc)\n",
    "    fold_acc.append(acc)\n",
    "    print('Finish Training Fold %d/%d' % \n",
    "         (fold+1,n_fold))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BasicCNN(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(BasicCNN, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n",
    "        self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.pool = nn.MaxPool2d((2,2))\n",
    "        self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n",
    "        self.fc1 = nn.Linear(507,512)\n",
    "        self.fc2 = nn.Linear(512,4)\n",
    "        self.max = nn.Softmax()\n",
    "    \n",
    "    def forward(self, x):\n",
    "        batch_size = x.shape[0]\n",
    "        x = F.relu(self.conv1(x))\n",
    "        x = F.relu(self.conv2(x))\n",
    "        x = F.relu(self.conv3(x))\n",
    "        x = F.relu(self.conv4(x))\n",
    "        x = self.pool(x)\n",
    "        x = F.relu(self.conv5(x))\n",
    "        x = F.relu(self.conv6(x))\n",
    "        x = self.pool(x)\n",
    "        x = F.relu(self.conv7(x))\n",
    "        x = self.pool(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "192"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "64*3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MaxCNN(nn.Module):\n",
    "    def __init__(self, input_image, kernel=(3,3), stride=1, padding=1,max_kernel=(2,2)):\n",
    "        super(MaxCNN, self).__init__()\n",
    "        \n",
    "        \n",
    "        n_window = input_image.shape[1]\n",
    "        n_channel = input_image.shape[2]\n",
    "        \n",
    "        self.conv1 = nn.Conv2d(n_channel,32,kernel,stride=stride, padding=padding)\n",
    "        self.conv2 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n",
    "        self.conv3 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n",
    "        self.conv4 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n",
    "        self.pool1 = nn.MaxPool2d(max_kernel)\n",
    "        self.conv5 = nn.Conv2d(32,64,kernel,stride=stride,padding=padding)\n",
    "        self.conv6 = nn.Conv2d(64,64,kernel,stride=stride,padding=padding)\n",
    "        self.conv7 = nn.Conv2d(64,128,kernel,stride=stride,padding=padding)\n",
    "        \n",
    "        self.pool = nn.MaxPool2d((n_window,1))\n",
    "        self.drop = nn.Dropout(p=0.5)\n",
    "        \n",
    "        self.fc = nn.Linear(n_window*int(4*4*128/n_window),512)\n",
    "        self.fc2 = nn.Linear(512,4)\n",
    "        self.max = nn.LogSoftmax()\n",
    "\n",
    "    def forward(self, x):\n",
    "        if x.get_device() == 0:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n",
    "        else:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n",
    "        for i in range(7):\n",
    "            tmp[:,i] = self.pool1( F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n",
    "        x = tmp.reshape(x.shape[0], x.shape[1],4*128*4,1)\n",
    "        x = self.pool(x)\n",
    "        x = x.view(x.shape[0],-1)\n",
    "        x = self.fc2(self.fc(x))\n",
    "        x = self.max(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MaxCNN(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(MaxCNN, self).__init__()\n",
    "        \n",
    "        \n",
    "        self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n",
    "        self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.pool1 = nn.MaxPool2d((2,2))\n",
    "        self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n",
    "        \n",
    "        \n",
    "        self.pool = nn.MaxPool2d((7,1))\n",
    "        self.drop = nn.Dropout(p=0.5)\n",
    "        self.fc = nn.Linear(2044,512)\n",
    "        self.fc2 = nn.Linear(512,4)\n",
    "        self.max = nn.LogSoftmax()\n",
    "        \n",
    "    def forward(self, x):\n",
    "        if x.get_device() == 0:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n",
    "        else:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n",
    "        for i in range(7):\n",
    "            tmp[:,i] = self.pool1( F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n",
    "        x = tmp.reshape(x.shape[0], x.shape[1],4*128*4,1)\n",
    "        x = self.pool(x)\n",
    "        x = x.view(x.shape[0],-1)\n",
    "        #x = self.drop(x)\n",
    "        x = self.fc2(self.fc(x))\n",
    "        x = self.max(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training rep 1/5\t of Patient 1\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:34: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-43-18f565b68b5f>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     38\u001b[0m                 \u001b[0moutputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mCNN\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     39\u001b[0m                 \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 40\u001b[1;33m                 \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     41\u001b[0m                 \u001b[0moptimizer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     42\u001b[0m                 \u001b[0mrunning_loss\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(self, gradient, retain_graph, create_graph)\u001b[0m\n\u001b[0;32m    164\u001b[0m                 \u001b[0mproducts\u001b[0m\u001b[1;33m.\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    165\u001b[0m         \"\"\"\n\u001b[1;32m--> 166\u001b[1;33m         \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    167\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    168\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\autograd\\__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables)\u001b[0m\n\u001b[0;32m     97\u001b[0m     Variable._execution_engine.run_backward(\n\u001b[0;32m     98\u001b[0m         \u001b[0mtensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgrad_tensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 99\u001b[1;33m         allow_unreachable=True)  # allow_unreachable flag\n\u001b[0m\u001b[0;32m    100\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    101\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "p = 0\n",
    "n_rep = 5    \n",
    "n_patient = len(np.unique(Patient))\n",
    "fold_vloss = np.zeros((n_rep,n_patient))\n",
    "fold_loss = np.zeros((n_rep,n_patient))\n",
    "fold_vacc = np.zeros((n_rep,n_patient))\n",
    "fold_acc = np.zeros((n_rep,n_patient))\n",
    "\n",
    "for patient in np.unique(Patient):\n",
    "    id_patient = np.arange(len(tmp))[Patient==patient]\n",
    "    id_train = np.arange(len(tmp))[Patient!=patient]\n",
    "    \n",
    "    for rep in range(n_rep):\n",
    "        np.random.shuffle(id_patient)\n",
    "        np.random.shuffle(id_train)\n",
    "        \n",
    "        X_train = tmp[id_train]\n",
    "        X_test = tmp[id_patient]\n",
    "        y_train = Label[id_train]\n",
    "        y_test = Label[id_patient]\n",
    "        \n",
    "        print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n",
    "             (rep+1,n_rep, patient))\n",
    "        \n",
    "        CNN = MaxCNN().cuda()\n",
    "        criterion = nn.NLLLoss()\n",
    "        optimizer = optim.SGD(CNN.parameters(), lr=0.01)\n",
    "        \n",
    "        n_epochs = 45\n",
    "        for epoch in range(n_epochs):\n",
    "            running_loss = 0.0\n",
    "            batchsize = 32\n",
    "            for i in range(int(len(y_train)/batchsize)):\n",
    "                \n",
    "                CNN.to(torch.device(\"cuda\"))\n",
    "                optimizer.zero_grad()\n",
    "                # forward + backward + optimize\n",
    "                outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda())\n",
    "                loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                running_loss += loss.item()\n",
    "                \n",
    "            if epoch==50:\n",
    "                cnn_cpu = CNN.to(torch.device(\"cuda\"))\n",
    "\n",
    "                check_id = np.arange(2000)\n",
    "                np.random.shuffle(check_id)\n",
    "                #acc\n",
    "                acc = np.zeros(len(y_train))\n",
    "                for j in range(int(len(acc)/batchsize)+1):\n",
    "                    _, idx = torch.max(cnn_cpu(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32)).data,1)\n",
    "                    acc[j*batchsize:(j+1)*batchsize] = (idx == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n",
    "                acc = np.mean(acc)\n",
    "                \n",
    "                #validation\n",
    "                val_acc = np.zeros(len(y_test))\n",
    "                val_loss = []\n",
    "                for j in range(int(len(val_acc)/batchsize)+1):\n",
    "                    val_outputs = cnn_cpu(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32))\n",
    "                    _, idx = torch.max(val_outputs.data,1)\n",
    "                    val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long)).item())\n",
    "                    val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n",
    "                val_acc = np.mean(val_acc)\n",
    "                val_loss = np.mean(val_loss)\n",
    "\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "\n",
    "        fold_vloss[rep, p ] = val_loss\n",
    "        fold_loss[rep, p] = running_loss/i\n",
    "        fold_vacc[rep, p] = val_acc\n",
    "        fold_acc[rep, p] = acc\n",
    "      \n",
    "    \n",
    "    print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n",
    "    \n",
    "    p = p + 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training rep 1/5\t of Patient 11\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:34: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1,  45] loss: 1.402\tAccuracy : 0.286\t\tval-loss: 1.379\tval-Accuracy : 0.258\n",
      "[6,  45] loss: 1.397\tAccuracy : 0.286\t\tval-loss: 1.368\tval-Accuracy : 0.258\n",
      "[11,  45] loss: 1.207\tAccuracy : 0.482\t\tval-loss: 0.958\tval-Accuracy : 0.516\n",
      "[16,  45] loss: 1.054\tAccuracy : 0.578\t\tval-loss: 0.800\tval-Accuracy : 0.613\n",
      "[21,  45] loss: 0.941\tAccuracy : 0.656\t\tval-loss: 0.727\tval-Accuracy : 0.689\n",
      "[26,  45] loss: 0.793\tAccuracy : 0.741\t\tval-loss: 0.627\tval-Accuracy : 0.787\n",
      "[31,  45] loss: 0.565\tAccuracy : 0.845\t\tval-loss: 0.306\tval-Accuracy : 0.902\n",
      "[36,  45] loss: 0.332\tAccuracy : 0.887\t\tval-loss: 0.214\tval-Accuracy : 0.933\n",
      "[41,  45] loss: 0.232\tAccuracy : 0.912\t\tval-loss: 0.218\tval-Accuracy : 0.920\n",
      "Begin Training rep 2/5\t of Patient 11\n",
      "[1,  45] loss: 1.401\tAccuracy : 0.358\t\tval-loss: 1.379\tval-Accuracy : 0.387\n",
      "[6,  45] loss: 1.389\tAccuracy : 0.286\t\tval-loss: 1.365\tval-Accuracy : 0.258\n",
      "[11,  45] loss: 1.136\tAccuracy : 0.548\t\tval-loss: 1.012\tval-Accuracy : 0.578\n",
      "[16,  45] loss: 1.019\tAccuracy : 0.606\t\tval-loss: 0.904\tval-Accuracy : 0.582\n",
      "[21,  45] loss: 0.908\tAccuracy : 0.665\t\tval-loss: 0.820\tval-Accuracy : 0.640\n",
      "[26,  45] loss: 0.771\tAccuracy : 0.598\t\tval-loss: 0.890\tval-Accuracy : 0.622\n",
      "[31,  45] loss: 0.457\tAccuracy : 0.858\t\tval-loss: 0.644\tval-Accuracy : 0.840\n",
      "[36,  45] loss: 0.326\tAccuracy : 0.889\t\tval-loss: 0.718\tval-Accuracy : 0.871\n",
      "[41,  45] loss: 0.234\tAccuracy : 0.923\t\tval-loss: 0.713\tval-Accuracy : 0.907\n",
      "Begin Training rep 3/5\t of Patient 11\n",
      "[1,  45] loss: 1.403\tAccuracy : 0.286\t\tval-loss: 1.385\tval-Accuracy : 0.258\n",
      "[6,  45] loss: 1.397\tAccuracy : 0.286\t\tval-loss: 1.392\tval-Accuracy : 0.258\n",
      "[11,  45] loss: 1.378\tAccuracy : 0.286\t\tval-loss: 1.351\tval-Accuracy : 0.258\n",
      "[16,  45] loss: 1.115\tAccuracy : 0.550\t\tval-loss: 0.995\tval-Accuracy : 0.587\n",
      "[21,  45] loss: 0.998\tAccuracy : 0.599\t\tval-loss: 0.896\tval-Accuracy : 0.604\n",
      "[26,  45] loss: 0.867\tAccuracy : 0.667\t\tval-loss: 0.762\tval-Accuracy : 0.680\n",
      "[31,  45] loss: 0.635\tAccuracy : 0.816\t\tval-loss: 0.418\tval-Accuracy : 0.880\n",
      "[36,  45] loss: 0.361\tAccuracy : 0.889\t\tval-loss: 0.248\tval-Accuracy : 0.916\n",
      "[41,  45] loss: 0.245\tAccuracy : 0.907\t\tval-loss: 0.197\tval-Accuracy : 0.933\n",
      "Begin Training rep 4/5\t of Patient 11\n",
      "[1,  45] loss: 1.404\tAccuracy : 0.268\t\tval-loss: 1.392\tval-Accuracy : 0.262\n",
      "[6,  45] loss: 1.396\tAccuracy : 0.286\t\tval-loss: 1.410\tval-Accuracy : 0.258\n",
      "[11,  45] loss: 1.218\tAccuracy : 0.483\t\tval-loss: 1.115\tval-Accuracy : 0.520\n",
      "[16,  45] loss: 1.053\tAccuracy : 0.578\t\tval-loss: 0.800\tval-Accuracy : 0.600\n",
      "[21,  45] loss: 0.927\tAccuracy : 0.655\t\tval-loss: 0.742\tval-Accuracy : 0.662\n",
      "[26,  45] loss: 0.795\tAccuracy : 0.767\t\tval-loss: 0.661\tval-Accuracy : 0.787\n",
      "[31,  45] loss: 0.509\tAccuracy : 0.793\t\tval-loss: 0.645\tval-Accuracy : 0.804\n",
      "[36,  45] loss: 0.381\tAccuracy : 0.884\t\tval-loss: 0.299\tval-Accuracy : 0.920\n",
      "[41,  45] loss: 0.299\tAccuracy : 0.897\t\tval-loss: 0.265\tval-Accuracy : 0.907\n",
      "Begin Training rep 5/5\t of Patient 11\n",
      "[1,  45] loss: 1.404\tAccuracy : 0.285\t\tval-loss: 1.393\tval-Accuracy : 0.258\n",
      "[6,  45] loss: 1.397\tAccuracy : 0.286\t\tval-loss: 1.407\tval-Accuracy : 0.258\n",
      "[11,  45] loss: 1.396\tAccuracy : 0.286\t\tval-loss: 1.408\tval-Accuracy : 0.258\n",
      "[16,  45] loss: 1.238\tAccuracy : 0.454\t\tval-loss: 1.161\tval-Accuracy : 0.498\n",
      "[21,  45] loss: 1.053\tAccuracy : 0.570\t\tval-loss: 1.031\tval-Accuracy : 0.578\n",
      "[26,  45] loss: 0.924\tAccuracy : 0.627\t\tval-loss: 0.993\tval-Accuracy : 0.604\n",
      "[31,  45] loss: 0.763\tAccuracy : 0.742\t\tval-loss: 0.782\tval-Accuracy : 0.751\n",
      "[36,  45] loss: 0.576\tAccuracy : 0.750\t\tval-loss: 0.740\tval-Accuracy : 0.769\n",
      "[41,  45] loss: 0.390\tAccuracy : 0.881\t\tval-loss: 0.282\tval-Accuracy : 0.929\n",
      "loss: 0.203\tAccuracy : 0.904\t\tval-loss: 0.335\tval-Accuracy : 0.919\n",
      "Begin Training rep 1/5\t of Patient 12\n",
      "[1,  45] loss: 1.402\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n",
      "[6,  45] loss: 1.262\tAccuracy : 0.450\t\tval-loss: 1.258\tval-Accuracy : 0.493\n",
      "[11,  45] loss: 1.069\tAccuracy : 0.572\t\tval-loss: 1.008\tval-Accuracy : 0.622\n",
      "[16,  45] loss: 0.983\tAccuracy : 0.614\t\tval-loss: 1.032\tval-Accuracy : 0.622\n",
      "[21,  45] loss: 0.852\tAccuracy : 0.650\t\tval-loss: 1.417\tval-Accuracy : 0.631\n",
      "[26,  45] loss: 0.659\tAccuracy : 0.766\t\tval-loss: 1.111\tval-Accuracy : 0.700\n",
      "[31,  45] loss: 0.426\tAccuracy : 0.883\t\tval-loss: 0.603\tval-Accuracy : 0.843\n",
      "[36,  45] loss: 0.291\tAccuracy : 0.898\t\tval-loss: 0.524\tval-Accuracy : 0.829\n",
      "[41,  45] loss: 0.191\tAccuracy : 0.915\t\tval-loss: 0.421\tval-Accuracy : 0.894\n",
      "Begin Training rep 2/5\t of Patient 12\n",
      "[1,  45] loss: 1.402\tAccuracy : 0.285\t\tval-loss: 1.385\tval-Accuracy : 0.263\n",
      "[6,  45] loss: 1.397\tAccuracy : 0.285\t\tval-loss: 1.385\tval-Accuracy : 0.263\n",
      "[11,  45] loss: 1.238\tAccuracy : 0.468\t\tval-loss: 1.232\tval-Accuracy : 0.488\n",
      "[16,  45] loss: 1.052\tAccuracy : 0.534\t\tval-loss: 1.095\tval-Accuracy : 0.562\n",
      "[21,  45] loss: 0.943\tAccuracy : 0.623\t\tval-loss: 0.969\tval-Accuracy : 0.631\n",
      "[26,  45] loss: 0.790\tAccuracy : 0.736\t\tval-loss: 0.996\tval-Accuracy : 0.705\n",
      "[31,  45] loss: 0.522\tAccuracy : 0.852\t\tval-loss: 0.756\tval-Accuracy : 0.742\n",
      "[36,  45] loss: 0.351\tAccuracy : 0.882\t\tval-loss: 0.699\tval-Accuracy : 0.779\n",
      "[41,  45] loss: 0.274\tAccuracy : 0.899\t\tval-loss: 0.653\tval-Accuracy : 0.788\n",
      "Begin Training rep 3/5\t of Patient 12\n",
      "[1,  45] loss: 1.403\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n",
      "[6,  45] loss: 1.396\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n",
      "[11,  45] loss: 1.215\tAccuracy : 0.490\t\tval-loss: 1.180\tval-Accuracy : 0.544\n",
      "[16,  45] loss: 1.044\tAccuracy : 0.551\t\tval-loss: 0.995\tval-Accuracy : 0.576\n",
      "[21,  45] loss: 0.947\tAccuracy : 0.605\t\tval-loss: 0.976\tval-Accuracy : 0.581\n",
      "[26,  45] loss: 0.819\tAccuracy : 0.723\t\tval-loss: 0.926\tval-Accuracy : 0.691\n",
      "[31,  45] loss: 0.541\tAccuracy : 0.843\t\tval-loss: 0.697\tval-Accuracy : 0.760\n",
      "[36,  45] loss: 0.323\tAccuracy : 0.851\t\tval-loss: 1.152\tval-Accuracy : 0.673\n",
      "[41,  45] loss: 0.197\tAccuracy : 0.887\t\tval-loss: 1.182\tval-Accuracy : 0.742\n",
      "Begin Training rep 4/5\t of Patient 12\n",
      "[1,  45] loss: 1.404\tAccuracy : 0.287\t\tval-loss: 1.385\tval-Accuracy : 0.263\n",
      "[6,  45] loss: 1.392\tAccuracy : 0.285\t\tval-loss: 1.380\tval-Accuracy : 0.263\n",
      "[11,  45] loss: 1.132\tAccuracy : 0.528\t\tval-loss: 1.179\tval-Accuracy : 0.581\n",
      "[16,  45] loss: 0.999\tAccuracy : 0.591\t\tval-loss: 1.066\tval-Accuracy : 0.618\n",
      "[21,  45] loss: 0.879\tAccuracy : 0.673\t\tval-loss: 1.006\tval-Accuracy : 0.599\n",
      "[26,  45] loss: 0.689\tAccuracy : 0.733\t\tval-loss: 1.066\tval-Accuracy : 0.641\n",
      "[31,  45] loss: 0.460\tAccuracy : 0.790\t\tval-loss: 0.968\tval-Accuracy : 0.691\n",
      "[36,  45] loss: 0.288\tAccuracy : 0.889\t\tval-loss: 0.541\tval-Accuracy : 0.843\n",
      "[41,  45] loss: 0.218\tAccuracy : 0.917\t\tval-loss: 0.455\tval-Accuracy : 0.871\n",
      "Begin Training rep 5/5\t of Patient 12\n",
      "[1,  45] loss: 1.403\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n",
      "[6,  45] loss: 1.252\tAccuracy : 0.456\t\tval-loss: 1.223\tval-Accuracy : 0.530\n",
      "[11,  45] loss: 1.046\tAccuracy : 0.583\t\tval-loss: 1.013\tval-Accuracy : 0.618\n",
      "[16,  45] loss: 0.926\tAccuracy : 0.628\t\tval-loss: 0.999\tval-Accuracy : 0.622\n",
      "[21,  45] loss: 0.741\tAccuracy : 0.743\t\tval-loss: 0.754\tval-Accuracy : 0.719\n",
      "[26,  45] loss: 0.724\tAccuracy : 0.669\t\tval-loss: 1.009\tval-Accuracy : 0.664\n",
      "[31,  45] loss: 0.281\tAccuracy : 0.892\t\tval-loss: 0.760\tval-Accuracy : 0.816\n",
      "[36,  45] loss: 0.236\tAccuracy : 0.905\t\tval-loss: 0.652\tval-Accuracy : 0.839\n",
      "[41,  45] loss: 0.227\tAccuracy : 0.910\t\tval-loss: 0.666\tval-Accuracy : 0.848\n",
      "loss: 0.229\tAccuracy : 0.906\t\tval-loss: 0.675\tval-Accuracy : 0.829\n",
      "Begin Training rep 1/5\t of Patient 13\n"
     ]
    },
    {
     "ename": "RuntimeError",
     "evalue": "non-empty 3D or 4D input tensor expected but got ndim: 4",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-8-57c097f46e05>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     60\u001b[0m                 \u001b[0mval_loss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     61\u001b[0m                 \u001b[1;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mval_acc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m/\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 62\u001b[1;33m                     \u001b[0mval_outputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcnn_cpu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_test\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     63\u001b[0m                     \u001b[0m_\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0midx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mval_outputs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     64\u001b[0m                     \u001b[0mval_loss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcriterion\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mval_outputs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_test\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m    539\u001b[0m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    540\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    542\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    543\u001b[0m             \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m<ipython-input-7-a97bbd16b095>\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m     26\u001b[0m             \u001b[0mtmp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m128\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     27\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m7\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 28\u001b[1;33m             \u001b[0mtmp\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv7\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv6\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv5\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv4\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv3\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     29\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtmp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m*\u001b[0m\u001b[1;36m128\u001b[0m\u001b[1;33m*\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     30\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m    539\u001b[0m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    540\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    542\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    543\u001b[0m             \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\pooling.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m    139\u001b[0m         return F.max_pool2d(input, self.kernel_size, self.stride,\n\u001b[0;32m    140\u001b[0m                             \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpadding\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdilation\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mceil_mode\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 141\u001b[1;33m                             self.return_indices)\n\u001b[0m\u001b[0;32m    142\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    143\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\_jit_internal.py\u001b[0m in \u001b[0;36mfn\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m    136\u001b[0m             \u001b[1;32mreturn\u001b[0m \u001b[0mif_true\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    137\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 138\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mif_false\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    139\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    140\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mif_true\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__doc__\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mif_false\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__doc__\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\functional.py\u001b[0m in \u001b[0;36m_max_pool2d\u001b[1;34m(input, kernel_size, stride, padding, dilation, ceil_mode, return_indices)\u001b[0m\n\u001b[0;32m    486\u001b[0m         \u001b[0mstride\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjit\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mannotate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mList\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mint\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    487\u001b[0m     return torch.max_pool2d(\n\u001b[1;32m--> 488\u001b[1;33m         input, kernel_size, stride, padding, dilation, ceil_mode)\n\u001b[0m\u001b[0;32m    489\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    490\u001b[0m max_pool2d = boolean_dispatch(\n",
      "\u001b[1;31mRuntimeError\u001b[0m: non-empty 3D or 4D input tensor expected but got ndim: 4"
     ]
    }
   ],
   "source": [
    "p = 12\n",
    "n_rep = 5    \n",
    "n_patient = len(np.unique(Patient))\n",
    "fold_vloss = np.zeros((n_rep,n_patient))\n",
    "fold_loss = np.zeros((n_rep,n_patient))\n",
    "fold_vacc = np.zeros((n_rep,n_patient))\n",
    "fold_acc = np.zeros((n_rep,n_patient))\n",
    "\n",
    "for patient in np.unique(Patient):\n",
    "    patient = patient + 13\n",
    "    id_patient = np.arange(len(tmp))[Patient==patient]\n",
    "    id_train = np.arange(len(tmp))[Patient!=patient]\n",
    "    \n",
    "    for rep in range(n_rep):\n",
    "        np.random.shuffle(id_patient)\n",
    "        np.random.shuffle(id_train)\n",
    "        \n",
    "        X_train = tmp[id_train]\n",
    "        X_test = tmp[id_patient]\n",
    "        y_train = Label[id_train]\n",
    "        y_test = Label[id_patient]\n",
    "        \n",
    "        print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n",
    "             (rep+1,n_rep, patient))\n",
    "        \n",
    "        CNN = MaxCNN().cuda()\n",
    "        criterion = nn.NLLLoss()\n",
    "        optimizer = optim.SGD(CNN.parameters(), lr=0.01)\n",
    "        \n",
    "        n_epochs = 45\n",
    "        for epoch in range(n_epochs):\n",
    "            running_loss = 0.0\n",
    "            batchsize = 32\n",
    "            for i in range(int(len(y_train)/batchsize)):\n",
    "                \n",
    "                CNN.to(torch.device(\"cuda\"))\n",
    "                optimizer.zero_grad()\n",
    "                # forward + backward + optimize\n",
    "                outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda())\n",
    "                loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                running_loss += loss.item()\n",
    "                \n",
    "            if epoch%5==0:\n",
    "                cnn_cpu = CNN.to(torch.device(\"cpu\"))\n",
    "\n",
    "                check_id = np.arange(2000)\n",
    "                np.random.shuffle(check_id)\n",
    "                \n",
    "                #acc\n",
    "                acc = np.zeros(len(y_train))\n",
    "                for j in range(int(len(acc)/batchsize)+1):\n",
    "                    _, idx = torch.max(cnn_cpu(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32)).data,1)\n",
    "                    acc[j*batchsize:(j+1)*batchsize] = (idx == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n",
    "                acc = np.mean(acc)\n",
    "                \n",
    "                #validation\n",
    "                val_acc = np.zeros(len(y_test))\n",
    "                val_loss = []\n",
    "                for j in range(int(len(val_acc)/batchsize)+1):\n",
    "                    val_outputs = cnn_cpu(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32))\n",
    "                    _, idx = torch.max(val_outputs.data,1)\n",
    "                    val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long)).item())\n",
    "                    val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n",
    "                val_acc = np.mean(val_acc)\n",
    "                val_loss = np.mean(val_loss)\n",
    "\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "\n",
    "        fold_vloss[rep, p ] = val_loss\n",
    "        fold_loss[rep, p] = running_loss/i\n",
    "        fold_vacc[rep, p] = val_acc\n",
    "        fold_acc[rep, p] = acc\n",
    "      \n",
    "    \n",
    "    print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n",
    "    \n",
    "    p = p + 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training rep 1/5\t of Patient 14\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:34: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1,  45] loss: 1.403\tAccuracy : 0.286\t\tval-loss: 1.383\tval-Accuracy : 0.273\n",
      "[6,  45] loss: 1.397\tAccuracy : 0.284\t\tval-loss: 1.380\tval-Accuracy : 0.278\n",
      "[11,  45] loss: 1.243\tAccuracy : 0.464\t\tval-loss: 1.235\tval-Accuracy : 0.464\n",
      "[16,  45] loss: 1.071\tAccuracy : 0.552\t\tval-loss: 1.099\tval-Accuracy : 0.565\n",
      "[21,  45] loss: 0.970\tAccuracy : 0.621\t\tval-loss: 0.996\tval-Accuracy : 0.612\n",
      "[26,  45] loss: 0.838\tAccuracy : 0.715\t\tval-loss: 0.963\tval-Accuracy : 0.641\n",
      "[31,  45] loss: 0.609\tAccuracy : 0.831\t\tval-loss: 0.720\tval-Accuracy : 0.785\n",
      "[36,  45] loss: 0.392\tAccuracy : 0.906\t\tval-loss: 0.601\tval-Accuracy : 0.656\n",
      "[41,  45] loss: 0.243\tAccuracy : 0.912\t\tval-loss: 0.485\tval-Accuracy : 0.675\n",
      "Begin Training rep 2/5\t of Patient 14\n",
      "[1,  45] loss: 1.402\tAccuracy : 0.284\t\tval-loss: 1.383\tval-Accuracy : 0.278\n",
      "[6,  45] loss: 1.398\tAccuracy : 0.284\t\tval-loss: 1.381\tval-Accuracy : 0.278\n",
      "[11,  45] loss: 1.338\tAccuracy : 0.398\t\tval-loss: 1.318\tval-Accuracy : 0.469\n",
      "[16,  45] loss: 1.081\tAccuracy : 0.574\t\tval-loss: 1.017\tval-Accuracy : 0.608\n",
      "[21,  45] loss: 0.981\tAccuracy : 0.605\t\tval-loss: 0.967\tval-Accuracy : 0.646\n",
      "[26,  45] loss: 0.912\tAccuracy : 0.650\t\tval-loss: 0.921\tval-Accuracy : 0.656\n",
      "[31,  45] loss: 0.715\tAccuracy : 0.749\t\tval-loss: 0.909\tval-Accuracy : 0.742\n",
      "[36,  45] loss: 0.417\tAccuracy : 0.836\t\tval-loss: 0.597\tval-Accuracy : 0.828\n",
      "[41,  45] loss: 0.338\tAccuracy : 0.889\t\tval-loss: 0.527\tval-Accuracy : 0.833\n",
      "Begin Training rep 3/5\t of Patient 14\n",
      "[1,  45] loss: 1.405\tAccuracy : 0.284\t\tval-loss: 1.384\tval-Accuracy : 0.278\n",
      "[6,  45] loss: 1.262\tAccuracy : 0.446\t\tval-loss: 1.263\tval-Accuracy : 0.474\n",
      "[11,  45] loss: 1.065\tAccuracy : 0.576\t\tval-loss: 1.061\tval-Accuracy : 0.589\n",
      "[16,  45] loss: 0.951\tAccuracy : 0.636\t\tval-loss: 1.003\tval-Accuracy : 0.636\n",
      "[21,  45] loss: 0.815\tAccuracy : 0.670\t\tval-loss: 0.894\tval-Accuracy : 0.651\n",
      "[26,  45] loss: 0.668\tAccuracy : 0.762\t\tval-loss: 0.686\tval-Accuracy : 0.756\n",
      "[31,  45] loss: 0.453\tAccuracy : 0.873\t\tval-loss: 0.530\tval-Accuracy : 0.837\n",
      "[36,  45] loss: 0.275\tAccuracy : 0.909\t\tval-loss: 0.680\tval-Accuracy : 0.665\n",
      "[41,  45] loss: 0.197\tAccuracy : 0.936\t\tval-loss: 0.713\tval-Accuracy : 0.689\n",
      "Begin Training rep 4/5\t of Patient 14\n",
      "[1,  45] loss: 1.401\tAccuracy : 0.283\t\tval-loss: 1.383\tval-Accuracy : 0.282\n",
      "[6,  45] loss: 1.398\tAccuracy : 0.284\t\tval-loss: 1.382\tval-Accuracy : 0.278\n",
      "[11,  45] loss: 1.397\tAccuracy : 0.284\t\tval-loss: 1.382\tval-Accuracy : 0.278\n",
      "[16,  45] loss: 1.395\tAccuracy : 0.284\t\tval-loss: 1.380\tval-Accuracy : 0.278\n",
      "[21,  45] loss: 1.186\tAccuracy : 0.495\t\tval-loss: 1.156\tval-Accuracy : 0.502\n",
      "[26,  45] loss: 1.032\tAccuracy : 0.592\t\tval-loss: 1.023\tval-Accuracy : 0.627\n",
      "[31,  45] loss: 0.933\tAccuracy : 0.656\t\tval-loss: 0.981\tval-Accuracy : 0.665\n",
      "[36,  45] loss: 0.709\tAccuracy : 0.796\t\tval-loss: 0.710\tval-Accuracy : 0.799\n",
      "[41,  45] loss: 0.409\tAccuracy : 0.861\t\tval-loss: 0.602\tval-Accuracy : 0.665\n",
      "Begin Training rep 5/5\t of Patient 14\n",
      "[1,  45] loss: 1.403\tAccuracy : 0.284\t\tval-loss: 1.383\tval-Accuracy : 0.278\n",
      "[6,  45] loss: 1.397\tAccuracy : 0.284\t\tval-loss: 1.380\tval-Accuracy : 0.278\n",
      "[11,  45] loss: 1.194\tAccuracy : 0.482\t\tval-loss: 1.203\tval-Accuracy : 0.478\n",
      "[16,  45] loss: 1.020\tAccuracy : 0.572\t\tval-loss: 1.169\tval-Accuracy : 0.565\n",
      "[21,  45] loss: 0.914\tAccuracy : 0.645\t\tval-loss: 1.014\tval-Accuracy : 0.612\n",
      "[26,  45] loss: 0.781\tAccuracy : 0.718\t\tval-loss: 0.916\tval-Accuracy : 0.708\n",
      "[31,  45] loss: 0.610\tAccuracy : 0.809\t\tval-loss: 0.580\tval-Accuracy : 0.842\n",
      "[36,  45] loss: 0.403\tAccuracy : 0.870\t\tval-loss: 0.667\tval-Accuracy : 0.646\n",
      "[41,  45] loss: 0.295\tAccuracy : 0.898\t\tval-loss: 0.687\tval-Accuracy : 0.675\n",
      "loss: 0.411\tAccuracy : 0.899\t\tval-loss: 0.603\tval-Accuracy : 0.707\n",
      "Begin Training rep 1/5\t of Patient 15\n",
      "[1,  45] loss: 1.400\tAccuracy : 0.284\t\tval-loss: 1.382\tval-Accuracy : 0.273\n",
      "[6,  45] loss: 1.397\tAccuracy : 0.284\t\tval-loss: 1.381\tval-Accuracy : 0.273\n",
      "[11,  45] loss: 1.222\tAccuracy : 0.477\t\tval-loss: 1.399\tval-Accuracy : 0.309\n",
      "[16,  45] loss: 1.019\tAccuracy : 0.573\t\tval-loss: 1.598\tval-Accuracy : 0.364\n",
      "[21,  45] loss: 0.895\tAccuracy : 0.623\t\tval-loss: 1.710\tval-Accuracy : 0.418\n",
      "[26,  45] loss: 0.726\tAccuracy : 0.690\t\tval-loss: 1.776\tval-Accuracy : 0.414\n",
      "[31,  45] loss: 0.423\tAccuracy : 0.859\t\tval-loss: 2.021\tval-Accuracy : 0.295\n",
      "[36,  45] loss: 0.241\tAccuracy : 0.894\t\tval-loss: 2.203\tval-Accuracy : 0.377\n",
      "[41,  45] loss: 0.184\tAccuracy : 0.940\t\tval-loss: 2.258\tval-Accuracy : 0.395\n"
     ]
    },
    {
     "ename": "IndexError",
     "evalue": "index 13 is out of bounds for axis 1 with size 13",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mIndexError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-14-7a4d43dcb058>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     70\u001b[0m              (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n\u001b[0;32m     71\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 72\u001b[1;33m         \u001b[0mfold_vloss\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mrep\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mp\u001b[0m \u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mval_loss\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     73\u001b[0m         \u001b[0mfold_loss\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mrep\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mp\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mrunning_loss\u001b[0m\u001b[1;33m/\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     74\u001b[0m         \u001b[0mfold_vacc\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mrep\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mp\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mval_acc\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mIndexError\u001b[0m: index 13 is out of bounds for axis 1 with size 13"
     ]
    }
   ],
   "source": [
    "p = 12\n",
    "n_rep = 5    \n",
    "n_patient = len(np.unique(Patient))\n",
    "fold_vloss = np.zeros((n_rep,n_patient))\n",
    "fold_loss = np.zeros((n_rep,n_patient))\n",
    "fold_vacc = np.zeros((n_rep,n_patient))\n",
    "fold_acc = np.zeros((n_rep,n_patient))\n",
    "\n",
    "for patient in np.unique(Patient):\n",
    "    patient = patient + 13\n",
    "    id_patient = np.arange(len(tmp))[Patient==patient]\n",
    "    id_train = np.arange(len(tmp))[Patient!=patient]\n",
    "    \n",
    "    for rep in range(n_rep):\n",
    "        np.random.shuffle(id_patient)\n",
    "        np.random.shuffle(id_train)\n",
    "        \n",
    "        X_train = tmp[id_train]\n",
    "        X_test = tmp[id_patient]\n",
    "        y_train = Label[id_train]\n",
    "        y_test = Label[id_patient]\n",
    "        \n",
    "        print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n",
    "             (rep+1,n_rep, patient))\n",
    "        \n",
    "        CNN = MaxCNN().cuda()\n",
    "        criterion = nn.NLLLoss()\n",
    "        optimizer = optim.SGD(CNN.parameters(), lr=0.01)\n",
    "        \n",
    "        n_epochs = 45\n",
    "        for epoch in range(n_epochs):\n",
    "            running_loss = 0.0\n",
    "            batchsize = 32\n",
    "            for i in range(int(len(y_train)/batchsize)):\n",
    "                \n",
    "                CNN.to(torch.device(\"cuda\"))\n",
    "                optimizer.zero_grad()\n",
    "                # forward + backward + optimize\n",
    "                outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda())\n",
    "                loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                running_loss += loss.item()\n",
    "                \n",
    "            if epoch%5==0:\n",
    "                cnn_cpu = CNN.to(torch.device(\"cpu\"))\n",
    "\n",
    "                check_id = np.arange(2000)\n",
    "                np.random.shuffle(check_id)\n",
    "                \n",
    "                #acc\n",
    "                acc = np.zeros(len(y_train))\n",
    "                for j in range(int(len(acc)/batchsize)+1):\n",
    "                    _, idx = torch.max(cnn_cpu(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32)).data,1)\n",
    "                    acc[j*batchsize:(j+1)*batchsize] = (idx == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n",
    "                acc = np.mean(acc)\n",
    "                \n",
    "                #validation\n",
    "                val_acc = np.zeros(len(y_test))\n",
    "                val_loss = []\n",
    "                for j in range(int(len(val_acc)/batchsize)+1):\n",
    "                    val_outputs = cnn_cpu(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32))\n",
    "                    _, idx = torch.max(val_outputs.data,1)\n",
    "                    val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long)).item())\n",
    "                    val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n",
    "                val_acc = np.mean(val_acc)\n",
    "                val_loss = np.mean(val_loss)\n",
    "\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "\n",
    "        fold_vloss[rep, p ] = val_loss\n",
    "        fold_loss[rep, p] = running_loss/i\n",
    "        fold_vacc[rep, p] = val_acc\n",
    "        fold_acc[rep, p] = acc\n",
    "      \n",
    "    \n",
    "    print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n",
    "    \n",
    "    p = p + 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sio.savemat('Result/Res_MaxPoolCNN.mat',{\"loss\":fold_loss,\"acc\":fold_acc,\"val loss\":fold_vloss,\"val acc\":fold_vacc})\n",
    "\n",
    "fig = plt.figure(figsize=(12,10))\n",
    "plt.grid()\n",
    "plt.boxplot(fold_vacc)\n",
    "plt.suptitle('Cross-Validation Accuracy\\n MaxPool CNN')\n",
    "ax = plt.gca()\n",
    "plt.xlabel('Patient id')\n",
    "plt.ylabel('Accuracy')\n",
    "plt.savefig('Result/MaxPoolCNN.png')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TempCNN(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(TempCNN, self).__init__()\n",
    "        \n",
    "        \n",
    "        self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n",
    "        self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.pool1 = nn.MaxPool2d((2,2))\n",
    "        self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n",
    "        \n",
    "        self.conv8 = nn.Conv1d(7,64,(4*4*128,3),stride=(1,1),padding=1)\n",
    "        \n",
    "        self.pool = nn.MaxPool2d((7,1))\n",
    "        self.drop = nn.Dropout(p=0.5)\n",
    "        self.fc = nn.Linear(192,4)\n",
    "        self.max = nn.LogSoftmax()\n",
    "        \n",
    "    def forward(self, x):\n",
    "        if x.get_device() == 0:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n",
    "        else:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n",
    "        for i in range(7):\n",
    "            tmp[:,i] = self.pool1( F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n",
    "        x = tmp.reshape(x.shape[0], x.shape[1],4*128*4,1)\n",
    "        x = F.relu(self.conv8(x))\n",
    "        x = x.view(x.shape[0],-1)\n",
    "        x = self.fc(x)\n",
    "        x = self.max(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:33: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[-1.3924, -1.3307, -1.3637, -1.4631],\n",
       "        [-1.3927, -1.3307, -1.3638, -1.4627]], device='cuda:0',\n",
       "       grad_fn=<LogSoftmaxBackward>)"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "net = TempCNN().cuda()\n",
    "net(torch.from_numpy(tmp[0:2]).to(torch.float32).cuda())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training rep 1/1\t of Patient 9\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:33: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1,  45] loss: 1.404\tAccuracy : 0.283\t\tval-loss: 1.380\tval-Accuracy : 0.287\n",
      "[2,  45] loss: 1.401\tAccuracy : 0.283\t\tval-loss: 1.376\tval-Accuracy : 0.287\n",
      "[3,  45] loss: 1.399\tAccuracy : 0.283\t\tval-loss: 1.373\tval-Accuracy : 0.287\n",
      "[4,  45] loss: 1.398\tAccuracy : 0.283\t\tval-loss: 1.371\tval-Accuracy : 0.287\n",
      "[5,  45] loss: 1.397\tAccuracy : 0.283\t\tval-loss: 1.370\tval-Accuracy : 0.287\n",
      "[6,  45] loss: 1.396\tAccuracy : 0.283\t\tval-loss: 1.366\tval-Accuracy : 0.287\n",
      "[7,  45] loss: 1.390\tAccuracy : 0.283\t\tval-loss: 1.347\tval-Accuracy : 0.287\n",
      "[8,  45] loss: 1.328\tAccuracy : 0.427\t\tval-loss: 1.156\tval-Accuracy : 0.307\n",
      "[9,  45] loss: 1.217\tAccuracy : 0.519\t\tval-loss: 1.032\tval-Accuracy : 0.485\n",
      "[10,  45] loss: 1.113\tAccuracy : 0.565\t\tval-loss: 0.905\tval-Accuracy : 0.604\n",
      "[11,  45] loss: 1.030\tAccuracy : 0.598\t\tval-loss: 0.829\tval-Accuracy : 0.634\n",
      "[12,  45] loss: 0.970\tAccuracy : 0.628\t\tval-loss: 0.782\tval-Accuracy : 0.678\n",
      "[13,  45] loss: 0.921\tAccuracy : 0.655\t\tval-loss: 0.774\tval-Accuracy : 0.688\n",
      "[14,  45] loss: 0.840\tAccuracy : 0.702\t\tval-loss: 0.624\tval-Accuracy : 0.787\n",
      "[15,  45] loss: 0.756\tAccuracy : 0.751\t\tval-loss: 0.581\tval-Accuracy : 0.807\n",
      "[16,  45] loss: 0.757\tAccuracy : 0.783\t\tval-loss: 0.499\tval-Accuracy : 0.851\n",
      "[17,  45] loss: 0.627\tAccuracy : 0.810\t\tval-loss: 0.366\tval-Accuracy : 0.906\n",
      "[18,  45] loss: 0.556\tAccuracy : 0.840\t\tval-loss: 0.308\tval-Accuracy : 0.936\n",
      "[19,  45] loss: 0.448\tAccuracy : 0.868\t\tval-loss: 0.163\tval-Accuracy : 0.960\n",
      "[20,  45] loss: 0.399\tAccuracy : 0.868\t\tval-loss: 0.170\tval-Accuracy : 0.960\n",
      "[21,  45] loss: 0.293\tAccuracy : 0.905\t\tval-loss: 0.132\tval-Accuracy : 0.970\n",
      "[22,  45] loss: 0.242\tAccuracy : 0.925\t\tval-loss: 0.093\tval-Accuracy : 0.980\n",
      "[23,  45] loss: 0.205\tAccuracy : 0.931\t\tval-loss: 0.086\tval-Accuracy : 0.980\n",
      "[24,  45] loss: 0.175\tAccuracy : 0.942\t\tval-loss: 0.078\tval-Accuracy : 0.980\n",
      "[25,  45] loss: 0.159\tAccuracy : 0.941\t\tval-loss: 0.100\tval-Accuracy : 0.975\n",
      "[26,  45] loss: 0.169\tAccuracy : 0.943\t\tval-loss: 0.065\tval-Accuracy : 0.980\n",
      "[27,  45] loss: 0.982\tAccuracy : 0.836\t\tval-loss: 0.336\tval-Accuracy : 0.916\n",
      "[28,  45] loss: 0.310\tAccuracy : 0.915\t\tval-loss: 0.066\tval-Accuracy : 0.990\n",
      "[29,  45] loss: 0.195\tAccuracy : 0.931\t\tval-loss: 0.091\tval-Accuracy : 0.980\n",
      "[30,  45] loss: 0.159\tAccuracy : 0.940\t\tval-loss: 0.096\tval-Accuracy : 0.980\n",
      "[31,  45] loss: 0.155\tAccuracy : 0.933\t\tval-loss: 0.089\tval-Accuracy : 0.975\n",
      "[32,  45] loss: 0.160\tAccuracy : 0.949\t\tval-loss: 0.077\tval-Accuracy : 0.970\n",
      "[33,  45] loss: 0.117\tAccuracy : 0.953\t\tval-loss: 0.090\tval-Accuracy : 0.965\n",
      "[34,  45] loss: 0.109\tAccuracy : 0.950\t\tval-loss: 0.069\tval-Accuracy : 0.980\n",
      "[35,  45] loss: 0.101\tAccuracy : 0.955\t\tval-loss: 0.078\tval-Accuracy : 0.975\n",
      "[36,  45] loss: 0.094\tAccuracy : 0.957\t\tval-loss: 0.080\tval-Accuracy : 0.970\n",
      "[37,  45] loss: 0.090\tAccuracy : 0.958\t\tval-loss: 0.078\tval-Accuracy : 0.975\n",
      "[38,  45] loss: 0.086\tAccuracy : 0.959\t\tval-loss: 0.079\tval-Accuracy : 0.970\n",
      "[39,  45] loss: 0.083\tAccuracy : 0.958\t\tval-loss: 0.080\tval-Accuracy : 0.970\n",
      "[40,  45] loss: 0.080\tAccuracy : 0.960\t\tval-loss: 0.082\tval-Accuracy : 0.965\n",
      "[41,  45] loss: 0.078\tAccuracy : 0.962\t\tval-loss: 0.085\tval-Accuracy : 0.970\n",
      "[42,  45] loss: 0.074\tAccuracy : 0.962\t\tval-loss: 0.086\tval-Accuracy : 0.975\n",
      "[43,  45] loss: 0.072\tAccuracy : 0.963\t\tval-loss: 0.090\tval-Accuracy : 0.970\n",
      "[44,  45] loss: 0.071\tAccuracy : 0.964\t\tval-loss: 0.094\tval-Accuracy : 0.970\n",
      "[45,  45] loss: 0.069\tAccuracy : 0.963\t\tval-loss: 0.099\tval-Accuracy : 0.970\n",
      "loss: 0.069\tAccuracy : 0.963\t\tval-loss: 0.099\tval-Accuracy : 0.970\n",
      "Begin Training rep 1/1\t of Patient 12\n",
      "[1,  45] loss: 1.404\tAccuracy : 0.240\t\tval-loss: 1.385\tval-Accuracy : 0.240\n",
      "[2,  45] loss: 1.401\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n",
      "[3,  45] loss: 1.399\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n",
      "[4,  45] loss: 1.398\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n",
      "[5,  45] loss: 1.397\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n",
      "[6,  45] loss: 1.396\tAccuracy : 0.285\t\tval-loss: 1.383\tval-Accuracy : 0.263\n",
      "[7,  45] loss: 1.393\tAccuracy : 0.285\t\tval-loss: 1.378\tval-Accuracy : 0.263\n",
      "[8,  45] loss: 1.358\tAccuracy : 0.344\t\tval-loss: 1.328\tval-Accuracy : 0.318\n",
      "[9,  45] loss: 1.249\tAccuracy : 0.497\t\tval-loss: 1.229\tval-Accuracy : 0.530\n",
      "[10,  45] loss: 1.139\tAccuracy : 0.607\t\tval-loss: 1.209\tval-Accuracy : 0.571\n",
      "[11,  45] loss: 1.048\tAccuracy : 0.633\t\tval-loss: 1.135\tval-Accuracy : 0.553\n",
      "[12,  45] loss: 0.962\tAccuracy : 0.654\t\tval-loss: 1.078\tval-Accuracy : 0.562\n",
      "[13,  45] loss: 0.865\tAccuracy : 0.687\t\tval-loss: 1.236\tval-Accuracy : 0.581\n",
      "[14,  45] loss: 0.781\tAccuracy : 0.735\t\tval-loss: 1.049\tval-Accuracy : 0.636\n",
      "[15,  45] loss: 0.681\tAccuracy : 0.764\t\tval-loss: 1.046\tval-Accuracy : 0.608\n",
      "[16,  45] loss: 0.588\tAccuracy : 0.788\t\tval-loss: 1.112\tval-Accuracy : 0.641\n",
      "[17,  45] loss: 0.558\tAccuracy : 0.821\t\tval-loss: 0.808\tval-Accuracy : 0.760\n",
      "[18,  45] loss: 0.467\tAccuracy : 0.837\t\tval-loss: 1.016\tval-Accuracy : 0.747\n",
      "[19,  45] loss: 0.453\tAccuracy : 0.851\t\tval-loss: 0.793\tval-Accuracy : 0.816\n",
      "[20,  45] loss: 0.519\tAccuracy : 0.819\t\tval-loss: 0.884\tval-Accuracy : 0.770\n",
      "[21,  45] loss: 0.449\tAccuracy : 0.867\t\tval-loss: 0.907\tval-Accuracy : 0.751\n",
      "[22,  45] loss: 0.298\tAccuracy : 0.885\t\tval-loss: 0.991\tval-Accuracy : 0.779\n",
      "[23,  45] loss: 0.231\tAccuracy : 0.888\t\tval-loss: 1.303\tval-Accuracy : 0.760\n",
      "[24,  45] loss: 0.354\tAccuracy : 0.890\t\tval-loss: 0.884\tval-Accuracy : 0.760\n",
      "[25,  45] loss: 0.228\tAccuracy : 0.915\t\tval-loss: 1.024\tval-Accuracy : 0.802\n",
      "[26,  45] loss: 0.277\tAccuracy : 0.888\t\tval-loss: 0.807\tval-Accuracy : 0.765\n",
      "[27,  45] loss: 0.247\tAccuracy : 0.859\t\tval-loss: 0.909\tval-Accuracy : 0.751\n",
      "[28,  45] loss: 0.177\tAccuracy : 0.930\t\tval-loss: 1.099\tval-Accuracy : 0.829\n",
      "[29,  45] loss: 0.139\tAccuracy : 0.936\t\tval-loss: 1.224\tval-Accuracy : 0.829\n",
      "[30,  45] loss: 0.125\tAccuracy : 0.937\t\tval-loss: 1.362\tval-Accuracy : 0.820\n",
      "[31,  45] loss: 0.113\tAccuracy : 0.937\t\tval-loss: 1.460\tval-Accuracy : 0.825\n",
      "[32,  45] loss: 0.105\tAccuracy : 0.939\t\tval-loss: 1.542\tval-Accuracy : 0.829\n",
      "[33,  45] loss: 0.100\tAccuracy : 0.939\t\tval-loss: 1.617\tval-Accuracy : 0.839\n",
      "[34,  45] loss: 0.095\tAccuracy : 0.944\t\tval-loss: 1.673\tval-Accuracy : 0.839\n",
      "[35,  45] loss: 0.091\tAccuracy : 0.946\t\tval-loss: 1.712\tval-Accuracy : 0.848\n",
      "[36,  45] loss: 0.087\tAccuracy : 0.947\t\tval-loss: 1.763\tval-Accuracy : 0.848\n",
      "[37,  45] loss: 0.084\tAccuracy : 0.948\t\tval-loss: 1.797\tval-Accuracy : 0.857\n",
      "[38,  45] loss: 0.082\tAccuracy : 0.949\t\tval-loss: 1.817\tval-Accuracy : 0.857\n",
      "[39,  45] loss: 0.079\tAccuracy : 0.952\t\tval-loss: 1.853\tval-Accuracy : 0.857\n",
      "[40,  45] loss: 0.077\tAccuracy : 0.955\t\tval-loss: 1.889\tval-Accuracy : 0.853\n",
      "[41,  45] loss: 0.075\tAccuracy : 0.956\t\tval-loss: 1.911\tval-Accuracy : 0.853\n",
      "[42,  45] loss: 0.074\tAccuracy : 0.957\t\tval-loss: 1.904\tval-Accuracy : 0.857\n",
      "[43,  45] loss: 0.072\tAccuracy : 0.961\t\tval-loss: 2.002\tval-Accuracy : 0.862\n",
      "[44,  45] loss: 0.071\tAccuracy : 0.965\t\tval-loss: 1.996\tval-Accuracy : 0.862\n",
      "[45,  45] loss: 0.070\tAccuracy : 0.962\t\tval-loss: 2.043\tval-Accuracy : 0.871\n",
      "loss: 0.070\tAccuracy : 0.962\t\tval-loss: 2.043\tval-Accuracy : 0.871\n",
      "Begin Training rep 1/1\t of Patient 10\n",
      "[1,  45] loss: 1.402\tAccuracy : 0.285\t\tval-loss: 1.382\tval-Accuracy : 0.271\n",
      "[2,  45] loss: 1.400\tAccuracy : 0.285\t\tval-loss: 1.380\tval-Accuracy : 0.271\n",
      "[3,  45] loss: 1.398\tAccuracy : 0.285\t\tval-loss: 1.379\tval-Accuracy : 0.271\n",
      "[4,  45] loss: 1.396\tAccuracy : 0.285\t\tval-loss: 1.374\tval-Accuracy : 0.271\n",
      "[5,  45] loss: 1.378\tAccuracy : 0.285\t\tval-loss: 1.302\tval-Accuracy : 0.271\n",
      "[6,  45] loss: 1.270\tAccuracy : 0.467\t\tval-loss: 1.120\tval-Accuracy : 0.467\n",
      "[7,  45] loss: 1.172\tAccuracy : 0.559\t\tval-loss: 1.016\tval-Accuracy : 0.562\n",
      "[8,  45] loss: 1.097\tAccuracy : 0.586\t\tval-loss: 0.936\tval-Accuracy : 0.600\n",
      "[9,  45] loss: 1.027\tAccuracy : 0.620\t\tval-loss: 0.890\tval-Accuracy : 0.648\n",
      "[10,  45] loss: 0.963\tAccuracy : 0.657\t\tval-loss: 0.813\tval-Accuracy : 0.671\n",
      "[11,  45] loss: 0.887\tAccuracy : 0.700\t\tval-loss: 0.712\tval-Accuracy : 0.733\n",
      "[12,  45] loss: 0.842\tAccuracy : 0.714\t\tval-loss: 0.636\tval-Accuracy : 0.786\n",
      "[13,  45] loss: 0.764\tAccuracy : 0.754\t\tval-loss: 0.595\tval-Accuracy : 0.814\n",
      "[14,  45] loss: 0.690\tAccuracy : 0.784\t\tval-loss: 0.482\tval-Accuracy : 0.838\n",
      "[15,  45] loss: 0.628\tAccuracy : 0.815\t\tval-loss: 0.438\tval-Accuracy : 0.857\n",
      "[16,  45] loss: 0.604\tAccuracy : 0.826\t\tval-loss: 0.375\tval-Accuracy : 0.895\n",
      "[17,  45] loss: 0.553\tAccuracy : 0.840\t\tval-loss: 0.348\tval-Accuracy : 0.900\n",
      "[18,  45] loss: 0.425\tAccuracy : 0.867\t\tval-loss: 0.279\tval-Accuracy : 0.938\n",
      "[19,  45] loss: 0.396\tAccuracy : 0.889\t\tval-loss: 0.228\tval-Accuracy : 0.957\n",
      "[20,  45] loss: 0.338\tAccuracy : 0.893\t\tval-loss: 0.194\tval-Accuracy : 0.957\n",
      "[21,  45] loss: 0.262\tAccuracy : 0.914\t\tval-loss: 0.182\tval-Accuracy : 0.952\n",
      "[22,  45] loss: 0.219\tAccuracy : 0.925\t\tval-loss: 0.217\tval-Accuracy : 0.943\n",
      "[23,  45] loss: 0.224\tAccuracy : 0.934\t\tval-loss: 0.160\tval-Accuracy : 0.957\n",
      "[24,  45] loss: 0.256\tAccuracy : 0.901\t\tval-loss: 0.193\tval-Accuracy : 0.952\n",
      "[25,  45] loss: 0.266\tAccuracy : 0.921\t\tval-loss: 0.134\tval-Accuracy : 0.952\n",
      "[26,  45] loss: 0.172\tAccuracy : 0.945\t\tval-loss: 0.153\tval-Accuracy : 0.962\n",
      "[27,  45] loss: 0.139\tAccuracy : 0.950\t\tval-loss: 0.152\tval-Accuracy : 0.962\n",
      "[28,  45] loss: 0.121\tAccuracy : 0.953\t\tval-loss: 0.153\tval-Accuracy : 0.957\n",
      "[29,  45] loss: 0.111\tAccuracy : 0.954\t\tval-loss: 0.155\tval-Accuracy : 0.962\n",
      "[30,  45] loss: 0.103\tAccuracy : 0.955\t\tval-loss: 0.158\tval-Accuracy : 0.957\n",
      "[31,  45] loss: 0.098\tAccuracy : 0.955\t\tval-loss: 0.162\tval-Accuracy : 0.957\n",
      "[32,  45] loss: 0.093\tAccuracy : 0.959\t\tval-loss: 0.162\tval-Accuracy : 0.952\n",
      "[33,  45] loss: 0.088\tAccuracy : 0.959\t\tval-loss: 0.169\tval-Accuracy : 0.952\n",
      "[34,  45] loss: 0.085\tAccuracy : 0.959\t\tval-loss: 0.181\tval-Accuracy : 0.943\n",
      "[35,  45] loss: 0.082\tAccuracy : 0.959\t\tval-loss: 0.190\tval-Accuracy : 0.938\n",
      "[36,  45] loss: 0.080\tAccuracy : 0.960\t\tval-loss: 0.200\tval-Accuracy : 0.938\n",
      "[37,  45] loss: 0.077\tAccuracy : 0.960\t\tval-loss: 0.209\tval-Accuracy : 0.938\n",
      "[38,  45] loss: 0.075\tAccuracy : 0.961\t\tval-loss: 0.214\tval-Accuracy : 0.938\n",
      "[39,  45] loss: 0.073\tAccuracy : 0.961\t\tval-loss: 0.218\tval-Accuracy : 0.938\n",
      "[40,  45] loss: 0.071\tAccuracy : 0.961\t\tval-loss: 0.228\tval-Accuracy : 0.938\n",
      "[41,  45] loss: 0.069\tAccuracy : 0.962\t\tval-loss: 0.236\tval-Accuracy : 0.938\n",
      "[42,  45] loss: 0.068\tAccuracy : 0.962\t\tval-loss: 0.248\tval-Accuracy : 0.943\n",
      "[43,  45] loss: 0.067\tAccuracy : 0.963\t\tval-loss: 0.256\tval-Accuracy : 0.943\n",
      "[44,  45] loss: 0.066\tAccuracy : 0.963\t\tval-loss: 0.261\tval-Accuracy : 0.938\n",
      "[45,  45] loss: 0.065\tAccuracy : 0.963\t\tval-loss: 0.271\tval-Accuracy : 0.938\n",
      "loss: 0.065\tAccuracy : 0.963\t\tval-loss: 0.271\tval-Accuracy : 0.938\n",
      "Begin Training rep 1/1\t of Patient 8\n",
      "[1,  45] loss: 1.400\tAccuracy : 0.283\t\tval-loss: 1.373\tval-Accuracy : 0.290\n",
      "[2,  45] loss: 1.399\tAccuracy : 0.283\t\tval-loss: 1.370\tval-Accuracy : 0.290\n",
      "[3,  45] loss: 1.398\tAccuracy : 0.283\t\tval-loss: 1.367\tval-Accuracy : 0.290\n",
      "[4,  45] loss: 1.397\tAccuracy : 0.283\t\tval-loss: 1.364\tval-Accuracy : 0.290\n",
      "[5,  45] loss: 1.395\tAccuracy : 0.283\t\tval-loss: 1.359\tval-Accuracy : 0.290\n",
      "[6,  45] loss: 1.385\tAccuracy : 0.283\t\tval-loss: 1.320\tval-Accuracy : 0.290\n",
      "[7,  45] loss: 1.287\tAccuracy : 0.499\t\tval-loss: 1.221\tval-Accuracy : 0.373\n",
      "[8,  45] loss: 1.172\tAccuracy : 0.560\t\tval-loss: 1.076\tval-Accuracy : 0.534\n",
      "[9,  45] loss: 1.078\tAccuracy : 0.619\t\tval-loss: 0.941\tval-Accuracy : 0.627\n",
      "[10,  45] loss: 0.986\tAccuracy : 0.677\t\tval-loss: 0.871\tval-Accuracy : 0.705\n",
      "[11,  45] loss: 0.886\tAccuracy : 0.705\t\tval-loss: 0.763\tval-Accuracy : 0.705\n",
      "[12,  45] loss: 0.829\tAccuracy : 0.727\t\tval-loss: 0.767\tval-Accuracy : 0.746\n",
      "[13,  45] loss: 0.714\tAccuracy : 0.778\t\tval-loss: 0.555\tval-Accuracy : 0.855\n",
      "[14,  45] loss: 0.628\tAccuracy : 0.802\t\tval-loss: 0.532\tval-Accuracy : 0.834\n",
      "[15,  45] loss: 0.625\tAccuracy : 0.825\t\tval-loss: 0.472\tval-Accuracy : 0.886\n",
      "[16,  45] loss: 0.508\tAccuracy : 0.859\t\tval-loss: 0.493\tval-Accuracy : 0.902\n",
      "[17,  45] loss: 0.379\tAccuracy : 0.887\t\tval-loss: 0.454\tval-Accuracy : 0.902\n",
      "[18,  45] loss: 0.389\tAccuracy : 0.869\t\tval-loss: 0.445\tval-Accuracy : 0.907\n",
      "[19,  45] loss: 0.326\tAccuracy : 0.897\t\tval-loss: 0.337\tval-Accuracy : 0.933\n",
      "[20,  45] loss: 0.279\tAccuracy : 0.907\t\tval-loss: 0.660\tval-Accuracy : 0.948\n",
      "[21,  45] loss: 0.210\tAccuracy : 0.926\t\tval-loss: 0.715\tval-Accuracy : 0.938\n",
      "[22,  45] loss: 0.170\tAccuracy : 0.931\t\tval-loss: 0.685\tval-Accuracy : 0.943\n",
      "[23,  45] loss: 0.146\tAccuracy : 0.931\t\tval-loss: 0.567\tval-Accuracy : 0.943\n",
      "[24,  45] loss: 0.131\tAccuracy : 0.942\t\tval-loss: 0.690\tval-Accuracy : 0.943\n",
      "[25,  45] loss: 0.118\tAccuracy : 0.944\t\tval-loss: 0.750\tval-Accuracy : 0.943\n",
      "[26,  45] loss: 0.110\tAccuracy : 0.947\t\tval-loss: 0.761\tval-Accuracy : 0.938\n",
      "[27,  45] loss: 0.104\tAccuracy : 0.948\t\tval-loss: 0.730\tval-Accuracy : 0.938\n",
      "[28,  45] loss: 0.099\tAccuracy : 0.948\t\tval-loss: 0.796\tval-Accuracy : 0.938\n",
      "[29,  45] loss: 0.094\tAccuracy : 0.950\t\tval-loss: 0.847\tval-Accuracy : 0.938\n",
      "[30,  45] loss: 0.090\tAccuracy : 0.950\t\tval-loss: 0.866\tval-Accuracy : 0.943\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-41-e7fae06453d9>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     52\u001b[0m                 \u001b[0macc\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     53\u001b[0m                 \u001b[1;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0macc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m/\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 54\u001b[1;33m                     \u001b[0m_\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0midx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mCNN\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     55\u001b[0m                     \u001b[0macc\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0midx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"cpu\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m==\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m+\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     56\u001b[0m                 \u001b[0macc\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0macc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m    539\u001b[0m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    540\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    542\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    543\u001b[0m             \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m<ipython-input-16-e790eb8c12b5>\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m     26\u001b[0m             \u001b[0mtmp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m128\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     27\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m7\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 28\u001b[1;33m             \u001b[0mtmp\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv7\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv6\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv5\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv4\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv3\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     29\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtmp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m*\u001b[0m\u001b[1;36m128\u001b[0m\u001b[1;33m*\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     30\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv8\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m    539\u001b[0m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    540\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m             \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    542\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    543\u001b[0m             \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\pooling.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m    139\u001b[0m         return F.max_pool2d(input, self.kernel_size, self.stride,\n\u001b[0;32m    140\u001b[0m                             \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpadding\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdilation\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mceil_mode\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 141\u001b[1;33m                             self.return_indices)\n\u001b[0m\u001b[0;32m    142\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    143\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\_jit_internal.py\u001b[0m in \u001b[0;36mfn\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m    136\u001b[0m             \u001b[1;32mreturn\u001b[0m \u001b[0mif_true\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    137\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 138\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mif_false\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    139\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    140\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mif_true\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__doc__\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mif_false\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__doc__\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\functional.py\u001b[0m in \u001b[0;36m_max_pool2d\u001b[1;34m(input, kernel_size, stride, padding, dilation, ceil_mode, return_indices)\u001b[0m\n\u001b[0;32m    486\u001b[0m         \u001b[0mstride\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjit\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mannotate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mList\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mint\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    487\u001b[0m     return torch.max_pool2d(\n\u001b[1;32m--> 488\u001b[1;33m         input, kernel_size, stride, padding, dilation, ceil_mode)\n\u001b[0m\u001b[0;32m    489\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    490\u001b[0m max_pool2d = boolean_dispatch(\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "p = 0\n",
    "n_rep = 1  \n",
    "n_patient = len(np.unique(Patient))\n",
    "fold_vloss = np.zeros((n_rep,n_patient))\n",
    "fold_loss = np.zeros((n_rep,n_patient))\n",
    "fold_vacc = np.zeros((n_rep,n_patient))\n",
    "fold_acc = np.zeros((n_rep,n_patient))\n",
    "\n",
    "un = np.unique(Patient)\n",
    "np.random.shuffle(un)\n",
    "for patient in un:\n",
    "    id_patient = np.arange(len(tmp))[Patient==patient]\n",
    "    id_train = np.arange(len(tmp))[Patient!=patient]\n",
    "    \n",
    "    for rep in range(n_rep):\n",
    "        np.random.shuffle(id_patient)\n",
    "        np.random.shuffle(id_train)\n",
    "        \n",
    "        X_train = tmp[id_train]\n",
    "        X_test = tmp[id_patient]\n",
    "        y_train = Label[id_train]\n",
    "        y_test = Label[id_patient]\n",
    "        \n",
    "        print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n",
    "             (rep+1,n_rep, patient))\n",
    "        \n",
    "        CNN = TempCNN().cuda()\n",
    "        criterion = nn.NLLLoss()\n",
    "        optimizer = optim.SGD(CNN.parameters(), lr=0.01)\n",
    "        \n",
    "        n_epochs = 45\n",
    "        for epoch in range(n_epochs):\n",
    "            running_loss = 0.0\n",
    "            batchsize = 32\n",
    "            for i in range(int(len(y_train)/batchsize)):\n",
    "                \n",
    "                CNN.to(torch.device(\"cuda\"))\n",
    "                optimizer.zero_grad()\n",
    "                # forward + backward + optimize\n",
    "                outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda())\n",
    "                loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                running_loss += loss.item()\n",
    "                \n",
    "            if epoch%1==0:\n",
    "\n",
    "                check_id = np.arange(2000)\n",
    "                np.random.shuffle(check_id)\n",
    "                \n",
    "                #acc\n",
    "                acc = np.zeros(len(y_train))\n",
    "                for j in range(int(len(acc)/batchsize)+1):\n",
    "                    _, idx = torch.max(CNN(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda()).data,1)\n",
    "                    acc[j*batchsize:(j+1)*batchsize] = (idx.to(torch.device(\"cpu\")) == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n",
    "                acc = np.mean(acc)\n",
    "                \n",
    "                #validation\n",
    "                val_acc = np.zeros(len(y_test))\n",
    "                val_loss = []\n",
    "                for j in range(int(len(val_acc)/batchsize)+1):\n",
    "                    val_outputs = CNN(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda())\n",
    "                    _, idx = torch.max(val_outputs.data,1)\n",
    "                    val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long).cuda()).item())\n",
    "                    val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n",
    "                val_acc = np.mean(val_acc)\n",
    "                val_loss = np.mean(val_loss)\n",
    "\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "                \n",
    "            if epoch==900:\n",
    "\n",
    "                check_id = np.arange(2000)\n",
    "                np.random.shuffle(check_id)\n",
    "                \n",
    "                #acc\n",
    "                acc = np.zeros(len(y_train))\n",
    "                for j in range(int(len(acc)/batchsize)+1):\n",
    "                    _, idx = torch.max(cnn_cpu(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32)).data,1)\n",
    "                    acc[j*batchsize:(j+1)*batchsize] = (idx == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n",
    "                acc = np.mean(acc)\n",
    "                \n",
    "                #validation\n",
    "                val_acc = np.zeros(len(y_test))\n",
    "                val_loss = []\n",
    "                for j in range(int(len(val_acc)/batchsize)+1):\n",
    "                    val_outputs = cnn_cpu(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32))\n",
    "                    _, idx = torch.max(val_outputs.data,1)\n",
    "                    val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long)).item())\n",
    "                    val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n",
    "                val_acc = np.mean(val_acc)\n",
    "                val_loss = np.mean(val_loss)\n",
    "\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "\n",
    "        fold_vloss[rep, p ] = val_loss\n",
    "        fold_loss[rep, p] = running_loss/i\n",
    "        fold_vacc[rep, p] = val_acc\n",
    "        fold_acc[rep, p] = acc\n",
    "      \n",
    "    \n",
    "    print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n",
    "    \n",
    "    p = p + 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'sio' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-1-e97a8aad55e1>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0msio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msavemat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Result/Res_TemporalCNN.mat'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m{\u001b[0m\u001b[1;34m\"loss\"\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mfold_loss\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;34m\"acc\"\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mfold_acc\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;34m\"val loss\"\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mfold_vloss\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;34m\"val acc\"\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mfold_vacc\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      3\u001b[0m \u001b[0mfig\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfigure\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfigsize\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m12\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgrid\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mboxplot\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfold_vacc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'sio' is not defined"
     ]
    }
   ],
   "source": [
    "sio.savemat('Result/Res_TemporalCNN.mat',{\"loss\":fold_loss,\"acc\":fold_acc,\"val loss\":fold_vloss,\"val acc\":fold_vacc})\n",
    "\n",
    "fig = plt.figure(figsize=(12,10))\n",
    "plt.grid()\n",
    "plt.boxplot(fold_vacc)\n",
    "plt.suptitle('Cross-Validation Accuracy\\n Temporal CNN')\n",
    "ax = plt.gca()\n",
    "plt.xlabel('Patient id')\n",
    "plt.ylabel('Accuracy')\n",
    "plt.savefig('Result/TemporalCNN.png')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training rep 1/1\t of Patient 12\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'TempCNN' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-4-e7fae06453d9>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     25\u001b[0m              (rep+1,n_rep, patient))\n\u001b[0;32m     26\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 27\u001b[1;33m         \u001b[0mCNN\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mTempCNN\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     28\u001b[0m         \u001b[0mcriterion\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mNLLLoss\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     29\u001b[0m         \u001b[0moptimizer\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0moptim\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mSGD\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mCNN\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mparameters\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlr\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0.01\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'TempCNN' is not defined"
     ]
    }
   ],
   "source": [
    "p = 0\n",
    "n_rep = 1  \n",
    "n_patient = len(np.unique(Patient))\n",
    "fold_vloss = np.zeros((n_rep,n_patient))\n",
    "fold_loss = np.zeros((n_rep,n_patient))\n",
    "fold_vacc = np.zeros((n_rep,n_patient))\n",
    "fold_acc = np.zeros((n_rep,n_patient))\n",
    "\n",
    "un = np.unique(Patient)\n",
    "np.random.shuffle(un)\n",
    "for patient in un:\n",
    "    id_patient = np.arange(len(tmp))[Patient==patient]\n",
    "    id_train = np.arange(len(tmp))[Patient!=patient]\n",
    "    \n",
    "    for rep in range(n_rep):\n",
    "        np.random.shuffle(id_patient)\n",
    "        np.random.shuffle(id_train)\n",
    "        \n",
    "        X_train = tmp[id_train]\n",
    "        X_test = tmp[id_patient]\n",
    "        y_train = Label[id_train]\n",
    "        y_test = Label[id_patient]\n",
    "        \n",
    "        print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n",
    "             (rep+1,n_rep, patient))\n",
    "        \n",
    "        CNN = TempCNN().cuda()\n",
    "        criterion = nn.NLLLoss()\n",
    "        optimizer = optim.SGD(CNN.parameters(), lr=0.01)\n",
    "        \n",
    "        n_epochs = 45\n",
    "        for epoch in range(n_epochs):\n",
    "            running_loss = 0.0\n",
    "            batchsize = 32\n",
    "            for i in range(int(len(y_train)/batchsize)):\n",
    "                \n",
    "                CNN.to(torch.device(\"cuda\"))\n",
    "                optimizer.zero_grad()\n",
    "                # forward + backward + optimize\n",
    "                outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda())\n",
    "                loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                running_loss += loss.item()\n",
    "                \n",
    "            if epoch%1==0:\n",
    "\n",
    "                check_id = np.arange(2000)\n",
    "                np.random.shuffle(check_id)\n",
    "                \n",
    "                #acc\n",
    "                acc = np.zeros(len(y_train))\n",
    "                for j in range(int(len(acc)/batchsize)+1):\n",
    "                    _, idx = torch.max(CNN(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda()).data,1)\n",
    "                    acc[j*batchsize:(j+1)*batchsize] = (idx.to(torch.device(\"cpu\")) == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n",
    "                acc = np.mean(acc)\n",
    "                \n",
    "                #validation\n",
    "                val_acc = np.zeros(len(y_test))\n",
    "                val_loss = []\n",
    "                for j in range(int(len(val_acc)/batchsize)+1):\n",
    "                    val_outputs = CNN(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda())\n",
    "                    _, idx = torch.max(val_outputs.data,1)\n",
    "                    val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long).cuda()).item())\n",
    "                    val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n",
    "                val_acc = np.mean(val_acc)\n",
    "                val_loss = np.mean(val_loss)\n",
    "\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "                \n",
    "            if epoch==900:\n",
    "\n",
    "                check_id = np.arange(2000)\n",
    "                np.random.shuffle(check_id)\n",
    "                \n",
    "                #acc\n",
    "                acc = np.zeros(len(y_train))\n",
    "                for j in range(int(len(acc)/batchsize)+1):\n",
    "                    _, idx = torch.max(cnn_cpu(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32)).data,1)\n",
    "                    acc[j*batchsize:(j+1)*batchsize] = (idx == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n",
    "                acc = np.mean(acc)\n",
    "                \n",
    "                #validation\n",
    "                val_acc = np.zeros(len(y_test))\n",
    "                val_loss = []\n",
    "                for j in range(int(len(val_acc)/batchsize)+1):\n",
    "                    val_outputs = cnn_cpu(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32))\n",
    "                    _, idx = torch.max(val_outputs.data,1)\n",
    "                    val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long)).item())\n",
    "                    val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n",
    "                val_acc = np.mean(val_acc)\n",
    "                val_loss = np.mean(val_loss)\n",
    "\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "\n",
    "        fold_vloss[rep, p ] = val_loss\n",
    "        fold_loss[rep, p] = running_loss/i\n",
    "        fold_vacc[rep, p] = val_acc\n",
    "        fold_acc[rep, p] = acc\n",
    "      \n",
    "    \n",
    "    print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n",
    "    \n",
    "    p = p + 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LSTM(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(LSTM, self).__init__()\n",
    "        \n",
    "        \n",
    "        self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n",
    "        self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.pool1 = nn.MaxPool2d((2,2))\n",
    "        self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n",
    "        \n",
    "        self.lstm = nn.LSTM(4*4*128,128,7)\n",
    "        \n",
    "        self.pool = nn.MaxPool2d((7,1))\n",
    "        self.drop = nn.Dropout(p=0.5)\n",
    "        self.fc = nn.Linear(896,4)\n",
    "        self.max = nn.LogSoftmax()\n",
    "        \n",
    "        self.hidden = self.init_hidden()\n",
    "\n",
    "    def init_hidden(self):\n",
    "        return (torch.randn(7, 7, 128).cuda(),\n",
    "                torch.randn(7, 7, 128).cuda())    \n",
    "        \n",
    "    def forward(self, x):\n",
    "        if x.get_device() == 0:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n",
    "        else:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n",
    "        for i in range(7):\n",
    "            tmp[:,i] = self.pool1(F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n",
    "        x = tmp.reshape(x.shape[0], x.shape[1],4*128*4)\n",
    "        \n",
    "        lstm_out, self.hidden = self.lstm(x,self.hidden)\n",
    "        x = lstm_out.view(x.shape[0],-1)\n",
    "        \n",
    "        x = self.fc(x)\n",
    "        x = self.max(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LSTM(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(LSTM, self).__init__()\n",
    "        \n",
    "        \n",
    "        self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n",
    "        self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.pool1 = nn.MaxPool2d((2,2))\n",
    "        self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n",
    "        \n",
    "        self.lstm = nn.RNN(4*4*128,128,7)\n",
    "        \n",
    "        self.pool = nn.MaxPool2d((7,1))\n",
    "        self.drop = nn.Dropout(p=0.5)\n",
    "        self.fc = nn.Linear(896,4)\n",
    "        self.max = nn.LogSoftmax()\n",
    "        \n",
    "        self.lstm_out = torch.zeros(2,7,128)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        if x.get_device() == 0:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n",
    "        else:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n",
    "        for i in range(7):\n",
    "            img = x[:,i]\n",
    "            img = F.relu(self.conv1(img))\n",
    "            img = F.relu(self.conv2(img))\n",
    "            img = F.relu(self.conv3(img))\n",
    "            img = F.relu(self.conv4(img))\n",
    "            img = self.pool1(img)\n",
    "            img = F.relu(self.conv5(img))\n",
    "            img = F.relu(self.conv6(img))\n",
    "            img = self.pool1(img)\n",
    "            img = F.relu(self.conv7(img))\n",
    "            #x[:,i,]\n",
    "            tmp[:,i] = self.pool1(img)\n",
    "            del img\n",
    "            #tmp[:,i] = self.pool1(F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n",
    "        x = tmp.reshape(x.shape[0], x.shape[1],4*128*4)\n",
    "        del tmp\n",
    "        #self.lstm_out, self.hidden = self.lstm(x,self.hidden)\n",
    "        self.lstm_out, _ = self.lstm(x)\n",
    "        \n",
    "        x = self.lstm_out.view(x.shape[0],-1)\n",
    "        x = self.fc(x)\n",
    "        x = self.max(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/vdelv/anaconda3/envs/Pytorch_EEG/lib/python3.7/site-packages/ipykernel_launcher.py:51: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[-1.3417, -1.3869, -1.4236, -1.3948],\n",
       "        [-1.3531, -1.3826, -1.4254, -1.3854]], device='cuda:0',\n",
       "       grad_fn=<LogSoftmaxBackward>)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "net = LSTM().cuda()\n",
    "net(torch.from_numpy(tmp[0:2]).to(torch.float32).cuda())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training rep 1/1\t of Patient 6\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/vdelv/anaconda3/envs/Pytorch_EEG/lib/python3.7/site-packages/ipykernel_launcher.py:51: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1,  50] loss: 1.401\tAccuracy : 0.284\t\tval-loss: 1.375\tval-Accuracy : 0.291\n",
      "[6,  50] loss: 0.759\tAccuracy : 0.744\t\tval-loss: 0.395\tval-Accuracy : 0.898\n",
      "[11,  50] loss: 0.180\tAccuracy : 0.951\t\tval-loss: 0.045\tval-Accuracy : 0.995\n",
      "[16,  50] loss: 0.116\tAccuracy : 0.970\t\tval-loss: 0.029\tval-Accuracy : 0.995\n",
      "[21,  50] loss: 0.067\tAccuracy : 0.979\t\tval-loss: 0.028\tval-Accuracy : 0.995\n",
      "[26,  50] loss: 0.033\tAccuracy : 0.987\t\tval-loss: 0.017\tval-Accuracy : 0.990\n"
     ]
    }
   ],
   "source": [
    "p = 0\n",
    "n_rep = 1  \n",
    "n_patient = len(np.unique(Patient))\n",
    "fold_vloss = np.zeros((n_rep,n_patient))\n",
    "fold_loss = np.zeros((n_rep,n_patient))\n",
    "fold_vacc = np.zeros((n_rep,n_patient))\n",
    "fold_acc = np.zeros((n_rep,n_patient))\n",
    "\n",
    "un = np.unique(Patient)\n",
    "np.random.shuffle(un)\n",
    "for patient in un:\n",
    "    id_patient = np.arange(len(tmp))[Patient==patient]\n",
    "    id_train = np.arange(len(tmp))[Patient!=patient]\n",
    "    \n",
    "    for rep in range(n_rep):\n",
    "        np.random.shuffle(id_patient)\n",
    "        np.random.shuffle(id_train)\n",
    "        \n",
    "        X_train = tmp[id_train]\n",
    "        X_test = tmp[id_patient]\n",
    "        y_train = Label[id_train]\n",
    "        y_test = Label[id_patient]\n",
    "        \n",
    "        print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n",
    "             (rep+1,n_rep, patient))\n",
    "        \n",
    "        CNN = LSTM().cuda()\n",
    "        criterion = nn.CrossEntropyLoss()\n",
    "        optimizer = optim.Adam(CNN.parameters(), lr=0.0001)\n",
    "        \n",
    "        \n",
    "        n_epochs = 50\n",
    "        for epoch in range(n_epochs):\n",
    "            running_loss = 0.0\n",
    "            batchsize = 32\n",
    "            for i in range(int(len(y_train)/batchsize)):\n",
    "                #print(i)\n",
    "                optimizer.zero_grad()\n",
    "                # forward + backward + optimize\n",
    "                outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda().detach())\n",
    "                loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n",
    "                loss.backward(retain_graph=True)\n",
    "                optimizer.step()\n",
    "                running_loss += loss.item()         \n",
    "                \n",
    "                \n",
    "            if epoch%5==0:\n",
    "\n",
    "                check_id = np.arange(2000)\n",
    "                np.random.shuffle(check_id)\n",
    "                \n",
    "                #acc\n",
    "                acc = np.zeros(len(y_train))\n",
    "                for j in range(int(len(acc)/batchsize)+1):\n",
    "                    _, idx = torch.max(CNN(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda()).data,1)\n",
    "                    acc[j*batchsize:(j+1)*batchsize] = (idx.to(torch.device(\"cpu\")) == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n",
    "                acc = np.mean(acc)\n",
    "                \n",
    "                #validation\n",
    "                val_acc = np.zeros(len(y_test))\n",
    "                val_loss = []\n",
    "                for j in range(int(len(val_acc)/batchsize)+1):\n",
    "                    val_outputs = CNN(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda())\n",
    "                    _, idx = torch.max(val_outputs.data,1)\n",
    "                    val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long).cuda()).item())\n",
    "                    val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n",
    "                val_acc = np.mean(val_acc)\n",
    "                val_loss = np.mean(val_loss)\n",
    "\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "                \n",
    "        fold_vloss[rep, p ] = val_loss\n",
    "        fold_loss[rep, p] = running_loss/i\n",
    "        fold_vacc[rep, p] = val_acc\n",
    "        fold_acc[rep, p] = acc\n",
    "      \n",
    "    \n",
    "    print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n",
    "    \n",
    "    p = p + 1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Mix Architecture"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Mix(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Mix, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n",
    "        self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n",
    "        self.pool1 = nn.MaxPool2d((2,2))\n",
    "        self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n",
    "        self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n",
    "        \n",
    "        #\n",
    "        self.conv8 = nn.Conv2d(7,64,(4*4*128,3),stride=(1,1),padding=1)\n",
    "        self.lstm = nn.RNN(4*4*128,128,7)\n",
    "        \n",
    "        self.pool = nn.MaxPool2d((7,1))\n",
    "        self.drop = nn.Dropout(p=0.5)\n",
    "        self.fc1 = nn.Linear(1088,512)\n",
    "        self.fc2 = nn.Linear(512,4)\n",
    "        self.max = nn.LogSoftmax()\n",
    "        \n",
    "        self.lstm_out = torch.zeros(2,7,128)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        if x.get_device() == 0:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n",
    "        else:\n",
    "            tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n",
    "        for i in range(7):\n",
    "            img = x[:,i]\n",
    "            img = F.relu(self.conv1(img))\n",
    "            img = F.relu(self.conv2(img))\n",
    "            img = F.relu(self.conv3(img))\n",
    "            img = F.relu(self.conv4(img))\n",
    "            img = self.pool1(img)\n",
    "            img = F.relu(self.conv5(img))\n",
    "            img = F.relu(self.conv6(img))\n",
    "            img = self.pool1(img)\n",
    "            img = F.relu(self.conv7(img))\n",
    "            #x[:,i,]\n",
    "            tmp[:,i] = self.pool1(img)\n",
    "            del img\n",
    "            #tmp[:,i] = self.pool1(F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n",
    "        \n",
    "        temp_conv = F.relu(self.conv8(tmp.reshape(x.shape[0], x.shape[1], 4*128*4,1)))\n",
    "        temp_conv = temp_conv.reshape(temp_conv.shape[0],-1)\n",
    "        \n",
    "        self.lstm_out, _ = self.lstm(tmp.reshape(x.shape[0], x.shape[1], 4*128*4))\n",
    "        lstm = self.lstm_out.view(x.shape[0],-1)\n",
    "        \n",
    "        x = torch.cat((temp_conv, lstm), 1)\n",
    "        del tmp\n",
    "        #self.lstm_out, self.hidden = self.lstm(x,self.hidden)\n",
    "        \n",
    "        x = self.fc1(x)\n",
    "        x = self.fc2(x)\n",
    "        x = self.max(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Begin Training rep 1/1\t of Patient 11\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/vdelv/anaconda3/envs/Pytorch_EEG/lib/python3.7/site-packages/ipykernel_launcher.py:58: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1,  50] loss: 1.317\tAccuracy : 0.492\t\tval-loss: 0.935\tval-Accuracy : 0.498\n",
      "[6,  50] loss: 0.345\tAccuracy : 0.888\t\tval-loss: 0.296\tval-Accuracy : 0.867\n",
      "[11,  50] loss: 0.123\tAccuracy : 0.948\t\tval-loss: 0.260\tval-Accuracy : 0.924\n",
      "[16,  50] loss: 0.076\tAccuracy : 0.965\t\tval-loss: 0.259\tval-Accuracy : 0.911\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-38-270da0643345>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     53\u001b[0m                 \u001b[0macc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     54\u001b[0m                 \u001b[0;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0macc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 55\u001b[0;31m                     \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0midx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mCNN\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     56\u001b[0m                     \u001b[0macc\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0midx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"cpu\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     57\u001b[0m                 \u001b[0macc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0macc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "p = 0\n",
    "n_rep = 1  \n",
    "n_patient = len(np.unique(Patient))\n",
    "fold_vloss = np.zeros((n_rep,n_patient))\n",
    "fold_loss = np.zeros((n_rep,n_patient))\n",
    "fold_vacc = np.zeros((n_rep,n_patient))\n",
    "fold_acc = np.zeros((n_rep,n_patient))\n",
    "\n",
    "un = np.unique(Patient)\n",
    "np.random.shuffle(un)\n",
    "for patient in un:\n",
    "    id_patient = np.arange(len(tmp))[Patient==patient]\n",
    "    id_train = np.arange(len(tmp))[Patient!=patient]\n",
    "    \n",
    "    for rep in range(n_rep):\n",
    "        np.random.shuffle(id_patient)\n",
    "        np.random.shuffle(id_train)\n",
    "        \n",
    "        X_train = tmp[id_train]\n",
    "        X_test = tmp[id_patient]\n",
    "        y_train = Label[id_train]\n",
    "        y_test = Label[id_patient]\n",
    "        \n",
    "        print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n",
    "             (rep+1,n_rep, patient))\n",
    "        \n",
    "        CNN = Mix().cuda()\n",
    "        criterion = nn.CrossEntropyLoss()\n",
    "        optimizer = optim.Adam(CNN.parameters(), lr=0.0001)\n",
    "        \n",
    "        \n",
    "        n_epochs = 50\n",
    "        for epoch in range(n_epochs):\n",
    "            running_loss = 0.0\n",
    "            batchsize = 32\n",
    "            for i in range(int(len(y_train)/batchsize)):\n",
    "                #print(i)\n",
    "                optimizer.zero_grad()\n",
    "                # forward + backward + optimize\n",
    "                outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda().detach())\n",
    "                loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n",
    "                loss.backward(retain_graph=True)\n",
    "                optimizer.step()\n",
    "                running_loss += loss.item()         \n",
    "                \n",
    "                \n",
    "            if epoch%5==0:\n",
    "\n",
    "                check_id = np.arange(2000)\n",
    "                np.random.shuffle(check_id)\n",
    "                \n",
    "                #acc\n",
    "                acc = np.zeros(len(y_train))\n",
    "                for j in range(int(len(acc)/batchsize)+1):\n",
    "                    _, idx = torch.max(CNN(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda()).data,1)\n",
    "                    acc[j*batchsize:(j+1)*batchsize] = (idx.to(torch.device(\"cpu\")) == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n",
    "                acc = np.mean(acc)\n",
    "                \n",
    "                #validation\n",
    "                val_acc = np.zeros(len(y_test))\n",
    "                val_loss = []\n",
    "                for j in range(int(len(val_acc)/batchsize)+1):\n",
    "                    val_outputs = CNN(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda())\n",
    "                    _, idx = torch.max(val_outputs.data,1)\n",
    "                    val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long).cuda()).item())\n",
    "                    val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n",
    "                val_acc = np.mean(val_acc)\n",
    "                val_loss = np.mean(val_loss)\n",
    "\n",
    "                print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "             (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n",
    "                \n",
    "        fold_vloss[rep, p ] = val_loss\n",
    "        fold_loss[rep, p] = running_loss/i\n",
    "        fold_vacc[rep, p] = val_acc\n",
    "        fold_acc[rep, p] = acc\n",
    "      \n",
    "    \n",
    "    print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n",
    "                 (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n",
    "    \n",
    "    p = p + 1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Results "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "fold_vloss = sio.loadmat(\"result_LSTM.mat\")['vloss']\n",
    "fold_loss = sio.loadmat(\"result_LSTM.mat\")['loss']\n",
    "fold_vacc = sio.loadmat(\"result_LSTM.mat\")['vacc']\n",
    "fold_acc = sio.loadmat(\"result_LSTM.mat\")['acc']  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAD4CAYAAAD8Zh1EAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAXvklEQVR4nO3de4xc513G8e+TddKLN5fdul21dWiCMGUTC7XdKCl0qXYwRU6pHCgGxQKLwKbmD2ygraCJFiVO0IqruDWhFcqWa71L6A0TGRIIs4WVaIlN02B7SDFpaZKmTZpuG7aVSB1+/DHH7ng99z1nZufd5yONPOfMmfN73/XOs2fOvHNeRQRmZjb4Luh3A8zMLB8OdDOzRDjQzcwS4UA3M0uEA93MLBGb+lV4y5YtccUVV3T0nK9//ets3ry5mAb1uE5KfUmtTkp9Sa1OSn3pts6xY8e+HBEvr/tgRPTlNjExEZ0ql8sdP6cbvaiTUl9Sq5NSX1Krk1Jfuq0DHI0GuepTLmZmiXCgm5klwoFuZpYIB7qZWSIc6GZmiWgZ6JI+IOlpSccbPC5JfyDplKRHJL0h/2aamVkr7Ryh/wmws8nj1wPbsts+4H1rb5aZmXWqZaBHxD8BX2myyQ3An2VDJD8BXCbplXk10MzM2qNo43rokq4A7ouI7XUeuw/49YhYypYfBN4TEUfrbLuP6lE8Y2NjEwsLCx01dmVlheHh4Y6e041e1EmpL6nVSaEvpVKp4WPlcrmQmv6/6U2dUql0LCKuqftgo28c1d6AK4DjDR67D5isWX4QuKbVPv1N0eJr9KLOoUOH4uqrr44LLrggrr766jh06FCh9fx/07nqy7x4/r/pTR2afFM0j2u5PAlcXrO8NVu3ro2OjrK8vNzRc0ZGRvjKV5qdfdpY5ufnmZmZYW5ujhdeeIGhoSGmp6cB2LNnT59btwEdvLTu6rj9koaPcfBrBTbIei2PQD8M7Je0AFwHfC0insphv4VaXl4+847iPIuLi0xNTZ23XtKa687PzzM7O0ulUmF8fJyZmZmBDb/Z2Vnm5uYolUpnf2Zzc3McOHBgYPs0yHTHc3V/p5v9PsfB4ttlvdMy0CXNA1PAFklPALcDFwJExPuBI8BbgVPAN4CfLqqxeWp21DIFsNjgOWuQ2hFtpVJhcnLynHWTk5NUKpU+tciS0eFr81vP29jvOFoGekQ0TZrsnM7P5daiHml0NAPFHdGkdkQ7Pj7O0tLSOR/ALS0tMT4+3sdWWQo6fbcBfscBfbwe+nrQ6SmUkZGRNdVL4oi25sjp+G7g4z8MH//WkVPpzPraI6wNftRk3en16zMFGzbQGx2dQ/aXvo3hnJ1K4oh2VTin9JmArR+NXn9FvTZTsWEDvR9mZmaYnp4+ew69XC4zPT3N7Oxsv5vWtT179rBnz56mb4XXqtmR2iC8uD2iam1W//+vXh6E34FecaD30Jkj1wMHDpw9op2dnfURbQu1L9hBPEJrNKKq1flgq6r92RV54JACBzr1Xzy16/IMkF4c0faST7m01mhE1RQ0HLGx1hFVtjE50Dk/sFMJ26KlNgyzMA0+FB7Edxu2vvl66Na12mGYmzZtolQqMTc3t+bPBEZHR5FU9wbUXT86OppHlwrXrC9ma+VAt64VNQzzzDnnerdyuVx3facfOvZLs76YrZVPuVjXkhiGmZhOjvQ9bjs9DnTrWlHDMPtxWYYUeOy2OdALlvIY5KKGYfbjsgypaTZ22+GeLgd6wVIfg1zUMEx/7XttPHZ7Y3KgF8xjkDvXj8symKXAgV4wXzXOzHrFgZ6gQb/2iZl1x4GeoEG/9omZdcdfLDIzS4SP0HvAIzbMrBd8hF6wTr/CHhEDMQbdzNYfH6HbuucvyZi1x4Fu656/JGMpKXIUmgPdzKyHihyF5nPoZmaJcKCbmSXCgW5mlggHeiIaTdsG9adsG6Rp28ysPQ70RDSatq3ZePdBmbbNzNrjQO+x+fl5tm/fzo4dO9i+fTvz8/P9bpKZJcLDFntofn6emZmZs1O2DQ0NMT09DbDmWX7MbP1qNnNZvXHp3c5a5kDvodnZWebm5iiVSme/IDM3N8eBAwfWHOieSMNs/ep05rJuZy1zoPdQpVJhcnLynHWTk5NUKpU179sTaZhZW+fQJe2U9KikU5JuqfP4ayQ9KOkRSYuStubf1ME3Pj7O0tLSOeuWlpYYHx/vU4vMLCUtj9AlDQF3A28BngAeknQ4Ik7WbPbbwJ9FxJ9K+n7g14C9RTR4kM3MzDA9PX32HHq5XGZ6eprZ2dl+N83MCtTpKdFuT4e2c8rlWuBURDwGIGkBuAGoDfSrgHdl98vAx7pqTeLOnCc/cOAAlUqF8fFxZmdnc/tA1NddN1ufOj0l2u3pULW6MIyk3cDOiLg5W94LXBcR+2u2OQR8MiJ+X9LbgQ8DWyLi2VX72gfsAxgbG5tYWFjoqLErKysMDw939Jxu9KJOSn1JrU5KfUmtzqD2pVQqUS6X267TaPvssWMRcU3dBxt96eTMDdgN3FOzvBe4a9U2rwI+AnwK+H2qp2Yua7bfiYmJ6FS5XO74Od3oRZ2U+pJanZT6klqdQe1LNWrbr9No++yxo9EgV9s55fIkcHnN8tZsXe0fhS8AbweQNAz8aER8tY19m5lZTtoZ5fIQsE3SlZIuAm4EDtduIGmLpDP7uhX4QL7NNDOzVloGekScBvYD9wMV4N6IOCHpTkm7ss2mgEclfQYYAzxsw8ysx9r6YlFEHAGOrFp3W839DwEfyrdpZmbWCV+cy8wsEQ50M7NEONDNzBLhQDczS4Svtmhm1gOdXJqj28tyONDNzAoWDS6xIqnhY93wKRczs0Q40M3MEuFANzNLhAPdzCwRDvREzc/Ps337dnbs2MH27duZn5/vd5MsQZLO3kql0jnL1nse5ZKg+fl5ZmZmzk51NzQ0xPT0NEBusyOZwbmjN/IesWGd8xF6gmZnZ5mbm6NUKrFp0yZKpRJzc3Oeu9QscQ70BFUqFSYnJ89ZNzk5SaVS6VOLzKwXHOgJGh8fZ2lp6Zx1S0tLjI+P96lFZtYLDvQEzczMMD09Tblc5vTp05TLZaanp5mZmel308ysQP5QNEFnPvg8cOAAlUqF8fFxZmdn/YGo2TqwegRQ7fJaP1T2EXqi9uzZw/Hjx3nwwQc5fvy4w9xsnYiIs7dyuXzO8lo50M3MEuFANzNLhAPdzCwRDnQzs0Q40M3MEuFANzNLhAPdzCwRDnQzs0Q40M3MEuFANzNLhAPdzCwRDnQzs0Q40M3MEtFWoEvaKelRSack3VLn8W+TVJb0KUmPSHpr/k01s34bHR09ZyLo1ZNC11s/Ojra51ZvHC0DXdIQcDdwPXAVsEfSVas2+xXg3oh4PXAj8Id5N9TM+m95efmcy702uxTsmdvy8nK/m71htHOEfi1wKiIei4jngQXghlXbBHBJdv9S4Av5NdHMzNqhVhdVl7Qb2BkRN2fLe4HrImJ/zTavBB4ARoDNwA9ExLE6+9oH7AMYGxubWFhY6KixKysrDA8Pd/ScbvSiTkp9Sa1OSn3Ju06pVKJcLndUp9lzOjWIP7O865RKpWMRcU3dBxu9faqZQWM3cE/N8l7grlXbvAt4d3b/e4CTwAXN9jsxMRGdKpfLHT+nG72ok1JfUquTUl/yrlONjM7qNHtOpwbxZ5Z3HeBoNMjVdk65PAlcXrO8NVtXaxq4N/sD8S/Ai4EtbezbzMxy0k6gPwRsk3SlpIuofuh5eNU2nwd2AEgapxroz+TZUDMza65loEfEaWA/cD9QoTqa5YSkOyXtyjZ7N/AOSZ8G5oGbsrcGZmbWI5va2SgijgBHVq27reb+SeBN+Tat6sz41gbtKqKkmdlAaivQ+6k2tCU5xM3MGvBX/83MEuFANzNLhAPdzCwRDnQzs0Q40M3MEuFANzNLhAPdzCwRDnQzs0Q40M3MEuFANzNLhAPdzCwR6zLQG01EC56E1syskXUZ6I0movUktGZmja3Lqy3G7ZfAwUvPWz8FsNhgezMrXKPXJvj1uR6sy0DXHc/VvUzu4uIiU1NT528vEQeLb5fZRtfotQl+fa4H6/KUi5mZdc6BbmaWCAe6mVkiHOhmZolwoJuZJcKBbmaWCAe6mVkiHOhmZolwoJuZJcKBbmaWCAe6mVki1uW1XICzl8ttx8jISIEtMTMbDOsy0Btd/EdSw8fMzDa6dRnoZrZ+dfLuGfwOupcc6GbWtmbvkP0Ouv/a+lBU0k5Jj0o6JemWOo//rqSHs9tnJH01rwY2m4LOzMy+pWWgSxoC7gauB64C9ki6qnabiHhnRLwuIl4HvBf4SF4NbDYFnZmZfUs7R+jXAqci4rGIeB5YAG5osv0eYD6PxpmZWfvU6khX0m5gZ0TcnC3vBa6LiP11tn0N8Alga0S8UOfxfcA+gLGxsYmFhYWOGruyssLw8HBHz+lGL+qk1JfU6qTUl17WKZVKlMvlQmuk9jPrpk6pVDoWEdfUfbD2FEa9G7AbuKdmeS9wV4Nt3wO8t9U+I4KJiYnoVLlc7vg53ehFnZT6klqdlPrSyzrVOClWaj+zbuoAR6NBrrZzyuVJ4PKa5a3ZunpuxKdbzDYMD1pYX9oJ9IeAbZKulHQR1dA+vHojSd8FjAD/km8TzWy9qj069KCF/msZ6BFxGtgP3A9UgHsj4oSkOyXtqtn0RmAh/D9pZtYXbX2xKCKOAEdWrbtt1fLB/JplZmad8tUWzcwS4UA3M0uEA93MLBEOdDOzRDjQzcwS4UA3M0uEA93MLBEOdDOzRDjQzcwS4UA3M0uEA93MLBEOdDOzRLR1cS6z1LW6frcvImqDwIFuxrmBLckBbgPJp1xswxodHT1nhp1GM+/U3kZHR/vcarPGHOi2YS0vL9edl3H1zDu1t+Xl5X4326whB7qZWSIc6GZmiXCgm5klwqNcbMOK2y+Bg5eet34KYLHJc8zWKQe6bVi647m6wxMXFxeZmpqq/xwJT4du65VPuZiZJcKBbmaWCAe6mVkiHOhmZolwoJuZJcKBbmaWCAe6mVkiHOhmZolwoJuZJcKBbmaWiLYCXdJOSY9KOiXplgbb/Likk5JOSDqUbzPNzKyVltdykTQE3A28BXgCeEjS4Yg4WbPNNuBW4E0RsSzpFUU12MysCM3mlR2UKQnbOUK/FjgVEY9FxPPAAnDDqm3eAdwdEcsAEfF0vs00MytW7cxU9ZYHgVo1VtJuYGdE3Jwt7wWui4j9Ndt8DPgM8CZgCDgYEX9XZ1/7gH0AY2NjEwsLCx01dmVlheHh4Y6e041e1EmpL4Nap1Qqdfyciy++mMOHD+dSfxB/Zv2u06u+lEolyuVy4XW66U+pVDoWEdfUfbDR3Ik1f5l2A/fULO8F7lq1zX3AR4ELgSuBx4HLmu13YmIiOlUulzt+Tjd6USelvqRWp/qyKF5KP7Ne1elVX9bz7wBwNBrkajunXJ4ELq9Z3pqtq/UEcDgivhkRn6V6tL6tjX2bmVlO2gn0h4Btkq6UdBFwI7D6PefHyCZ6kbQF+E7gsRzbaWaWu9HRUSSddwPqrpfE6Ohon1vdWMtAj4jTwH7gfqAC3BsRJyTdKWlXttn9wLOSTgJl4Jci4tmiGm1mlofl5eW6py7K5XLD08XLy8v9bnZDbU1BFxFHgCOr1t1Wcz+Ad2U3MzPrA88pasb5Y5BXL8cADV2zjctf/TeDlm+3zQaBA93MLBEOdDOzRDjQzcwS4UA3M0uEA93MLBEetmhmG1bcfgkcvPS89VMAi02es0450M1sw9Idz9Udlrq4uMjU1FT950jEwWLb1S2fcjEzS4QD3cwsEQ50M7NEONDNzBLhQDczS4QD3cwsEQ50M7NEONDNzBLhLxaZ2Ya2ejKTVkZGRgpqydo50M1sw2o0eYmkgZzYxKdczMwS4UA3M0uEA93MLBEOdDOzRDjQzcwS4UA3M0uEA93MLBEOdDOzRDjQzcwS4UA3M0uEA93MLBEOdDOzRLQV6JJ2SnpU0ilJt9R5/CZJz0h6OLvdnH9TzcysmZZXW5Q0BNwNvAV4AnhI0uGIOLlq07+MiP0FtNHMrHCrL6NbuzwoV15s5wj9WuBURDwWEc8DC8ANxTbLzKy3IuLsrVwun7M8KNSqsZJ2Azsj4uZseS9wXe3RuKSbgF8DngE+A7wzIh6vs699wD6AsbGxiYWFhY4au7KywvDwcEfP6UYv6qTUl9TqpNSX1Oqk1Jdu65RKpWMRcU3dB2v/CtW7AbuBe2qW9wJ3rdrmZcCLsvs/C/xjq/1OTExEp8rlcsfP6UYv6qTUl9TqpNSX1Oqk1Jdu6wBHo0GutnPK5Ung8prlrdm62j8Kz0bE/2aL9wAT7f2tMTOzvLQT6A8B2yRdKeki4EbgcO0Gkl5Zs7gLqOTXRDMza0fLUS4RcVrSfuB+YAj4QESckHQn1UP/w8DPS9oFnAa+AtxUYJvNzKyOtiaJjogjwJFV626ruX8rcGu+TTMzs074m6JmZolwoJuZJaLlOPTCCkvPAP/d4dO2AF8uoDn9qJNSX1Krk1JfUquTUl+6rfOaiHh5vQf6FujdkHQ0Gg2oH7A6KfUltTop9SW1Oin1pYg6PuViZpYIB7qZWSIGLdD/KKE6KfUltTop9SW1Oin1Jfc6A3UO3czMGhu0I3QzM2vAgW5mloiBCHRJH5D0tKTjBda4XFJZ0klJJyT9QkF1XizpXyV9OqtzRxF1auoNSfqUpPsKrPE5Sf+eTT94tKAal0n6kKT/kFSR9D0F1HhtzTSKD0t6TtIv5l0nq/XO7P//uKR5SS8uoMYvZPs/kXc/6r0mJY1K+ntJ/5n9O1JAjR/L+vN/knIZ7tegzm9lv2uPSPqopMsKqvOrWY2HJT0g6VVrKtLourrr6Qa8GXgDcLzAGq8E3pDdv5jqRB1XFVBHwHB2/0Lgk8AbC+zXu4BDwH0F1vgcsKXg34E/BW7O7l8EXFZwvSHgi1S/xJH3vl8NfBZ4SbZ8L3BTzjW2A8eBl1K9ZtM/AN+R4/7Pe00Cvwnckt2/BfiNAmqMA68FFoFrCuzLDwKbsvu/sda+NKlzSc39nwfev5YaA3GEHhH/RPUqjkXWeCoi/i27/z9ULwH86gLqRESsZIsXZrdCPpmWtBX4IarXqB9Yki6l+mKYA4iI5yPiqwWX3QH8V0R0+m3mdm0CXiJpE9XQ/ULO+x8HPhkR34iI08DHgbfntfMGr8kbqP7hJfv3h/OuERGViHh0Lftts84D2c8N4BNU54Eoos5zNYubWWMWDESg95qkK4DXUz16LmL/Q5IeBp4G/j4iCqkD/B7wy8D/FbT/MwJ4QNKxbJrBvF1JdXrDP85OH90jaXMBdWrdCMwXseOIeBL4beDzwFPA1yLigZzLHAe+T9LLJL0UeCvnTlRThLGIeCq7/0VgrOB6vfIzwN8WtXNJs5IeB34CuK3V9s040FeRNAx8GPjFVX89cxMRL0TE66j+1b9W0va8a0h6G/B0RBzLe991TEbEG4DrgZ+T9Oac97+J6lvV90XE64GvU31LX4hsIpddwF8VtP8RqkezVwKvAjZL+sk8a0REheqpggeAvwMeBl7Is0aL+kFB7zx7SdIM1XkePlhUjYiYiYjLsxr7W23fjAO9hqQLqYb5ByPiI0XXy04blIGdBez+TcAuSZ8DFoDvl/QXBdQ5c8RJRDwNfBS4NucSTwBP1LyT+RDVgC/K9cC/RcSXCtr/DwCfjYhnIuKbwEeA7827SETMRcRERLwZWKb6uVCRvnRm9rLs36cLrlcoSTcBbwN+IvsDVbQPAj+6lh040DOSRPUcbSUifqfAOi8/84m5pJcAbwH+I+86EXFrRGyNiCuonj74x4jI9SgQQNJmSRefuU/1w6RcRyNFxBeBxyW9Nlu1AziZZ41V9lDQ6ZbM54E3Snpp9nu3gwKmbZT0iuzfb6N6/vxQ3jVWOQz8VHb/p4C/LrheYSTtpHq6cldEfKPAOttqFm9grVmQx6fERd+ovrieAr5J9WhtuoAak1TfIj5C9e3pw8BbC6jz3cCnsjrHgdt68POboqBRLsC3A5/ObieAmYLqvA44mv3cPgaMFFRnM/AscGnB/yd3ZC/e48CfAy8qoMY/U/3D92lgR877Pu81CbwMeBD4T6qjakYLqPEj2f3/Bb4E3F9QX04Bj9dkwZpGnzSp8+Hsd+AR4G+AV6+lhr/6b2aWCJ9yMTNLhAPdzCwRDnQzs0Q40M3MEuFANzNLhAPdzCwRDnQzs0T8P6RyKI1RDRIBAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "fig = plt.figure()\n",
    "plt.boxplot(fold_vacc)\n",
    "plt.grid()\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Pytorch",
   "language": "python",
   "name": "pytorch"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}