|
|
b/.ipynb_checkpoints/EEGLearn-checkpoint.ipynb |
|
|
1 |
{ |
|
|
2 |
"cells": [ |
|
|
3 |
{ |
|
|
4 |
"cell_type": "code", |
|
|
5 |
"execution_count": 1, |
|
|
6 |
"metadata": {}, |
|
|
7 |
"outputs": [], |
|
|
8 |
"source": [ |
|
|
9 |
"import numpy as np \n", |
|
|
10 |
"import scipy.io as sio\n", |
|
|
11 |
"import matplotlib.pyplot as plt \n", |
|
|
12 |
"import seaborn as sn\n", |
|
|
13 |
"import pandas as pd\n", |
|
|
14 |
"\n", |
|
|
15 |
"import torch\n", |
|
|
16 |
"import os \n", |
|
|
17 |
"\n", |
|
|
18 |
"import torch.optim as optim\n", |
|
|
19 |
"import torch.nn as nn\n", |
|
|
20 |
"import torch.nn.functional as F\n", |
|
|
21 |
"\n", |
|
|
22 |
"from torch.autograd import Variable\n", |
|
|
23 |
"from torch.utils.data.dataset import Dataset\n", |
|
|
24 |
"from torch.utils.data import DataLoader,random_split\n", |
|
|
25 |
"\n", |
|
|
26 |
"from sklearn.model_selection import train_test_split\n", |
|
|
27 |
"\n", |
|
|
28 |
"torch.manual_seed(1234)\n", |
|
|
29 |
"np.random.seed(1234)" |
|
|
30 |
] |
|
|
31 |
}, |
|
|
32 |
{ |
|
|
33 |
"cell_type": "markdown", |
|
|
34 |
"metadata": {}, |
|
|
35 |
"source": [ |
|
|
36 |
"## Instancing on the GPU" |
|
|
37 |
] |
|
|
38 |
}, |
|
|
39 |
{ |
|
|
40 |
"cell_type": "code", |
|
|
41 |
"execution_count": 2, |
|
|
42 |
"metadata": {}, |
|
|
43 |
"outputs": [], |
|
|
44 |
"source": [ |
|
|
45 |
"device = torch.device(\"cuda\")" |
|
|
46 |
] |
|
|
47 |
}, |
|
|
48 |
{ |
|
|
49 |
"cell_type": "markdown", |
|
|
50 |
"metadata": {}, |
|
|
51 |
"source": [ |
|
|
52 |
"## Loading the images" |
|
|
53 |
] |
|
|
54 |
}, |
|
|
55 |
{ |
|
|
56 |
"cell_type": "code", |
|
|
57 |
"execution_count": 5, |
|
|
58 |
"metadata": {}, |
|
|
59 |
"outputs": [ |
|
|
60 |
{ |
|
|
61 |
"name": "stdout", |
|
|
62 |
"output_type": "stream", |
|
|
63 |
"text": [ |
|
|
64 |
"(2670, 3, 32, 32)\n", |
|
|
65 |
"(2670, 7, 3, 32, 32)\n", |
|
|
66 |
"(2670,)\n", |
|
|
67 |
"(2670,)\n" |
|
|
68 |
] |
|
|
69 |
} |
|
|
70 |
], |
|
|
71 |
"source": [ |
|
|
72 |
"Mean_Images = sio.loadmat(\"images.mat\")[\"img\"] #corresponding to the images mean for all the seven windows\n", |
|
|
73 |
"print(np.shape(Mean_Images)) \n", |
|
|
74 |
"\n", |
|
|
75 |
"Images = sio.loadmat(\"images_time.mat\")[\"img\"] #corresponding to the images mean for all the seven windows\n", |
|
|
76 |
"print(np.shape(Images)) \n", |
|
|
77 |
"\n", |
|
|
78 |
"\n", |
|
|
79 |
"Label = (sio.loadmat(\"FeatureMat_timeWin\")[\"features\"][:,-1]-1).astype(int)\n", |
|
|
80 |
"print(np.shape(Label)) \n", |
|
|
81 |
"\n", |
|
|
82 |
"Patient_id = sio.loadmat(\"trials_subNums.mat\")['subjectNum'][0]\n", |
|
|
83 |
"print(np.shape(Patient_id))" |
|
|
84 |
] |
|
|
85 |
}, |
|
|
86 |
{ |
|
|
87 |
"cell_type": "markdown", |
|
|
88 |
"metadata": {}, |
|
|
89 |
"source": [ |
|
|
90 |
"### Dataloader" |
|
|
91 |
] |
|
|
92 |
}, |
|
|
93 |
{ |
|
|
94 |
"cell_type": "code", |
|
|
95 |
"execution_count": 6, |
|
|
96 |
"metadata": {}, |
|
|
97 |
"outputs": [], |
|
|
98 |
"source": [ |
|
|
99 |
"class EEGImagesDataset(Dataset):\n", |
|
|
100 |
" \"\"\"EEGLearn Images Dataset from EEG.\"\"\"\n", |
|
|
101 |
" \n", |
|
|
102 |
" def __init__(self, label, image):\n", |
|
|
103 |
" self.label = Label\n", |
|
|
104 |
" self.Images = image\n", |
|
|
105 |
" \n", |
|
|
106 |
" def __len__(self):\n", |
|
|
107 |
" return len(self.label)\n", |
|
|
108 |
" \n", |
|
|
109 |
" def __getitem__(self, idx):\n", |
|
|
110 |
" if torch.is_tensor(idx):\n", |
|
|
111 |
" idx = idx.tolist()\n", |
|
|
112 |
" image = self.Images[idx]\n", |
|
|
113 |
" label = self.label[idx]\n", |
|
|
114 |
" sample = (image, label)\n", |
|
|
115 |
" \n", |
|
|
116 |
" return sample" |
|
|
117 |
] |
|
|
118 |
}, |
|
|
119 |
{ |
|
|
120 |
"cell_type": "markdown", |
|
|
121 |
"metadata": {}, |
|
|
122 |
"source": [ |
|
|
123 |
"### K-Fold Validation" |
|
|
124 |
] |
|
|
125 |
}, |
|
|
126 |
{ |
|
|
127 |
"cell_type": "code", |
|
|
128 |
"execution_count": 7, |
|
|
129 |
"metadata": {}, |
|
|
130 |
"outputs": [], |
|
|
131 |
"source": [ |
|
|
132 |
"def kfold(length, n_fold):\n", |
|
|
133 |
" tot_id = np.arange(length)\n", |
|
|
134 |
" np.random.shuffle(tot_id)\n", |
|
|
135 |
" len_fold = int(length/n_fold)\n", |
|
|
136 |
" train_id = []\n", |
|
|
137 |
" test_id = []\n", |
|
|
138 |
" for i in range(n_fold):\n", |
|
|
139 |
" test_id.append(tot_id[i*len_fold:(i+1)*len_fold])\n", |
|
|
140 |
" train_id.append(np.hstack([tot_id[0:i*len_fold],tot_id[(i+1)*len_fold:-1]]))\n", |
|
|
141 |
" return train_id, test_id" |
|
|
142 |
] |
|
|
143 |
}, |
|
|
144 |
{ |
|
|
145 |
"cell_type": "markdown", |
|
|
146 |
"metadata": {}, |
|
|
147 |
"source": [ |
|
|
148 |
"# Mean Image" |
|
|
149 |
] |
|
|
150 |
}, |
|
|
151 |
{ |
|
|
152 |
"cell_type": "markdown", |
|
|
153 |
"metadata": {}, |
|
|
154 |
"source": [ |
|
|
155 |
"## Basic Model" |
|
|
156 |
] |
|
|
157 |
}, |
|
|
158 |
{ |
|
|
159 |
"cell_type": "code", |
|
|
160 |
"execution_count": 8, |
|
|
161 |
"metadata": {}, |
|
|
162 |
"outputs": [], |
|
|
163 |
"source": [ |
|
|
164 |
"class BasicCNN(nn.Module):\n", |
|
|
165 |
" '''\n", |
|
|
166 |
" Build the Mean Basic model performing a classification with CNN \n", |
|
|
167 |
"\n", |
|
|
168 |
" param input_image: list of EEG image [batch_size, n_window, n_channel, h, w]\n", |
|
|
169 |
" param kernel: kernel size used for the convolutional layers\n", |
|
|
170 |
" param stride: stride apply during the convolutions\n", |
|
|
171 |
" param padding: padding used during the convolutions\n", |
|
|
172 |
" param max_kernel: kernel used for the maxpooling steps\n", |
|
|
173 |
" param n_classes: number of classes\n", |
|
|
174 |
" return x: output of the last layers after the log softmax\n", |
|
|
175 |
" '''\n", |
|
|
176 |
" def __init__(self, input_image=torch.zeros(1, 3, 32, 32), kernel=(3,3), stride=1, padding=1,max_kernel=(2,2), n_classes=4):\n", |
|
|
177 |
" super(BasicCNN, self).__init__()\n", |
|
|
178 |
"\n", |
|
|
179 |
" n_window = input_image.shape[1]\n", |
|
|
180 |
" n_channel = input_image.shape[2]\n", |
|
|
181 |
"\n", |
|
|
182 |
" self.conv1 = nn.Conv2d(3,32,kernel,stride=stride, padding=padding)\n", |
|
|
183 |
" self.conv2 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n", |
|
|
184 |
" self.conv3 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n", |
|
|
185 |
" self.conv4 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n", |
|
|
186 |
" self.pool1 = nn.MaxPool2d(max_kernel)\n", |
|
|
187 |
" self.conv5 = nn.Conv2d(32,64,kernel,stride=stride,padding=padding)\n", |
|
|
188 |
" self.conv6 = nn.Conv2d(64,64,kernel,stride=stride,padding=padding)\n", |
|
|
189 |
" self.conv7 = nn.Conv2d(64,128,kernel,stride=stride,padding=padding)\n", |
|
|
190 |
"\n", |
|
|
191 |
" self.pool = nn.MaxPool2d((1,1))\n", |
|
|
192 |
" self.drop = nn.Dropout(p=0.5)\n", |
|
|
193 |
"\n", |
|
|
194 |
" self.fc1 = nn.Linear(2048,512)\n", |
|
|
195 |
" self.fc2 = nn.Linear(512,n_classes)\n", |
|
|
196 |
" self.max = nn.LogSoftmax()\n", |
|
|
197 |
" \n", |
|
|
198 |
" def forward(self, x):\n", |
|
|
199 |
" batch_size = x.shape[0]\n", |
|
|
200 |
" x = F.relu(self.conv1(x))\n", |
|
|
201 |
" x = F.relu(self.conv2(x))\n", |
|
|
202 |
" x = F.relu(self.conv3(x))\n", |
|
|
203 |
" x = F.relu(self.conv4(x))\n", |
|
|
204 |
" x = self.pool1(x)\n", |
|
|
205 |
" x = F.relu(self.conv5(x))\n", |
|
|
206 |
" x = F.relu(self.conv6(x))\n", |
|
|
207 |
" x = self.pool1(x)\n", |
|
|
208 |
" x = F.relu(self.conv7(x))\n", |
|
|
209 |
" x = self.pool1(x)\n", |
|
|
210 |
" x = x.reshape(x.shape[0],x.shape[1], -1)\n", |
|
|
211 |
" x = self.pool(x)\n", |
|
|
212 |
" x = x.reshape(x.shape[0],-1)\n", |
|
|
213 |
" x = self.fc1(x)\n", |
|
|
214 |
" x = self.fc2(x)\n", |
|
|
215 |
" x = self.max(x)\n", |
|
|
216 |
" return x" |
|
|
217 |
] |
|
|
218 |
}, |
|
|
219 |
{ |
|
|
220 |
"cell_type": "code", |
|
|
221 |
"execution_count": 10, |
|
|
222 |
"metadata": {}, |
|
|
223 |
"outputs": [], |
|
|
224 |
"source": [ |
|
|
225 |
"EEG = EEGImagesDataset(label=Label, image=Mean_Images)\n", |
|
|
226 |
"\n", |
|
|
227 |
"lengths = [int(2670*0.8), int(2670*0.2)]\n", |
|
|
228 |
"Train, Test = random_split(EEG, lengths)\n", |
|
|
229 |
"\n", |
|
|
230 |
"Trainloader = DataLoader(Train,batch_size=32)\n", |
|
|
231 |
"Testloader = DataLoader(Test, batch_size=32)" |
|
|
232 |
] |
|
|
233 |
}, |
|
|
234 |
{ |
|
|
235 |
"cell_type": "code", |
|
|
236 |
"execution_count": 140, |
|
|
237 |
"metadata": {}, |
|
|
238 |
"outputs": [ |
|
|
239 |
{ |
|
|
240 |
"name": "stderr", |
|
|
241 |
"output_type": "stream", |
|
|
242 |
"text": [ |
|
|
243 |
"/home/vdelv/anaconda3/envs/Pytorch_EEG/lib/python3.7/site-packages/ipykernel_launcher.py:52: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
244 |
] |
|
|
245 |
}, |
|
|
246 |
{ |
|
|
247 |
"name": "stdout", |
|
|
248 |
"output_type": "stream", |
|
|
249 |
"text": [ |
|
|
250 |
"Finished Training\n" |
|
|
251 |
] |
|
|
252 |
} |
|
|
253 |
], |
|
|
254 |
"source": [ |
|
|
255 |
"net = BasicCNN().cuda()\n", |
|
|
256 |
"criterion = nn.CrossEntropyLoss()\n", |
|
|
257 |
"optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n", |
|
|
258 |
"\n", |
|
|
259 |
"for epoch in range(30): # loop over the dataset multiple times\n", |
|
|
260 |
" running_loss = 0.0\n", |
|
|
261 |
" evaluation = []\n", |
|
|
262 |
" for i, data in enumerate(Trainloader, 0):\n", |
|
|
263 |
" # get the inputs; data is a list of [inputs, labels]\n", |
|
|
264 |
" inputs, labels = data\n", |
|
|
265 |
" # zero the parameter gradients\n", |
|
|
266 |
" optimizer.zero_grad()\n", |
|
|
267 |
"\n", |
|
|
268 |
" # forward + backward + optimize\n", |
|
|
269 |
" outputs = net(inputs.to(torch.float32).cuda())\n", |
|
|
270 |
" _, predicted = torch.max(outputs.cpu().data, 1)\n", |
|
|
271 |
" evaluation.append((predicted==labels).tolist())\n", |
|
|
272 |
" loss = criterion(outputs, labels.cuda())\n", |
|
|
273 |
" loss.backward()\n", |
|
|
274 |
" optimizer.step()\n", |
|
|
275 |
" \n", |
|
|
276 |
" running_loss += loss.item()\n", |
|
|
277 |
" running_loss = running_loss/(i+1)\n", |
|
|
278 |
" evaluation = [item for sublist in evaluation for item in sublist]\n", |
|
|
279 |
" running_acc = sum(evaluation)/len(evaluation)\n", |
|
|
280 |
" validation_loss, validation_acc = Test_Model(net, Testloader,True)\n", |
|
|
281 |
"\n", |
|
|
282 |
"print('Finished Training')" |
|
|
283 |
] |
|
|
284 |
}, |
|
|
285 |
{ |
|
|
286 |
"cell_type": "code", |
|
|
287 |
"execution_count": 139, |
|
|
288 |
"metadata": {}, |
|
|
289 |
"outputs": [ |
|
|
290 |
{ |
|
|
291 |
"name": "stderr", |
|
|
292 |
"output_type": "stream", |
|
|
293 |
"text": [ |
|
|
294 |
"/home/vdelv/anaconda3/envs/Pytorch_EEG/lib/python3.7/site-packages/ipykernel_launcher.py:52: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
295 |
] |
|
|
296 |
}, |
|
|
297 |
{ |
|
|
298 |
"name": "stdout", |
|
|
299 |
"output_type": "stream", |
|
|
300 |
"text": [ |
|
|
301 |
"[5, 10]\tloss: 1.384\tAccuracy : 0.277\t\tval-loss: 1.383\tval-Accuracy : 0.311\n", |
|
|
302 |
"[10, 10]\tloss: 1.383\tAccuracy : 0.277\t\tval-loss: 1.382\tval-Accuracy : 0.311\n", |
|
|
303 |
"Finished Training \n", |
|
|
304 |
" loss: 1.383\tAccuracy : 0.277\t\tval-loss: 1.382\tval-Accuracy : 0.311\n" |
|
|
305 |
] |
|
|
306 |
} |
|
|
307 |
], |
|
|
308 |
"source": [ |
|
|
309 |
"res = TrainTest_Model(BasicCNN, Trainloader, Testloader, n_epoch=10)" |
|
|
310 |
] |
|
|
311 |
}, |
|
|
312 |
{ |
|
|
313 |
"cell_type": "code", |
|
|
314 |
"execution_count": 111, |
|
|
315 |
"metadata": {}, |
|
|
316 |
"outputs": [], |
|
|
317 |
"source": [ |
|
|
318 |
"def Test_Model(net, Testloader, is_cuda=True):\n", |
|
|
319 |
" running_loss = 0.0 \n", |
|
|
320 |
" evaluation = []\n", |
|
|
321 |
" for i, data in enumerate(Testloader, 0):\n", |
|
|
322 |
" input_img, labels = data\n", |
|
|
323 |
" optimizer.zero_grad()\n", |
|
|
324 |
" input_img = input_img.to(torch.float32)\n", |
|
|
325 |
" if is_cuda:\n", |
|
|
326 |
" input_img = input_img.cuda()\n", |
|
|
327 |
" outputs = net(input_img)\n", |
|
|
328 |
" _, predicted = torch.max(outputs.cpu().data, 1)\n", |
|
|
329 |
" evaluation.append((predicted==labels).tolist())\n", |
|
|
330 |
" loss = criterion(outputs, labels.cuda())\n", |
|
|
331 |
" running_loss += loss.item()\n", |
|
|
332 |
" running_loss = running_loss/(i+1)\n", |
|
|
333 |
" evaluation = [item for sublist in evaluation for item in sublist]\n", |
|
|
334 |
" running_acc = sum(evaluation)/len(evaluation)\n", |
|
|
335 |
" return running_loss, running_acc" |
|
|
336 |
] |
|
|
337 |
}, |
|
|
338 |
{ |
|
|
339 |
"cell_type": "code", |
|
|
340 |
"execution_count": 138, |
|
|
341 |
"metadata": {}, |
|
|
342 |
"outputs": [], |
|
|
343 |
"source": [ |
|
|
344 |
"def TrainTest_Model(model, trainloader, testloader, n_epoch=30, opti='SGD', learning_rate=0.0001, is_cuda=True, print_epoch =5):\n", |
|
|
345 |
" if is_cuda:\n", |
|
|
346 |
" net = model().cuda()\n", |
|
|
347 |
" else :\n", |
|
|
348 |
" net = model()\n", |
|
|
349 |
" \n", |
|
|
350 |
" criterion = nn.CrossEntropyLoss()\n", |
|
|
351 |
" \n", |
|
|
352 |
" if opti=='SGD':\n", |
|
|
353 |
" optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)\n", |
|
|
354 |
" elif opti =='Adam':\n", |
|
|
355 |
" optimizer = optim.Adam(CNN.parameters(), lr=learning_rate)\n", |
|
|
356 |
" else: \n", |
|
|
357 |
" print(\"Optimizer: \"+optim+\" not implemented.\")\n", |
|
|
358 |
" \n", |
|
|
359 |
" for epoch in range(n_epoch):\n", |
|
|
360 |
" running_loss = 0.0\n", |
|
|
361 |
" evaluation = []\n", |
|
|
362 |
" for i, data in enumerate(Trainloader, 0):\n", |
|
|
363 |
" # get the inputs; data is a list of [inputs, labels]\n", |
|
|
364 |
" inputs, labels = data\n", |
|
|
365 |
" # zero the parameter gradients\n", |
|
|
366 |
" optimizer.zero_grad()\n", |
|
|
367 |
"\n", |
|
|
368 |
" # forward + backward + optimize\n", |
|
|
369 |
" outputs = net(inputs.to(torch.float32).cuda())\n", |
|
|
370 |
" _, predicted = torch.max(outputs.cpu().data, 1)\n", |
|
|
371 |
" evaluation.append((predicted==labels).tolist())\n", |
|
|
372 |
" loss = criterion(outputs, labels.cuda())\n", |
|
|
373 |
" loss.backward()\n", |
|
|
374 |
" optimizer.step()\n", |
|
|
375 |
"\n", |
|
|
376 |
" running_loss += loss.item()\n", |
|
|
377 |
"\n", |
|
|
378 |
" running_loss = running_loss/(i+1)\n", |
|
|
379 |
" evaluation = [item for sublist in evaluation for item in sublist]\n", |
|
|
380 |
" running_acc = sum(evaluation)/len(evaluation)\n", |
|
|
381 |
" validation_loss, validation_acc = Test_Model(net, Testloader,True)\n", |
|
|
382 |
" \n", |
|
|
383 |
" if epoch%print_epoch==(print_epoch-1):\n", |
|
|
384 |
" print('[%d, %3d]\\tloss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
385 |
" (epoch+1, n_epoch, running_loss, running_acc, validation_loss, validation_acc))\n", |
|
|
386 |
" \n", |
|
|
387 |
" print('Finished Training \\n loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
388 |
" (running_loss, running_acc, validation_loss,validation_acc))\n", |
|
|
389 |
" \n", |
|
|
390 |
" return (running_loss, running_acc, validation_loss,validation_acc)" |
|
|
391 |
] |
|
|
392 |
}, |
|
|
393 |
{ |
|
|
394 |
"cell_type": "code", |
|
|
395 |
"execution_count": 477, |
|
|
396 |
"metadata": { |
|
|
397 |
"scrolled": true |
|
|
398 |
}, |
|
|
399 |
"outputs": [ |
|
|
400 |
{ |
|
|
401 |
"name": "stdout", |
|
|
402 |
"output_type": "stream", |
|
|
403 |
"text": [ |
|
|
404 |
"Begin Training Fold 1/5\t of Patient 1\n" |
|
|
405 |
] |
|
|
406 |
}, |
|
|
407 |
{ |
|
|
408 |
"name": "stderr", |
|
|
409 |
"output_type": "stream", |
|
|
410 |
"text": [ |
|
|
411 |
"c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:19: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
412 |
] |
|
|
413 |
}, |
|
|
414 |
{ |
|
|
415 |
"name": "stdout", |
|
|
416 |
"output_type": "stream", |
|
|
417 |
"text": [ |
|
|
418 |
"Finish Training Fold 1/5\t of Patient 1\n", |
|
|
419 |
"Begin Training Fold 2/5\t of Patient 1\n", |
|
|
420 |
"Finish Training Fold 2/5\t of Patient 1\n", |
|
|
421 |
"Begin Training Fold 3/5\t of Patient 1\n", |
|
|
422 |
"Finish Training Fold 3/5\t of Patient 1\n", |
|
|
423 |
"Begin Training Fold 4/5\t of Patient 1\n", |
|
|
424 |
"Finish Training Fold 4/5\t of Patient 1\n", |
|
|
425 |
"Begin Training Fold 5/5\t of Patient 1\n", |
|
|
426 |
"Finish Training Fold 5/5\t of Patient 1\n", |
|
|
427 |
"loss: 0.854\tAccuracy : 0.855\t\tval-loss: 0.956\tval-Accuracy : 0.805\n", |
|
|
428 |
"Begin Training Fold 1/5\t of Patient 2\n", |
|
|
429 |
"Finish Training Fold 1/5\t of Patient 2\n", |
|
|
430 |
"Begin Training Fold 2/5\t of Patient 2\n", |
|
|
431 |
"Finish Training Fold 2/5\t of Patient 2\n", |
|
|
432 |
"Begin Training Fold 3/5\t of Patient 2\n", |
|
|
433 |
"Finish Training Fold 3/5\t of Patient 2\n", |
|
|
434 |
"Begin Training Fold 4/5\t of Patient 2\n", |
|
|
435 |
"Finish Training Fold 4/5\t of Patient 2\n", |
|
|
436 |
"Begin Training Fold 5/5\t of Patient 2\n", |
|
|
437 |
"Finish Training Fold 5/5\t of Patient 2\n", |
|
|
438 |
"loss: 0.824\tAccuracy : 0.872\t\tval-loss: 0.897\tval-Accuracy : 0.852\n", |
|
|
439 |
"Begin Training Fold 1/5\t of Patient 3\n", |
|
|
440 |
"Finish Training Fold 1/5\t of Patient 3\n", |
|
|
441 |
"Begin Training Fold 2/5\t of Patient 3\n", |
|
|
442 |
"Finish Training Fold 2/5\t of Patient 3\n", |
|
|
443 |
"Begin Training Fold 3/5\t of Patient 3\n", |
|
|
444 |
"Finish Training Fold 3/5\t of Patient 3\n", |
|
|
445 |
"Begin Training Fold 4/5\t of Patient 3\n", |
|
|
446 |
"Finish Training Fold 4/5\t of Patient 3\n", |
|
|
447 |
"Begin Training Fold 5/5\t of Patient 3\n", |
|
|
448 |
"Finish Training Fold 5/5\t of Patient 3\n", |
|
|
449 |
"loss: 0.778\tAccuracy : 0.883\t\tval-loss: 0.900\tval-Accuracy : 0.841\n", |
|
|
450 |
"Begin Training Fold 1/5\t of Patient 4\n", |
|
|
451 |
"Finish Training Fold 1/5\t of Patient 4\n", |
|
|
452 |
"Begin Training Fold 2/5\t of Patient 4\n", |
|
|
453 |
"Finish Training Fold 2/5\t of Patient 4\n", |
|
|
454 |
"Begin Training Fold 3/5\t of Patient 4\n", |
|
|
455 |
"Finish Training Fold 3/5\t of Patient 4\n", |
|
|
456 |
"Begin Training Fold 4/5\t of Patient 4\n", |
|
|
457 |
"Finish Training Fold 4/5\t of Patient 4\n", |
|
|
458 |
"Begin Training Fold 5/5\t of Patient 4\n", |
|
|
459 |
"Finish Training Fold 5/5\t of Patient 4\n", |
|
|
460 |
"loss: 0.770\tAccuracy : 0.960\t\tval-loss: 0.797\tval-Accuracy : 0.950\n", |
|
|
461 |
"Begin Training Fold 1/5\t of Patient 6\n", |
|
|
462 |
"Finish Training Fold 1/5\t of Patient 6\n", |
|
|
463 |
"Begin Training Fold 2/5\t of Patient 6\n", |
|
|
464 |
"Finish Training Fold 2/5\t of Patient 6\n", |
|
|
465 |
"Begin Training Fold 3/5\t of Patient 6\n", |
|
|
466 |
"Finish Training Fold 3/5\t of Patient 6\n", |
|
|
467 |
"Begin Training Fold 4/5\t of Patient 6\n", |
|
|
468 |
"Finish Training Fold 4/5\t of Patient 6\n", |
|
|
469 |
"Begin Training Fold 5/5\t of Patient 6\n", |
|
|
470 |
"Finish Training Fold 5/5\t of Patient 6\n", |
|
|
471 |
"loss: 0.814\tAccuracy : 0.888\t\tval-loss: 0.872\tval-Accuracy : 0.872\n", |
|
|
472 |
"Begin Training Fold 1/5\t of Patient 7\n", |
|
|
473 |
"Finish Training Fold 1/5\t of Patient 7\n", |
|
|
474 |
"Begin Training Fold 2/5\t of Patient 7\n", |
|
|
475 |
"Finish Training Fold 2/5\t of Patient 7\n", |
|
|
476 |
"Begin Training Fold 3/5\t of Patient 7\n", |
|
|
477 |
"Finish Training Fold 3/5\t of Patient 7\n", |
|
|
478 |
"Begin Training Fold 4/5\t of Patient 7\n", |
|
|
479 |
"Finish Training Fold 4/5\t of Patient 7\n", |
|
|
480 |
"Begin Training Fold 5/5\t of Patient 7\n", |
|
|
481 |
"Finish Training Fold 5/5\t of Patient 7\n", |
|
|
482 |
"loss: 0.785\tAccuracy : 0.893\t\tval-loss: 0.862\tval-Accuracy : 0.885\n", |
|
|
483 |
"Begin Training Fold 1/5\t of Patient 8\n", |
|
|
484 |
"Finish Training Fold 1/5\t of Patient 8\n", |
|
|
485 |
"Begin Training Fold 2/5\t of Patient 8\n", |
|
|
486 |
"Finish Training Fold 2/5\t of Patient 8\n", |
|
|
487 |
"Begin Training Fold 3/5\t of Patient 8\n", |
|
|
488 |
"Finish Training Fold 3/5\t of Patient 8\n", |
|
|
489 |
"Begin Training Fold 4/5\t of Patient 8\n", |
|
|
490 |
"Finish Training Fold 4/5\t of Patient 8\n", |
|
|
491 |
"Begin Training Fold 5/5\t of Patient 8\n", |
|
|
492 |
"Finish Training Fold 5/5\t of Patient 8\n", |
|
|
493 |
"loss: 0.780\tAccuracy : 0.951\t\tval-loss: 0.795\tval-Accuracy : 0.953\n", |
|
|
494 |
"Begin Training Fold 1/5\t of Patient 9\n", |
|
|
495 |
"Finish Training Fold 1/5\t of Patient 9\n", |
|
|
496 |
"Begin Training Fold 2/5\t of Patient 9\n", |
|
|
497 |
"Finish Training Fold 2/5\t of Patient 9\n", |
|
|
498 |
"Begin Training Fold 3/5\t of Patient 9\n", |
|
|
499 |
"Finish Training Fold 3/5\t of Patient 9\n", |
|
|
500 |
"Begin Training Fold 4/5\t of Patient 9\n", |
|
|
501 |
"Finish Training Fold 4/5\t of Patient 9\n", |
|
|
502 |
"Begin Training Fold 5/5\t of Patient 9\n", |
|
|
503 |
"Finish Training Fold 5/5\t of Patient 9\n", |
|
|
504 |
"loss: 0.769\tAccuracy : 0.965\t\tval-loss: 0.808\tval-Accuracy : 0.930\n", |
|
|
505 |
"Begin Training Fold 1/5\t of Patient 10\n", |
|
|
506 |
"Finish Training Fold 1/5\t of Patient 10\n", |
|
|
507 |
"Begin Training Fold 2/5\t of Patient 10\n", |
|
|
508 |
"Finish Training Fold 2/5\t of Patient 10\n", |
|
|
509 |
"Begin Training Fold 3/5\t of Patient 10\n", |
|
|
510 |
"Finish Training Fold 3/5\t of Patient 10\n", |
|
|
511 |
"Begin Training Fold 4/5\t of Patient 10\n", |
|
|
512 |
"Finish Training Fold 4/5\t of Patient 10\n", |
|
|
513 |
"Begin Training Fold 5/5\t of Patient 10\n", |
|
|
514 |
"Finish Training Fold 5/5\t of Patient 10\n", |
|
|
515 |
"loss: 0.774\tAccuracy : 0.939\t\tval-loss: 0.803\tval-Accuracy : 0.943\n", |
|
|
516 |
"Begin Training Fold 1/5\t of Patient 11\n", |
|
|
517 |
"Finish Training Fold 1/5\t of Patient 11\n", |
|
|
518 |
"Begin Training Fold 2/5\t of Patient 11\n", |
|
|
519 |
"Finish Training Fold 2/5\t of Patient 11\n", |
|
|
520 |
"Begin Training Fold 3/5\t of Patient 11\n", |
|
|
521 |
"Finish Training Fold 3/5\t of Patient 11\n", |
|
|
522 |
"Begin Training Fold 4/5\t of Patient 11\n", |
|
|
523 |
"Finish Training Fold 4/5\t of Patient 11\n", |
|
|
524 |
"Begin Training Fold 5/5\t of Patient 11\n", |
|
|
525 |
"Finish Training Fold 5/5\t of Patient 11\n", |
|
|
526 |
"loss: 0.768\tAccuracy : 0.944\t\tval-loss: 0.832\tval-Accuracy : 0.920\n", |
|
|
527 |
"Begin Training Fold 1/5\t of Patient 12\n", |
|
|
528 |
"Finish Training Fold 1/5\t of Patient 12\n", |
|
|
529 |
"Begin Training Fold 2/5\t of Patient 12\n", |
|
|
530 |
"Finish Training Fold 2/5\t of Patient 12\n", |
|
|
531 |
"Begin Training Fold 3/5\t of Patient 12\n", |
|
|
532 |
"Finish Training Fold 3/5\t of Patient 12\n", |
|
|
533 |
"Begin Training Fold 4/5\t of Patient 12\n", |
|
|
534 |
"Finish Training Fold 4/5\t of Patient 12\n", |
|
|
535 |
"Begin Training Fold 5/5\t of Patient 12\n", |
|
|
536 |
"Finish Training Fold 5/5\t of Patient 12\n", |
|
|
537 |
"loss: 0.767\tAccuracy : 0.924\t\tval-loss: 0.810\tval-Accuracy : 0.944\n", |
|
|
538 |
"Begin Training Fold 1/5\t of Patient 14\n", |
|
|
539 |
"Finish Training Fold 1/5\t of Patient 14\n", |
|
|
540 |
"Begin Training Fold 2/5\t of Patient 14\n", |
|
|
541 |
"Finish Training Fold 2/5\t of Patient 14\n", |
|
|
542 |
"Begin Training Fold 3/5\t of Patient 14\n", |
|
|
543 |
"Finish Training Fold 3/5\t of Patient 14\n", |
|
|
544 |
"Begin Training Fold 4/5\t of Patient 14\n", |
|
|
545 |
"Finish Training Fold 4/5\t of Patient 14\n", |
|
|
546 |
"Begin Training Fold 5/5\t of Patient 14\n", |
|
|
547 |
"Finish Training Fold 5/5\t of Patient 14\n", |
|
|
548 |
"loss: 0.795\tAccuracy : 0.904\t\tval-loss: 0.895\tval-Accuracy : 0.854\n", |
|
|
549 |
"Begin Training Fold 1/5\t of Patient 15\n", |
|
|
550 |
"Finish Training Fold 1/5\t of Patient 15\n", |
|
|
551 |
"Begin Training Fold 2/5\t of Patient 15\n", |
|
|
552 |
"Finish Training Fold 2/5\t of Patient 15\n", |
|
|
553 |
"Begin Training Fold 3/5\t of Patient 15\n", |
|
|
554 |
"Finish Training Fold 3/5\t of Patient 15\n", |
|
|
555 |
"Begin Training Fold 4/5\t of Patient 15\n", |
|
|
556 |
"Finish Training Fold 4/5\t of Patient 15\n", |
|
|
557 |
"Begin Training Fold 5/5\t of Patient 15\n", |
|
|
558 |
"Finish Training Fold 5/5\t of Patient 15\n", |
|
|
559 |
"loss: 0.772\tAccuracy : 0.967\t\tval-loss: 0.768\tval-Accuracy : 0.986\n" |
|
|
560 |
] |
|
|
561 |
} |
|
|
562 |
], |
|
|
563 |
"source": [ |
|
|
564 |
"p = 0\n", |
|
|
565 |
"fold_vloss = np.zeros((n_fold,n_patient))\n", |
|
|
566 |
"fold_loss = np.zeros((n_fold,n_patient))\n", |
|
|
567 |
"fold_vacc = np.zeros((n_fold,n_patient))\n", |
|
|
568 |
"fold_acc = np.zeros((n_fold,n_patient))\n", |
|
|
569 |
"for patient in np.unique(Patient):\n", |
|
|
570 |
" id_patient = np.arange(len(Mean_Images))[Patient==patient]\n", |
|
|
571 |
" n_fold = 5\n", |
|
|
572 |
" length = len(id_patient)\n", |
|
|
573 |
" \n", |
|
|
574 |
" n_patient = len(np.unique(Patient))\n", |
|
|
575 |
" \n", |
|
|
576 |
" train_id, test_id = kfold(length,n_fold)\n", |
|
|
577 |
" \n", |
|
|
578 |
" for fold in range(n_fold):\n", |
|
|
579 |
" X_train = Mean_Images[id_patient[train_id[fold]]]\n", |
|
|
580 |
" X_test = Mean_Images[id_patient[test_id[fold]]]\n", |
|
|
581 |
" y_train = Label[id_patient[train_id[fold]]]\n", |
|
|
582 |
" y_test = Label[id_patient[test_id[fold]]] \n", |
|
|
583 |
"\n", |
|
|
584 |
" print(\"Begin Training Fold %d/%d\\t of Patient %d\" % \n", |
|
|
585 |
" (fold+1,n_fold, patient))\n", |
|
|
586 |
"\n", |
|
|
587 |
" CNN = BasicCNN().cuda(0)\n", |
|
|
588 |
" criterion = nn.CrossEntropyLoss()\n", |
|
|
589 |
" optimizer = optim.SGD(CNN.parameters(), lr=0.001, momentum=0.9)\n", |
|
|
590 |
"\n", |
|
|
591 |
" n_epochs = 50\n", |
|
|
592 |
" for epoch in range(n_epochs):\n", |
|
|
593 |
" running_loss = 0.0\n", |
|
|
594 |
" batchsize = 5\n", |
|
|
595 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
596 |
" optimizer.zero_grad()\n", |
|
|
597 |
"\n", |
|
|
598 |
" # forward + backward + optimize\n", |
|
|
599 |
" outputs = CNN(torch.from_numpy(X_train[i:i+batchsize]).to(torch.float32).cuda())\n", |
|
|
600 |
" loss = criterion(outputs, torch.from_numpy(y_train[i:i+batchsize]).to(torch.long).cuda())\n", |
|
|
601 |
" loss.backward()\n", |
|
|
602 |
" optimizer.step()\n", |
|
|
603 |
" running_loss += loss.item()\n", |
|
|
604 |
"\n", |
|
|
605 |
" #acc\n", |
|
|
606 |
" _, idx = torch.max(CNN(torch.from_numpy(X_train[:]).to(torch.float32).cuda()).data,1)\n", |
|
|
607 |
" acc = (idx == torch.from_numpy(y_train).cuda()).sum().item()/len(y_train)\n", |
|
|
608 |
"\n", |
|
|
609 |
" #val Loss\n", |
|
|
610 |
" val_outputs = CNN(torch.from_numpy(X_test[:]).to(torch.float32).cuda())\n", |
|
|
611 |
" val_loss = criterion(val_outputs, torch.from_numpy(y_test[:]).to(torch.long).cuda())\n", |
|
|
612 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
613 |
" val_acc = (idx == torch.from_numpy(y_test).cuda()).sum().item()/len(y_test)\n", |
|
|
614 |
"\n", |
|
|
615 |
" #if epoch%10==0:\n", |
|
|
616 |
" # print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
617 |
" # (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
618 |
" fold_vloss[fold, p ] = val_loss.item()\n", |
|
|
619 |
" fold_loss[fold, p] = running_loss/i\n", |
|
|
620 |
" fold_vacc[fold, p] = val_acc\n", |
|
|
621 |
" fold_acc[fold, p] = acc\n", |
|
|
622 |
" print('Finish Training Fold %d/%d\\t of Patient %d' % \n", |
|
|
623 |
" (fold+1,n_fold, patient))\n", |
|
|
624 |
" print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
625 |
" (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n", |
|
|
626 |
" \n", |
|
|
627 |
" p = p + 1" |
|
|
628 |
] |
|
|
629 |
}, |
|
|
630 |
{ |
|
|
631 |
"cell_type": "markdown", |
|
|
632 |
"metadata": {}, |
|
|
633 |
"source": [ |
|
|
634 |
"### Peresented Results" |
|
|
635 |
] |
|
|
636 |
}, |
|
|
637 |
{ |
|
|
638 |
"cell_type": "code", |
|
|
639 |
"execution_count": 493, |
|
|
640 |
"metadata": {}, |
|
|
641 |
"outputs": [ |
|
|
642 |
{ |
|
|
643 |
"data": { |
|
|
644 |
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAtcAAAKUCAYAAADPQhSfAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOzdfZild10f/vfHXR4qIWGX4CokkICRDhkrmhTaH2u744pEfq2pIpWxRqCj+dmf2bZAqeBACNFRfKjaKrVFJi0IzooppalNDUjOqFv1MknFBzJFQ3haowLZ5WERhF2+/eOcCbPDzO7M5D7zcPb1uq5z7bmfP585M5P33Pne912ttQAAAA/el2x1AQAAMCqEawAA6IhwDQAAHRGuAQCgI8I1AAB0RLgGAICOCNcAm6yqXlBVR5ZMn6iqJ65l3Q0c639W1fM3uj0A6yNcA9teVX1nVd05CKF/PgiM+7eolodX1ceq6htWWPbTVXXzevfZWjuvtXZvB7XdUFVvWrbvb26tveHB7vssx2xV9bRhHQNgJxGugW2tql6c5GeS/EiSfUken+TfJ7l6lfV3D7Oe1tpnkvxyku9edtxdSSaTDC3IbjdVVUmuSXIsyaaeHa8+/w0Dth2/mIBtq6ouSHJjku9vrb21tfap1trnWmv/vbX20sE6N1TVzVX1pqr6RJIXVNXDqupnquq+wetnquphg/UvrKpfHZx9PlZVv7UY0qrqB6rqz6rqk1X1nqo6uEppb0jynKr60iXznpX+79T/OdjXy6rqvYN93V1V33qGPltVfeXg/aOr6paq+kRV/V6SJy1b999W1YcGy++qqq8fzL8qyQ8m+Y7BGf4/GMyfr6rvGbz/kqp6RVV9oKo+XFVvHHyNU1WXDOp4flV9sKo+WlXTZ/mIvj7JY5P8iyTPq6qHLqv1e6tqYcnX4OsG8y+uqrdW1Ueq6v6q+rnB/NPOvC+pafeSXmaq6n8l+askT6yqFy45xr1V9f8tq+HqqnrX4Ov13qq6qqqeW1V3LVvvJVX1trP0C3BWwjWwnf3dJA9P8l/Pst7VSW5O8qgkb04yneTvJHlqkq9J8rQkrxis+5IkR5M8Jv0z4T+YpFXVk5Ncl+Rvt9YemX5Yfv9KB2ut/XaSP0/ybUtmX5Pkl1prJwfT700/fF6Q5NVJ3lRVX7GGnl+b5DNJviLJPx28lrpj0NfeJL+U5Feq6uGttV9L/+z+Lw+GmXzNCvt+weA1keSJSc5L8nPL1tmf5MlJDia5vqrGzlDr85P89/TP5CfJP1hcUFXPTXJD+mf4z0/yLUnuH5zh/9UkH0hySZLHJTl8hmMsd02Sa5M8crCPDw+Oe36SFyb56SUh/mlJ3pjkpel/b/y99D/TW5Jcuqy370ryi+uoA2BFwjWwnT06yUeXBNbV/E5r7W2ttc+31j6d5J8kubG19uHW2kfSD7fXDNb9XPrB9QmDs+C/1VprSU4leViSp1TVQ1pr72+tvfcMx3xjBkNDqur89AP+A0NCWmu/0lq7b1DTLyf50/RD/qoGwfM5Sa4fnKX/4ywbZtJae1Nr7f7W2snW2r8Z1Pzks3x9Fv2TJD/VWru3tXYiycvTP+O8dCjNq1trn26t/UGSP0j/j5OVav3SJM9N/w+Kz6X/x83SoSHfk+THW2t3tL57WmsfGHwNHpvkpYMeP9NaW88Fm/+5tfbuQf+fa639j9baewfH+I0kb0//j5okmUpyU2vtHYPP4c9aa/+ntfbX6f9B8F2DXi5PP+j/6jrqAFiRcA1sZ/cnuXAN46g/tGz6semf1Vz0gcG8JPmJJPckeftgGMHLkqS1dk+Sf5n+2dYPV9Xhqnps8sDdPBZfjx/s541JJqrqcUm+Pck9rbXfXzxgVX33YDjCx6rqY0nGk1x4lj4ek2T3sn6W9rE4fGGhqj4+2O8Fa9jvopW+LrvTP4O/6C+WvP+r9M9ur+Rbk5xMcutg+s1JvrmqHjOYvjj9s/fLXZzkA2v4g2k1p33WVfXNVfW7gyE+H0vy7Hzh67FaDUn/j5bvrHpg3PhbBqEb4EERroHt7HfSHyLxj86yXls2fV+SJyyZfvxgXlprn2ytvaS19sQk/zDJixfHVrfWfqm1tn+wbUvyY4P55y15fXAw74NJfiv9s8HXpB+2kyRV9YQkv5D+MJNHt9YeleSPk9RZ+vhI+oH14mW1L+7365P8QJJ/nGTPYL8fX7Lf5V+H5Vb6upxM8pdn2W4lz08/eH+wqv4iya8keUj6F3Um/RD8pBW2+1CSx6/yB9Onkiwdx/7lK6zzQI/VH0f/X5L8ZJJ9g6/HrfnC12O1GtJa+90kn03/LPd3xpAQoCPCNbBttdY+nuT6JK+tqn9UVV9aVQ8ZnK388TNsOpfkFVX1mKq6cLCPNyVJVf2DqvrKwRnLT6Q/HORUVT25qr5hENg+k+TTg2Vn8ob0A/Qz0j9zu+gR6YfAjwyO+cL0z1yfrd9TSd6a5IZBr0/J6UMtHpl+GP5Ikt1VdX36Y40X/WWSS2r1u2jMJXlRVV1aVeflC2O013UWeXC2/mD6Y52fmi+Mbf+xJfW+Psm/qqorqu8rB390/F7649VfU1WPqP6tDZ8x2OZdSf5eVT1+cKHly89SykPTHxbzkSQnq+qbk3zTkuWzSV5YVQerfzHn46rqby5Z/sb0x5yfXOfQFIBVCdfAttZa+6kkL07/gsSPpH828rokZ7qzww8nuTPJHyb5oyT/ezAvSS5L8utJTqR/Zvzft9bm0w9pr0ny0fSHRnxZ+hc7nsnNSfYkeWdr7c+X1Hx3kn8z2P9fJvnqJP9rLf0OejtvUMN/TvKfliy7Lf27kfxJ+kM6PpPTh0n8yuDf+6vqf6+w75vSP0P7m0neN9j+0BrrWuqaJO9qrb29tfYXi68k/y7J36qq8dbarySZSf+iy0+m/3ntHfwB8Q+TfGWSD6Z/cel3JElr7R3pj4X+wyR35SxjoFtrn0zyz5O8Jcnx9M9A37Jk+e9lcJFj+mf4fyOnn7n/xfT/6HHWGuhM9a/jAYBzS1X9jfTvNvJ1rbU/3ep6gNHgzDUA56p/luQOwRro0lCfZAYA21FVvT/9Cx/PdrEswLoYFgIAAB0xLAQAADoiXANsgqpqVfWVHe/zB6vq9V3uE4AHR7gG2KFaaz/SWvuejWxbVc+qqt+sqk9W1Ueq6jeq6lsGy14w+GPgpcu2OVpVBwbvbxis89wly3cP5l2y4aYAdjjhGuAcU1Xfnv49sd+Y5KL0H39+ffr3n150LMkPVNX5X7yH09a5sap2DatWgJ1GuAbYPM+uqnur6qNV9ROLT1KsqidV1e1Vdf9g2Zur6lGLG1XVD1TVnw3OMr9n8XHtg7PHb1qy3v6q+u2q+lhVfaiqXrC8gMGTKX8qyQ+11l7fWvt4a+3zrbXfaK1975JVF9J/CM6LztDPr6X/CPHvehBfE4CRIlwDbJ5vTXJlkq9LcnWSfzqYX0l+NMljk4wluTjJDUlSVU9O/6mNf7u19sgkz0ry/
uU7rqrHp//0xp9N8pj0H0n+rhVqePJg/zevod5Xpv+49L2rLG+DdV5VVQ9Zw/4ARp5wDbB5fqy1dqy19sEkP5NkMklaa/e01t7RWvvr1tpH0j+z/PcH25xK/9HsT6mqh7TW3t9ae+8K+/4nSX69tTbXWvtca+3+1tpK4frRg3//fIVlpxls//YkP3CGdW5J/7H0Gxr7DTBqhGuAzfOhJe8/kP6Z6lTVl1XV4cHQj08keVOSC5N+8E7yL9M/k/3hwXqPXWHfFydZKXQvd//g369YY83XJ/lnVfXlZ1jnFUmmkzx8jfsEGFnCNcDmuXjJ+8cnuW/w/kfTH2Lxt1pr56c/hrkWV2yt/VJrbX+SJwzW+7EV9v2hJE9aQw3vGaz7nLUU3Fr7P0nemuQHz7DOO5Lck+T/X8s+AUaZcA2weV5aVXuq6uIk/yLJLw/mPzLJiSQfq6rHJXngFnhV9eSq+oaqeliSzyT5dPpDRZZ7c5JvrKp/PLgl3qOr6qnLV2r9x/K+OMkrq+qFVXV+VX3J4GLI161S96uTvDDJo1ZZnvTPXP/rMzUPcC4QrgE2z39Lclf6Fxr+jySzg/mvTv8ix48P5r91yTYPS/KaJB9N8hdJviwrnEUejON+dpKXpH+LvHcl+ZqVimit3ZzkO9K/oPK+JH+Z5IcH9a20/vuS/GKSR6zWWGvtfyX5vdWWA5wrqn8SAwAAeLCcuQYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoyO6tLqArF154YbvkkkuGfpxPfepTecQjHjH042yWUepnlHpJRqufUeolGa1+RqmXZLT6GaVektHqZ5R6SUarn83q5a677vpoa+0xKy0bmXB9ySWX5M477xz6cebn53PgwIGhH2ezjFI/o9RLMlr9jFIvyWj1M0q9JKPVzyj1koxWP6PUSzJa/WxWL1X1gdWWGRYCAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4MLVxX1U1V9eGq+uNVlldV/buquqeq/rCqvm7JsudX1Z8OXs8fVo0A56q5ubmMj4/n4MGDGR8fz9zc3FaXBNuenxvWYvcQ9/2fk/xckjeusvybk1w2eD09yc8neXpV7U3yqiRXJmlJ7qqqW1prx4dYK8A5Y25uLtPT05mdnc2pU6eya9euTE1NJUkmJye3uDrYnvzcsFZDO3PdWvvNJMfOsMrVSd7Y+n43yaOq6iuSPCvJO1prxwaB+h1JrhpWnQDnmpmZmczOzmZiYiK7d+/OxMREZmdnMzMzs9Wlwbbl54a1qtba8HZedUmSX22tja+w7FeTvKa1dmQw/c4kP5DkQJKHt9Z+eDD/lUk+3Vr7yRX2cW2Sa5Nk3759Vxw+fHg4jSxx4sSJnHfeeUM/zmYZpX5GqZdkZ/YzMTGxoe16vV7HlTx4o9TLcgcPHsxtt92W3bt3P/B9dvLkyTzrWc/KO9/5zq0u70HZiT83qxmlXpKd34+fm813YP7qTTvW/IH/tq71JyYm7mqtXbnSsmEOCzmbWmFeO8P8L57Z2uuSvC5JrrzyynbgwIHOilvN/Px8NuM4m2WU+hmlXpKd2c9qf6xX1arLtqsz1bsT+1lqbGwsu3btyoEDBx74Puv1ehkbG9tx33PL7cSfm9WMUi/Jzu/Hz80WOPDxdW+y0V7Wv8XqtvJuIUeTXLxk+qIk951hPgAdmJ6eztTUVHq9Xk6ePJler5epqalMT09vdWmwbfm5Ya228sz1LUmuq6rD6V/Q+PHW2p9X1W1JfqSq9gzW+6YkL9+qIgFGzeLFV4cOHcrCwkLGxsYyMzPjoiw4Az83rNXQwnVVzaV/lv3Cqjqa/h1AHpIkrbX/kOTWJM9Ock+Sv0rywsGyY1X1Q0nuGOzqxtbamS6MBGCdJicnMzk5uX3/dzBsQ35uWIuhhevW2hn/lGv9AYvfv8qym5LcNIy6AABgWDyhEQAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAzN3NxcxsfHc/DgwYyPj2dubm6rS4Kh2r3VBQAAo2lubi7T09OZnZ3NqVOnsmvXrkxNTSVJJicnt7g6GA5nrgGAoZiZmcns7GwmJiaye/fuTExMZHZ2NjMzM1tdGgyNcA0ADMXCwkL2799/2rz9+/dnYWFhiyqC4ROuAYChGBsby5EjR06bd+TIkYyNjW1RRTB8xlwDwJDt3bs3x48fH/px9uzZk2PHjg39OGs1PT2dqampB8Zc93q9TE1NGRbCSBOuAWDIjh8/ntbauraZn5/PgQMH1rVNVa1r/WFbvGjx0KFDWVhYyNjYWGZmZlzMyEgTrmETnKtnrQAmJyczOTm5oT8WYCcy5ho2weJZq/W8er3eurfZjAAPAKxOuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI7s3uoCAAC2zA0XrHuTA0kyv5FjfXwDG7HTDDVcV9VVSf5tkl1JXt9ae82y5U9IclOSxyQ5luS7WmtHB8tOJfmjwaofbK19yzBrBWDnmpuby8zMTBYWFjI2Npbp6elMTk5udVnsAPXqT6S1tq5t5ufnc+DAgfUdpyrthnVtwg41tHBdVbuSvDbJM5McTXJHVd3SWrt7yWo/meSNrbU3VNU3JPnRJNcMln26tfbUYdUHwGiYm5vL9PR0Zmdnc+rUqezatStTU1NJImADm26YY66fluSe1tq9rbXPJjmc5Opl6zwlyTsH73srLAeAM5qZmcns7GwmJiaye/fuTExMZHZ2NjMzM1tdGmwbVbXqa2JiYtVlrN8wh4U8LsmHlkwfTfL0Zev8QZLnpD905FuTPLKqHt1auz/Jw6vqziQnk7ymtfa25QeoqmuTXJsk+/bty/z8fOdNLHfixIlNOc5mGaV+tnMv7VXnr3tc
34Fk3WP62qvO37Zfg+1a10aNSj/b+edmrRYWFnLq1KnMz88/0M+pU6eysLCwrXpbby0b/Wy2U89LbefvtXPhs+n1eqsum5iYWHX5dv3MVrMtvs9aa0N5JXlu+uOsF6evSfKzy9Z5bJK3Jvn99AP20SQXLC4b/PvEJO9P8qQzHe+KK65om6HX623KcTbLKPUzSr20Nlr99H/VjI5R6mcUvs8uv/zydvvtt7fWvtDP7bff3i6//PItrOp0G/me2chns52/N7fr95rPZnvXtl6b9X2W5M62SiYd5pnro0kuXjJ9UZL7lq7QWrsvybclSVWdl+Q5rbWPL1mW1tq9VTWf5GuTvHeI9bJDbPR/U7V1XrDCyvbu3Zvjx4+ve7v1fm579uzJsWPH1n0czj3T09OZmpp6YMx1r9fL1NSUYSHAlhhmuL4jyWVVdWmSP0vyvCTfuXSFqrowybHW2ueTvDz9O4ekqvYk+avW2l8P1nlGkh8fYq3sIKuF5KoSoDfB8ePHN+3KeliLxYsWDx069MDdQmZmZlzMCGyJoYXr1trJqrouyW3p34rvptbau6vqxvRPpd+S/rDSH62qluQ3k3z/YPOxJP+xqj6f/kWXr2mn32UEgOXO4fv1Tk5OZnJyckN/yAF0aaj3uW6t3Zrk1mXzrl/y/uYkN6+w3W8n+eph1gYwcjYQeIVRgG55/DkAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAMCONjc3l/Hx8Rw8eDDj4+OZm5vbslp2b9mRAYAdae/evTl+/PjQj7Nnz54cO3Zs6MdhZ5ubm8v09HRmZ2dz6tSp7Nq1K1NTU0mSycnJTa/HmWsAYF2OHz+e1tq6Xr1eb93bbEaAZ+ebmZnJ7OxsJiYmsnv37kxMTGR2djYzMzNbUo9wDQDAjrWwsJD9+/efNm///v1ZWFjYknqEawAAdqyxsbEcOXLktHlHjhzJ2NjYltRjzDUAcE6rqqEfY8+ePUM/xrlqeno6U1NTD4y57vV6mZqa2rJhIcI1AHDOaq2te5v5+fkcOHCg+2LYkMWLFg8dOpSFhYWMjY1lZmZmSy5mTIRrABi69qrzkxsuWNc2B5JkfgPHgXPQ5ORkJicnt8UfPsI1AAzbDR9f9yZVtaGzqsDWckEjAAB0RLgGABgRe/fuTVWt65Vk3dvs3bt3izvdvoRrAIAR4QE/W8+Ya+CcttHHOK/31l0e47wB67wAMNnYRYD9Y61/TDTASoRr4Jy2eJZnPTZyNfpm3Ed35Gwg8G6HOwUA5zbDQgDOQXNzcxkfH8/BgwczPj6eubm5rS4JYCQ4cw1wjpmbm8v09PQDTzPbtWtXpqamkmTLHroAMCqcuQY4x8zMzGR2djYTExPZvXt3JiYmMjs7u2WPCgYYJcI1wDlmYWEh+/fvP23e/v37s7CwsEUVAYwO4RrgHDM2NpYjR46cNu/IkSMZGxvboooARodwDXCOmZ6eztTUVHq9Xk6ePJler5epqalMT09vdWkAO54LGgHOMYsXLR46dCgLCwsZGxvLzMyMixkBOiBcA5yDJicnMzk56b7QAB0zLAQAADoiXAMAQEeEawAA6IhwDQAAHRGuAQCgI8I1AAB0RLgGAICOuM81ALAu7VXnJzdcsK5tDiTJ/AaOAzuMcA0ArM8NH1/3Jh5YxLnCsBAAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA64j7Xo2SdN/RPNnZT//6x1n+PUwC+WFVtaFlrbRjlAA+ScD1C6tWfWPcv243c1L+q0m5Y1yYArGK139seugI7k3ANADAiPJp+6w01XFfVVUn+bZJdSV7fWnvNsuVPSHJTksckOZbku1prRwfLnp/kFYNVf7i19oZh1grbxdzcXGZmZrKwsJCxsbFMT09ncnJyq8sCYAfwf7G33tDCdVXtSvLaJM9McjTJHVV1S2vt7iWr/WSSN7bW3lBV35DkR5NcU1V7k7wqyZVJWpK7BtseH1a9sB3Mzc1leno6s7OzOXXqVHbt2pWpqakkEbABYAcY5t1Cnpbkntbava21zyY5nOTqZes8Jck7B+97S5Y/K8k7WmvHBoH6HUmuGmKtsC3MzMxkdnY2ExMT2b17dyYmJjI7O5uZmZmtLg0AWINhDgt5XJIPLZk+muTpy9b5gyTPSX/oyLcmeWRVPXqVbR+3/ABVdW2Sa5Nk3759mZ+f76r2VZ04cWJTjrNR661to/1s16/Bdq1rrRYWFnLq1KnMz88/8NmcOnUqCwsL26q3Ufs+G7V+1mO7/05br1HqZ5R6SUarn+3ei99p81tbRGttKK8kz01/nPXi9DVJfnbZOo9N8tYkv59+wD6a5IIkL03yiiXrvTLJS850vCuuuKJthl6vtynH2Yj+x7k+G+lnI8fZDNu1rvW4/PLL2+23395a+8Jnc/vtt7fLL798C6s63ah9n41aP+u1nX+nbcQo9TNKvbQ2Wv1s5178TuttynGS3NlWyaTDHBZyNMnFS6YvSnLf0hVaa/e11r6ttfa1SaYH8z6+lm1hFE1PT2dqaiq9Xi8nT55Mr9fL1NRUpqent7o0AGANhjks5I4kl1XVpUn+LMnzknzn0hWq6sIkx1prn0/y8vTvHJIktyX5karaM5j+psFyGGmLFy0eOnTogbuFzMzMbKuLGd3mCQBWN7Rw3Vo7WVXXpR+UdyW5qbX27qq6Mf1T6bek/9/cH62qluQ3k3z/YNtjVfVD6Qf0JLmxtXZsWLXCdjI5OZnJyclt+wAJt3kCgNUN9T7XrbVbk9y6bN71S97fnOTmVba9KV84kw0AANveMMdcAwDAOUW4BgCAjgjXAADQEeEaAAA6MtQLGneyqtrQduu9iwIAAKNDuF7FaiG5qgRoAABWZFgIAAB0RLgGAICOCNcAANAR4RoAADoiXAMAQEeEawCANZibm8v4+HgOHjyY8fHxzM3NbXVJbENuxQcAcBZzc3OZnp7O7OxsTp06lV27dmVqaipJMjk5ucXVsZ04cw0AcBYzMzOZnZ3NxMREdu/enYmJiczOzmZmZmarS2ObEa4BAM5iYWEh+/fvP23e/v37s7CwsEUVsV0J1wAAZzE2NpYjR46cNu/IkSMZGxvboorYroRrAICzmJ6eztTUVHq9Xk6ePJler5epqalMT09vdWlsMy5oBAA4i8WLFg8dOpSFhYWMjY1lZmbGxYx8EeEaAGANJicnMzk5mfn5+Rw4cGCry2GbMiwEAAA6IlwDAEBHDAsBABghVTX0Y+zZs2fox9ipnLkGABgRrbV1v3q93rq3OXbs2Fa3um0J1wAA0BHhGgAAOmLMNXBOa686P7nhgnVtcyBJ5jdwHABGnnANnNPq1Z9Ia21d22zkHrdVlXbDujYBYAcyLAQAADoiXAMAQEeEa7alvXv3pqrW9Uqy7m327t27xZ0CAKNEuGZbOn78+Kbcp/P48eNb3SoAMEKEawAA6IhwDQAAHRG
uAYChmZuby/j4eA4ePJjx8fHMzc1tdUkwVO5zDQAMxdzcXKanpzM7O5tTp05l165dmZqaSpJMTk5ucXUwHM5cAwBDMTMzk9nZ2UxMTGT37t2ZmJjI7OxsZmZmtro0GBrhGgAYioWFhezfv/+0efv378/CwsIWVQTDJ1wDAEMxNjaWI0eOnDbvyJEjGRsb26KKYPjO6XDtQSUAMDzT09OZmppKr9fLyZMn0+v1MjU1lenp6a0uDYbmnL6gcfFBJesxPz+fAwcOrGubxVAOAOeSxYsWDx06lIWFhYyNjWVmZsbFjIy0czpcAwDDNTk5mcnJyQ2dnIKd6JweFgIAAF0SrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQkaGG66q6qqreU1X3VNXLVlj++KrqVdXvV9UfVq/LnxQAACAASURBVNWzB/MvqapPV9W7Bq//MMw6AQCgC7uHteOq2pXktUmemeRokjuq6pbW2t1LVntFkre01n6+qp6S5NYklwyWvbe19tRh1QcAAF0b5pnrpyW5p7V2b2vts0kOJ7l62TotyfmD9xckuW+I9QAAwFAN7cx1kscl+dCS6aNJnr5snRuSvL2qDiV5RJJvXLLs0qr6/SSfSPKK1tpvLT9AVV2b5Nok2bdvX+bn59dd5Hq3OXHixKYcZ6NGqZ9R6mUjNtrPZhi1z2bU+lmP7fx9thGj1M8o9ZKMVj+j1EsyWv1si15aa0N5JXluktcvmb4myc8uW+fFSV4yeP93k9yd/tn0hyV59GD+FemH9PPPdLwrrriirVe//fXp9XqbcpyNGKV+RqmXjdpIP5th1D6bUetnvbbr99lGjVI/o9RLa6PVzyj10tpo9bNZvSS5s62SSYc5LORokouXTF+ULx72MZXkLen/V+d3kjw8yYWttb9urd0/mH9Xkvcm+aoh1goAAA/aMMP1HUkuq6pLq+qhSZ6X5JZl63wwycEkqaqx9MP1R6rqMYMLIlNVT0xyWZJ7h1grAAA8aEMbc91aO1lV1yW5LcmuJDe11t5dVTemfyr9liQvSfILVfWi9C9ufEFrrVXV30tyY1WdTHIqyfe11o4Nq1YAAOjCMC9oTGvt1vRvr7d03vVL3t+d5BkrbPdfkvyXYdbG9tZedX5ywwXr2uZAksxv4DisW1UN/Rh79uwZ+jEAoGtDDdewUfXqTyxe9Lpm8/PzOXDgwPqOU5V2w7o2Oeet93NJNvbZAMBO5PHnAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjpzTD5HxFEAAALp0TodrTwEEAKBLhoUAAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQkd1bXQDAVquqoR9jz549Qz8GAFtPuAbOaa21dW9TVRvaDoDRZ1gIAAB0RLgGAICOCNcAANCRs4brqrquqlyJAwAAZ7GWM9dfnuSOqnpLVV1Vm3FZPQAA7EBnDdettVckuSzJbJIXJPnTqvqRqnrSkGsDAIAdZU1jrlv/nlN/MXidTLInyc1V9eNDrA0AAHaUs97nuqr+eZLnJ/loktcneWlr7XNV9SVJ/jTJvx5uiQAAsDOs5SEyFyb5ttbaB5bObK19vqr+wXDKAgCAnWctw0JuTXJscaKqHllVT0+S1trCsAoDAICdZi3h+ueTnFgy/anBPAAAYIm1hOsaXNCYpD8cJGsbTgIAAOeUtYTre6vqn1fVQwavf5Hk3mEXBgAAO81awvX3Jfl/kvxZkqNJnp7k2mEWBQAAO9FZh3e01j6c5HmbUAsAAOxoa7nP9cOTTCW5PMnDF+e31v7pEOsCAIAdZy3DQn4xyZcneVaS30hyUZJPDrMoAADYidYSrr+ytfbKJJ9qrb0hyf+b5KuHWxYAAOw8awnXnxv8+7GqGk9yQZJLhlYRAADsUGu5X/XrqmpPklckuSXJeUleOdSqAABgBzpjuK6qL0nyidba8SS/meSJm1IVAADsQGccFjJ4GuN1m1QLAADsaGsZc/2OqvpXVXVxVe1dfA29MgAA2GHWMuZ68X7W379kXoshIgAAcJq1PKHx0s0oBAAAdrq1PKHxu1ea31p7Y/flAADAzrWWMdd/e8nr65PckORb1rLzqrqqqt5TVfdU1ctWWP74qupV1e9X1R9W1bOXLHv5YLv3VNWz1tQNAABsobUMCzm0dLqqLkj/kehnVFW7krw2yTOTHE1yR1Xd0lq7e8lqr0jyltbaz1fVU5LcmuSSwfvnJbk8yWOT/HpVfVVr7dQa+wIAgE23ljPXy/1VksvWsN7TktzTWru3tfbZJIeTXL1snZbk/MH7C5LcN3h/dZLDrbW/bq29L8k9g/0BAMC2tZYx1/89/RCc9MP4U5K8ZQ37flySDy2ZPprk6cvWuSHJ26vqUJJHJPnGJdv+7rJtH7dCbdcmuTZJ9u3bl/n5+TWUdbr1bnPixIlNOc5GjVI/o9TLRmy0n+1oJ/YyMTFxxuVVteL8Xq83jHKGZid+NmcySv2MUi/JaPUzSr0ko9XPtuiltXbGV5K/v+T1jCQXnW2bwXbPTfL6JdPXJPnZZeu8OMlLBu//bpK70w/wr03yXUvWm03ynDMd74orrmjr1W9/fXq93qYcZyNGqZ9R6mWjNtLPdjVKvbQ2Wv2MUi+tjVY/o9RLa6PVzyj10tpo9bNZvSS5s62SSddyn+sPJvnz1tpnkqSq/kZVXdJae/9Ztjua5OIl0xflC8M+Fk0luWoQ8n+nqh6e5MI1bgsAANvKWsZc/0qSzy+ZPjWYdzZ3JLmsqi6tqoemf4HiLcvW+WCSg0lSVWNJHp7kI4P1nldVD6uqS9Mf4/17azgmAABsmbWcud7d+hckJklaa58dhOUzaq2drKrrktyWZFeSm1pr766qG9M/lX5Lkpck+YWqelH647pfMDjV/u6qekv6w0ROJvn+5k4hAABsc2sJ1x+pqm8ZhOFU1dVJPrqWnbfWbk3/9npL512/5P3d6Y/jXmnbmSQzazkOAABsB2sJ19+X5M1V9XOD6aNJVnxqIwAAnMvW8hCZ9yb5O1V1XpJqrX1y+GXB6rc669KePXuGfgwA4Nxx1gsaq+pHqupRrbUTrbVPVtWeqvrhzSiOc9dqt7c502sj2x07dmyLOwUARsla7hbyza21jy1OtNaOJ3n28EoCAICdaS3heldVPWxxoqr+RpKHnWF9AAA4J63lgsY3JXlnVf2nwfQLk7xheCUBAMDOtJYLGn+8qv4wyTcmqSS/luQJwy4MAAB2mrUMC0mSv0j/KY3PSf+JigtDqwgAAHaoVc9cV9VXpf/I8skk9yf55fRvxTexSbUBAMCOcqZhIf8nyW8l+YettXuSZPCYcgAAYAVnGhbynPSHg/Sq6heq6mD6Y64BAIAVrBquW2v/tbX2HUn+ZpL5JC9Ksq+qfr
6qvmmT6gMAgB1jLXcL+VSSNyd5c1XtTfLcJC9L8vYh18YGeGQ4AMDWWct9rh/QWjuW5D8OXmwzi48AX4+q2tB2AAB8sbXeig8AADiLdZ25HkWGUQAA0JVz+sx1a23dr16vt+5tjh07ttWtAgCwCc7pcA0AAF0SrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB3ZvdUFMHxVtaHlrbVhlAMAMLKcuT4HtNZWffV6vVWXAQCwPsI1AAB0RLgGAICOCNcAANAR4RoAADoiXAMAQEeEawAA6IhwDQAAHRGuAQCgI8I1AAB0RLgGAICODDVcV9VVVfWeqrqnql62wvKfrqp3DV5/UlUfW7Ls1JJltwyzTgAA6MLuYe24qnYleW2SZyY5muSOqrqltXb34jqttRctWf9Qkq9dsotPt9aeOqz6AACga8M8c/20JPe01u5trX02yeEkV59h/ckkc0OsBwAAhqpaa8PZcdW3J7mqtfY9g+lrkjy9tXbdCus+IcnvJrmotXZqMO9kknclOZnkNa21t62w3bVJrk2Sffv2XXH48OGh9LLUiRMnct555w39OJtllPqZmJhIr9fb6jI6M0qfzSj1koxWP6PUSzJa/YxSL8lo9TNKvSSj1c9m9TIxMXFXa+3KlZYNbVhIklph3mpJ/nlJbl4M1gOPb63dV1VPTHJ7Vf1Ra+29p+2stdcleV2SXHnlle3AgQMdlH1m8/Pz2YzjbJZR62eUehmlz2aUeklGq59R6iUZrX5GqZdktPoZpV6S0epnO/QyzGEhR5NcvGT6oiT3rbLu87JsSEhr7b7Bv/cmmc/p47EBAGDbGWa4viPJZVV1aVU9NP0A/UV3/aiqJyfZk+R3lszbU1UPG7y/MMkzkty9fFsAANhOhjYspLV2sqquS3Jbkl1JbmqtvbuqbkxyZ2ttMWhPJjncTh/8PZbkP1bV59P/A+A1S+8yAgAA29Ewx1yntXZrkluXzbt+2fQNK2z320m+epi1AQBA1zyhEQAAOiJcAwBAR4RrAADoyFDHXMMwVK10C/WzLxvWA5MAABY5c82O01pb8dXr9VZdJlgDAJtBuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0JGhhuuquqqq3lNV91TVy1ZY/tNV9a7B60+q6mNLlj2/qv508Hr+MOsEAIAu7B7WjqtqV5LXJnlmkqNJ7qiqW1prdy+u01p70ZL1DyX52sH7vUleleTKJC3JXYNtjw+rXgAAeLCGeeb6aUnuaa3d21r7bJLDSa4+w/qTSeYG75+V5B2ttWODQP2OJFcNsVYAAHjQqrU2nB1XfXuSq1pr3zOYvibJ01tr162w7hOS/G6Si1prp6rqXyV5eGvthwfLX5nk0621n1y23bVJrk2Sffv2XXH48OGh9LLUiRMnct555w39OJtllPoZpV6S0epnlHpJRqufUeolGa1+RqmXZLT6GaVektHqZ7N6mZiYuKu1duVKy4Y2LCRJrTBvtST/vCQ3t9ZOrWfb1trrkrwuSa688sp24MCBDZS5PvPz89mM42yWUepnlHpJRqufUeolGa1+RqmXZLT6GaVektHqZ5R6SUarn+3QyzCHhRxNcvGS6YuS3LfKus/LF4aErHdbAADYFoYZru9IcllVXVpVD00/QN+yfKWqenKSPUl+Z8ns25J8U1Xtqao9Sb5pMA8AALatoQ0Laa2drKrr0g/Fu5Lc1Fp7d1XdmOTO1tpi0J5McrgtGfzdWjtWVT+UfkBPkhtba8eGVSsAAHRhmGOu01q7Ncmty+Zdv2z6hlW2vSnJTUMrDgAAOuYJjQAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGhiaubm5jI+P5+DBgxkfH8/c3NxWlwQAQ7V7qwsARtPc3Fymp6czOzubU6dOZdeuXZmamkqSTE5ObnF1ADAczlwDQzEzM5PZ2dlMTExk9+7dmZiYyOzsbGZmZra6NAAYGuEaGIqFhYXs37//tHn79+/PwsLCFlUEAMMnXANDMTY2liNHjpw278iRIxkbG9uiigBg+IRrYCimp6czNTWVXq+XkydPptfrZWpqKtPT01tdGgAMjQsagaFYvGjx0KFDWVhYyNjYWGZmZlzMCMBIE66BoZmcnMzk5GTm5+dz4MCBrS4HAIbOsBAAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgjXAADQEeEaAAA6IlwDAEBHhGsAAOiIcA0AAB0RrgEAoCPCNQAAdES4BgCAjgw1XFfVVVX1nqq6p6petso6/7iq7q6qd1fVLy2Zf6qq3jV43TLMOgEAoAu7h7XjqtqV5LVJnpnkaJI7quqW1trdS9a5LMnLkzyjtXa8qr5syS4+3Vp76rDqAwCArg3zzPXTktzTWru3tfbZJIeTXL1sne9N8trW2vEkaa19eIj1PChzc3MZHx/PwYMHMz4+nrm5ua0uCQCAbaZaa8PZcdW3J7mqtfY9g+lrkjy9tXbdknXeluRPkjwjya4kN7TWfm2w7GSSdyU5meQ1rbW3rXCMa5NcmyT79u274vDhw0Pp5Z3vfGdmZ2fz0pe+NJdeemne97735Sd+4icyNTWVgwcPDuWYm+XEiRM577zztrqMToxSL8lo9TNKvSSj1c8o9ZKMVj+j1EsyWv2MUi/JaPWzWb1MTEzc1Vq7csWFrbWhvJI8N8nrl0xfk+Rnl63zq0n+a5KHJLk0/eEjjxose+zg3ycmeX+SJ53peFdccUUblssvv7zdfvvtrbXWer1ea62122+/vV1++eVDO+ZmWexnFIxSL62NVj+j1Etro9XPKPXS2mj1M0q9tDZa/YxSL62NVj+b1UuSO9sqmXSYw0KOJrl4yfRFS
e5bYZ3/1lr7XGvtfUnek+SyJGmt3Tf4994k80m+doi1ntHCwkL2799/2rz9+/dnYWFhiyoCAGA7Gma4viPJZVV1aVU9NMnzkiy/68fbkkwkSVVdmOSrktxbVXuq6mFL5j8jyd3ZImNjYzly5Mhp844cOZKxsbEtqggAgO1oaOG6tXYyyXVJbkuykOQtrbV3V9WNVfUtg9VuS3J/Vd2dpJfkpa21+5OMJbmzqv5gMP81bcldRjbb9PR0pqam0uv1cvLkyfR6vUxNTWV6enqrSgIAYBsa2q34kqS1dmuSW5fNu37J+5bkxYPX0nV+O8lXD7O29ZicnEySHDp0KAsLCxkbG8vMzMwD8wEAIBlyuB4lk5OTmZyczPz8fA4cOLDV5QAAsA15/DkAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrAADoiHANAAAdEa4BAKAjwjUAAHREuAYAgI4I1wAA0BHhGgAAOiJcAwBAR4RrgP/b3t0H2VXQZxz/PpPFJhBbkZdUSNrQNpVtdzRCJsUmzWQNxmAp1NdhhzqxbE07Iy/2xRbcKVboTu3oOO10mCq6NMwIm9EoyCjGpDapZlosBAIkLIgFwRRqaMFqChUSnv5xzg7LZtds9p69J/fs85nZ2XvPPffe55mb7P723HPuiYiIqEiG64iIiIiIimS4joiIiIioSIbriIiIiIiKZLiOiIiImIWGh4fp6elhzZo19PT0MDw8XHekRuiqO0BEREREtNfw8DADAwMMDQ1x6NAh5syZQ39/PwB9fX01p+ts2XIdERERMcsMDg4yNDREb28vXV1d9Pb2MjQ0xODgYN3ROl6G64iIiIhZZmRkhJUrV75s2cqVKxkZGakpUXNkuI6IiIiYZbq7u9m5c+fLlu3cuZPu7u6aEjVHhuuIiIiIWWZgYID+/n62b9/OwYMH2b59O/39/QwMDNQdrePlgMaIiIiIWWb0oMXLLruMkZERuru7GRwczMGMFchwHRERETEL9fX10dfXx44dO1i9enXdcRoju4VERERERFQkw3VEREREREUyXEdEREREVCTDdURERERERWZ0uJa0TtJDkr4j6cpJ1nm3pAck7ZV085jl6yU9XH6tn8mcERERERFVmLFPC5E0B7gOeDOwD7hT0m22HxizzhLgKmCF7WcknVoufzXwYWAZYGBXed9nZipvRERERESrZnLL9XLgO7Yfsf08sAm4cNw67wOuGx2abe8vl78F2Gb76fK2bcC6GcwaEREREdEy2Z6ZB5beCayz/Xvl9fcAv2b70jHr3Ap8G1gBzAH+wvYWSX8CzLX9l+V6fw48Z/vj455jA7ABYMGCBWdv2rRpRrqMdeDAAebPnz/jz9MuTerTpC7QrD5N6gLN6tOkLtCsPk3qAs3q06Qu0Kw+7erS29u7y/ayiW6byZPIaIJl4yf5LmAJsBpYCHxTUs8U74vt64HrAZYtW+Z2fAB60z5ovUl9mtQFmtWnSV2gWX2a1AWa1adJXaBZfZrUBZrV51joMpO7hewDFo25vhB4YoJ1vmT7BduPAg9RDNtTuW9ERERExDFlJofrO4Elks6Q9ArgIuC2cevcCvQCSDoZ+GXgEeBrwFpJJ0o6EVhbLouIiIiIOGbN2G4htg9KupRiKJ4D3GB7r6RrgLts38ZLQ/QDwCHgg7b/G0DStRQDOsA1tp+eqawREREREVWYyX2usX07cPu4ZVePuWzgj8qv8fe9AbhhJvNFRERERFQpZ2iMiIiIiKhIhuuIiIiIiIpkuI6IiIiIqEiG64iIiIiIimS4joiIiIioSIbriIiIiIiKZLiOiIiIiKiIio+a7nySngIea8NTnQz8Vxuep12a1KdJXaBZfZrUBZrVp0ldoFl9mtQFmtWnSV2gWX3a1eXnbZ8y0Q2NGa7bRdJdtpfVnaMqTerTpC7QrD5N6gLN6tOkLtCsPk3qAs3q06Qu0Kw+x0KX7BYSEREREVGRDNcRERERERXJcH30rq87QMWa1KdJXaBZfZrUBZrVp0ldoFl9mtQFmtWnSV2gWX1q75J9riMiIiIiKpIt1xERERERFclwHRERERFRkQzXUyTpBkn7Je2pO0urJC2StF3SiKS9kq6oO1MrJM2V9G+S7i37fKTuTK2SNEfSPZK+XHeWVkn6rqT7Je2WdFfdeVoh6VWSNkt6sPz/88a6M02XpNeWr8no1w8lfaDuXNMl6Q/L//97JA1Lmlt3plZIuqLssrcTX5eJfmdKerWkbZIeLr+fWGfGqZqky7vK1+ZFSR31EXaT9PlY+XPtPkm3SHpVnRmnapIu15Y9dkvaKum0dufKcD11G4F1dYeoyEHgj213A+cA75f0KzVnasWPgTfZfj2wFFgn6ZyaM7XqCmCk7hAV6rW9tO7PHq3A3wJbbJ8JvJ4Ofo1sP1S+JkuBs4FngVtqjjUtkk4HLgeW2e4B5gAX1Ztq+iT1AO8DllP8Oztf0pJ6Ux21jRz+O/NK4Ou2lwBfL693go0c3mUP8HbgG21P07qNHN5nG9Bj+3XAt4Gr2h1qmjZyeJeP2X5d+bPty8DV7Q6V4XqKbH8DeLruHFWw/aTtu8vLP6IYEE6vN9X0uXCgvHpc+dWxR+pKWgj8JvCZurPESyT9NLAKGAKw/bztH9SbqjJrgH+33Y6z3M6ULmCepC7geOCJmvO0ohu4w/aztg8C/wy8reZMR2WS35kXAjeWl28EfrutoaZpoi62R2w/VFOklkzSZ2v5bw3gDmBh24NNwyRdfjjm6gnUMA9kuJ7lJC0G3gB8q94krSl3o9gN7Ae22e7kPn8D/CnwYt1BKmJgq6RdkjbUHaYFvwA8BfxDucvOZySdUHeoilwEDNcdYrps/wfwceBx4Engf2xvrTdVS/YAqySdJOl44K3AopozVWGB7Seh2MgDnFpznpjYJcBX6w7RCkmDkr4HXEy2XEc7SZoPfAH4wLi/9DqO7UPlW0ALgeXl26odR9L5wH7bu+rOUqEVts8CzqPYBWlV3YGmqQs4C/h7228A/pfOeVt7UpJeAVwAfL7uLNNV7rt7IXAGcBpwgqTfqTfV9NkeAf6a4q36LcC9FLvzRcwoSQMU/9ZuqjtLK2wP2F5E0ePSdj9/hutZStJxFIP1Tba/WHeeqpRv0++gc/ePXwFcIOm7wCbgTZI+W2+k1th+ovy+n2Kf3uX1Jpq2fcC+Me+KbKYYtjvdecDdtr9fd5AWnAs8avsp2y8AXwR+veZMLbE9ZPss26so3vZ+uO5MFfi+pNcAlN/315wnxpC0HjgfuNjNOQnKzcA72v2kGa5nIUmi2G90xPYn6s7TKkmnjB7ZLGkexS/aB+tNNT22r7K90PZiirfq/8l2x26Bk3SCpFeO
XgbWUrzl3XFs/yfwPUmvLRetAR6oMVJV+ujgXUJKjwPnSDq+/Pm2hg4+2BRA0qnl95+jOHCu018jgNuA9eXl9cCXaswSY0haB/wZcIHtZ+vO04pxB/9eQA3zQFe7n7BTSRoGVgMnS9oHfNj2UL2ppm0F8B7g/nI/ZYAP2b69xkyteA1wo6Q5FH8wfs52x3+EXUMsAG4p5h26gJttb6k3UksuA24qd6V4BPjdmvO0pNyf983A79edpRW2vyVpM3A3xVva93AMnAK5RV+QdBLwAvB+28/UHehoTPQ7E/go8DlJ/RR/EL2rvoRTN0mXp4G/A04BviJpt+231Jdy6ibpcxXwU8C28uf1Hbb/oLaQUzRJl7eWG0FeBB4D2t4jpz+PiIiIiKhIdguJiIiIiKhIhuuIiIiIiIpkuI6IiIiIqEiG64iIiIiIimS4joiIiIioSIbriIhjnKRDknZL2iPp8+VH6P2k9T807vq/tPDc75V02iS3XSPp3AmWr5aUj8OMiFkpw3VExLHvOdtLbfcAz3Pkz2192XBtu5WzFb6X4pTih7F9te1/bOGxIyIaJ8N1RERn+SbwSwCSbpW0S9JeSRvKZR8F5pVbum8qlx0YvbOkD0q6U9J9kj5SLlssaUTSp8vH2ippnqR3AssoTpyzuzwDKmMea2O5DpLWSXpQ0k6KMwpGRMxKGa4jIjqEpC7gPOD+ctElts+mGIAvl3SS7St5aUv3xePuvxZYoYLxRwAAAXNJREFUAiwHlgJnS1pV3rwEuM72rwI/AN5hezNwF3Bx+XjPTZJrLvBp4LeA3wB+trrWERGdJcN1RMSxb56k3RSD7uPAULn8ckn3AncAiygG5J9kbfl1D8Wpws8cc59Hbe8uL+8CFh9FvjPL+z/s4rS/nz2K+0ZENEpX3QEiIuKInrO9dOwCSauBc4E32n5W0g5g7hEeR8Bf2f7UuMdaDPx4zKJDwMt2AZkCH+X6ERGNlC3XERGd6WeAZ8rB+kzgnDG3vSDpuAnu8zXgEknzASSdLunUIzzPj4BXHmGdB4EzJP1ieb3vyPEjIpopw3VERGfaAnRJug+4lmLXkFHXA/eNHtA4yvZW4GbgXyXdD2zmyIPzRuCTEx3QOOZx/w/YAHylPKDxsWn0iYhoBBW7x0VERERERKuy5ToiIiIioiIZriMiIiIiKpLhOiIiIiKiIhmuIyIiIiIqkuE6IiIiIqIiGa4jIiIiIiqS4ToiIiIioiL/D0+/g1YpUHEKAAAAAElFTkSuQmCC\n", |
|
|
645 |
"text/plain": [ |
|
|
646 |
"<Figure size 864x720 with 1 Axes>" |
|
|
647 |
] |
|
|
648 |
}, |
|
|
649 |
"metadata": { |
|
|
650 |
"needs_background": "light" |
|
|
651 |
}, |
|
|
652 |
"output_type": "display_data" |
|
|
653 |
} |
|
|
654 |
], |
|
|
655 |
"source": [ |
|
|
656 |
"fig = plt.figure(figsize=(12,10))\n", |
|
|
657 |
"plt.grid()\n", |
|
|
658 |
"plt.boxplot(fold_vacc)\n", |
|
|
659 |
"plt.suptitle('Cross-Validation Accuracy\\n basic CNN')\n", |
|
|
660 |
"ax = plt.gca()\n", |
|
|
661 |
"plt.xlabel('Patient id')\n", |
|
|
662 |
"plt.ylabel('Accuracy')\n", |
|
|
663 |
"plt.savefig('fig.png')\n", |
|
|
664 |
"plt.show()" |
|
|
665 |
] |
|
|
666 |
}, |
|
|
667 |
{ |
|
|
668 |
"cell_type": "markdown", |
|
|
669 |
"metadata": {}, |
|
|
670 |
"source": [ |
|
|
671 |
"# Image Time Window" |
|
|
672 |
] |
|
|
673 |
}, |
|
|
674 |
{ |
|
|
675 |
"cell_type": "markdown", |
|
|
676 |
"metadata": {}, |
|
|
677 |
"source": [ |
|
|
678 |
"## CNN Parallel Model (A)" |
|
|
679 |
] |
|
|
680 |
}, |
|
|
681 |
{ |
|
|
682 |
"cell_type": "code", |
|
|
683 |
"execution_count": 8, |
|
|
684 |
"metadata": {}, |
|
|
685 |
"outputs": [], |
|
|
686 |
"source": [ |
|
|
687 |
"class ParallelCNN(nn.Module):\n", |
|
|
688 |
" def __init__(self):\n", |
|
|
689 |
" super(ParallelCNN, self).__init__()\n", |
|
|
690 |
" self.conv1 = nn.Conv2d(3,3,3, padding=1)\n", |
|
|
691 |
" self.conv2 = nn.Conv2d(3,3,5, padding=2)\n", |
|
|
692 |
" self.conv3 = nn.Conv2d(3,3,3, padding=1)\n", |
|
|
693 |
" self.conv4 = nn.Conv2d(3,3,5, padding=2)\n", |
|
|
694 |
" self.conv5 = nn.Conv2d(3,3,3, padding=1)\n", |
|
|
695 |
" self.conv6 = nn.Conv2d(3,3,5, padding=2)\n", |
|
|
696 |
" self.conv7 = nn.Conv2d(3,3,3, padding=1)\n", |
|
|
697 |
" self.conv8 = nn.Conv2d(3,3,5, padding=2)\n", |
|
|
698 |
" self.conv9 = nn.Conv2d(3,3,3, padding=1)\n", |
|
|
699 |
" self.conv10 = nn.Conv2d(3,3,5, padding=2)\n", |
|
|
700 |
" self.conv11 = nn.Conv2d(3,3,3, padding=1)\n", |
|
|
701 |
" self.conv12 = nn.Conv2d(3,3,5, padding=2)\n", |
|
|
702 |
" self.conv13 = nn.Conv2d(3,3,3, padding=1)\n", |
|
|
703 |
" self.conv14 = nn.Conv2d(3,3,5, padding=2)\n", |
|
|
704 |
" self.pool = nn.MaxPool2d(2,2)\n", |
|
|
705 |
" self.fc1 = nn.Linear(3549,512)\n", |
|
|
706 |
" self.fc2 = nn.Linear(512,4)\n", |
|
|
707 |
" self.max = nn.Softmax()\n", |
|
|
708 |
" \n", |
|
|
709 |
" def forward(self, x):\n", |
|
|
710 |
" batch_size = x.shape[0]\n", |
|
|
711 |
" x[:,0] = F.relu(self.conv1(x[:,0]))\n", |
|
|
712 |
" x[:,1] = F.relu(self.conv3(x[:,1]))\n", |
|
|
713 |
" x[:,2] = F.relu(self.conv5(x[:,2]))\n", |
|
|
714 |
" x[:,3] = F.relu(self.conv7(x[:,3]))\n", |
|
|
715 |
" x[:,4] = F.relu(self.conv9(x[:,4]))\n", |
|
|
716 |
" x[:,5] = F.relu(self.conv11(x[:,5]))\n", |
|
|
717 |
" x[:,6] = F.relu(self.conv13(x[:,6]))\n", |
|
|
718 |
" x[:,0] = F.relu(self.conv2(x[:,0]))\n", |
|
|
719 |
" x[:,1] = F.relu(self.conv4(x[:,1]))\n", |
|
|
720 |
" x[:,2] = F.relu(self.conv6(x[:,2]))\n", |
|
|
721 |
" x[:,3] = F.relu(self.conv8(x[:,3]))\n", |
|
|
722 |
" x[:,4] = F.relu(self.conv10(x[:,4]))\n", |
|
|
723 |
" x[:,5] = F.relu(self.conv12(x[:,5]))\n", |
|
|
724 |
" x[:,6] = F.relu(self.conv14(x[:,6]))\n", |
|
|
725 |
" x = x[:,:,:,3:29,3:29]\n", |
|
|
726 |
" x = x.reshape(batch_size, x.shape[2], x.shape[1]*x.shape[3],-1) # img reshape\n", |
|
|
727 |
" x = self.pool(x)\n", |
|
|
728 |
" x = x.view(batch_size,-1)\n", |
|
|
729 |
" x = self.fc1(x)\n", |
|
|
730 |
" x = self.fc2(x)\n", |
|
|
731 |
" x = self.max(x)\n", |
|
|
732 |
" return x" |
|
|
733 |
] |
|
|
734 |
}, |
|
|
735 |
{ |
|
|
736 |
"cell_type": "code", |
|
|
737 |
"execution_count": 10, |
|
|
738 |
"metadata": { |
|
|
739 |
"collapsed": true, |
|
|
740 |
"jupyter": { |
|
|
741 |
"outputs_hidden": true |
|
|
742 |
} |
|
|
743 |
}, |
|
|
744 |
"outputs": [ |
|
|
745 |
{ |
|
|
746 |
"name": "stderr", |
|
|
747 |
"output_type": "stream", |
|
|
748 |
"text": [ |
|
|
749 |
"c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:45: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
750 |
] |
|
|
751 |
}, |
|
|
752 |
{ |
|
|
753 |
"name": "stdout", |
|
|
754 |
"output_type": "stream", |
|
|
755 |
"text": [ |
|
|
756 |
"Begin Training Fold 1/5\t of Patient 1\n", |
|
|
757 |
"[1, 30] loss: 1.414\tAccuracy : 0.517\t\tval-loss: 1.359\tval-Accuracy : 0.459\n", |
|
|
758 |
"[6, 30] loss: 1.117\tAccuracy : 0.748\t\tval-loss: 1.088\tval-Accuracy : 0.757\n", |
|
|
759 |
"[11, 30] loss: 0.913\tAccuracy : 0.830\t\tval-loss: 0.968\tval-Accuracy : 0.784\n", |
|
|
760 |
"[16, 30] loss: 0.862\tAccuracy : 0.830\t\tval-loss: 0.925\tval-Accuracy : 0.811\n" |
|
|
761 |
] |
|
|
762 |
}, |
|
|
763 |
{ |
|
|
764 |
"ename": "KeyboardInterrupt", |
|
|
765 |
"evalue": "", |
|
|
766 |
"output_type": "error", |
|
|
767 |
"traceback": [ |
|
|
768 |
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", |
|
|
769 |
"\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", |
|
|
770 |
"\u001b[1;32m<ipython-input-10-a0847679b8ae>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 36\u001b[0m \u001b[0moutputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mCNN\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 37\u001b[0m \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 38\u001b[1;33m \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 39\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 40\u001b[0m \u001b[0mrunning_loss\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
771 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(self, gradient, retain_graph, create_graph)\u001b[0m\n\u001b[0;32m 164\u001b[0m \u001b[0mproducts\u001b[0m\u001b[1;33m.\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 165\u001b[0m \"\"\"\n\u001b[1;32m--> 166\u001b[1;33m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 167\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 168\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
772 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\autograd\\__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables)\u001b[0m\n\u001b[0;32m 97\u001b[0m Variable._execution_engine.run_backward(\n\u001b[0;32m 98\u001b[0m \u001b[0mtensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgrad_tensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 99\u001b[1;33m allow_unreachable=True) # allow_unreachable flag\n\u001b[0m\u001b[0;32m 100\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 101\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
773 |
"\u001b[1;31mKeyboardInterrupt\u001b[0m: " |
|
|
774 |
] |
|
|
775 |
} |
|
|
776 |
], |
|
|
777 |
"source": [ |
|
|
778 |
"p = 0\n", |
|
|
779 |
"n_fold = 5 \n", |
|
|
780 |
"n_patient = len(np.unique(Patient))\n", |
|
|
781 |
"fold_vloss = np.zeros((n_fold,n_patient))\n", |
|
|
782 |
"fold_loss = np.zeros((n_fold,n_patient))\n", |
|
|
783 |
"fold_vacc = np.zeros((n_fold,n_patient))\n", |
|
|
784 |
"fold_acc = np.zeros((n_fold,n_patient))\n", |
|
|
785 |
"for patient in np.unique(Patient):\n", |
|
|
786 |
" id_patient = np.arange(len(tmp))[Patient==patient]\n", |
|
|
787 |
"\n", |
|
|
788 |
" length = len(id_patient)\n", |
|
|
789 |
" \n", |
|
|
790 |
" train_id, test_id = kfold(length,n_fold)\n", |
|
|
791 |
" \n", |
|
|
792 |
" for fold in range(n_fold):\n", |
|
|
793 |
" X_train = tmp[id_patient[train_id[fold]]]\n", |
|
|
794 |
" X_test = tmp[id_patient[test_id[fold]]]\n", |
|
|
795 |
" y_train = Label[id_patient[train_id[fold]]]\n", |
|
|
796 |
" y_test = Label[id_patient[test_id[fold]]] \n", |
|
|
797 |
"\n", |
|
|
798 |
" print(\"Begin Training Fold %d/%d\\t of Patient %d\" % \n", |
|
|
799 |
" (fold+1,n_fold, patient))\n", |
|
|
800 |
"\n", |
|
|
801 |
" CNN = ParallelCNN().cuda(0)\n", |
|
|
802 |
" criterion = nn.CrossEntropyLoss()\n", |
|
|
803 |
" optimizer = optim.SGD(CNN.parameters(), lr=0.001, momentum=0.9)\n", |
|
|
804 |
"\n", |
|
|
805 |
" n_epochs = 30\n", |
|
|
806 |
" for epoch in range(n_epochs):\n", |
|
|
807 |
" running_loss = 0.0\n", |
|
|
808 |
" batchsize = 4\n", |
|
|
809 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
810 |
" optimizer.zero_grad()\n", |
|
|
811 |
"\n", |
|
|
812 |
" # forward + backward + optimize\n", |
|
|
813 |
" outputs = CNN(torch.from_numpy(X_train[i:i+batchsize]).to(torch.float32).cuda())\n", |
|
|
814 |
" loss = criterion(outputs, torch.from_numpy(y_train[i:i+batchsize]).to(torch.long).cuda())\n", |
|
|
815 |
" loss.backward()\n", |
|
|
816 |
" optimizer.step()\n", |
|
|
817 |
" running_loss += loss.item()\n", |
|
|
818 |
"\n", |
|
|
819 |
" #acc\n", |
|
|
820 |
" _, idx = torch.max(CNN(torch.from_numpy(X_train[:]).to(torch.float32).cuda()).data,1)\n", |
|
|
821 |
" acc = (idx == torch.from_numpy(y_train).cuda()).sum().item()/len(y_train)\n", |
|
|
822 |
"\n", |
|
|
823 |
" #val Loss\n", |
|
|
824 |
" val_outputs = CNN(torch.from_numpy(X_test[:]).to(torch.float32).cuda())\n", |
|
|
825 |
" val_loss = criterion(val_outputs, torch.from_numpy(y_test[:]).to(torch.long).cuda())\n", |
|
|
826 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
827 |
" val_acc = (idx == torch.from_numpy(y_test).cuda()).sum().item()/len(y_test)\n", |
|
|
828 |
"\n", |
|
|
829 |
" if epoch%5==0:\n", |
|
|
830 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
831 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
832 |
" fold_vloss[fold, p ] = val_loss.item()\n", |
|
|
833 |
" fold_loss[fold, p] = running_loss/i\n", |
|
|
834 |
" fold_vacc[fold, p] = val_acc\n", |
|
|
835 |
" fold_acc[fold, p] = acc\n", |
|
|
836 |
" print('Finish Training Fold %d/%d\\t of Patient %d' % \n", |
|
|
837 |
" (fold+1,n_fold, patient))\n", |
|
|
838 |
" print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
839 |
" (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n", |
|
|
840 |
" \n", |
|
|
841 |
" p = p + 1" |
|
|
842 |
] |
|
|
843 |
}, |
|
|
844 |
{ |
|
|
845 |
"cell_type": "markdown", |
|
|
846 |
"metadata": {}, |
|
|
847 |
"source": [ |
|
|
848 |
"### Peresented Results" |
|
|
849 |
] |
|
|
850 |
}, |
|
|
851 |
{ |
|
|
852 |
"cell_type": "code", |
|
|
853 |
"execution_count": 91, |
|
|
854 |
"metadata": {}, |
|
|
855 |
"outputs": [ |
|
|
856 |
{ |
|
|
857 |
"data": { |
|
|
858 |
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAtAAAAKUCAYAAAAtng/mAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOzdfZyld10f/M/XXR6UhLBLMAIBEgXpyNYHSBFl9Z5xfQjcCFVKyyAIdJT2fkm0YqngeMNCO61PVVul9kZGAQMTAdFGTAVKZtRVqTwINGREQ8pDBCWQJSGghV1+9x/nLEwm+zC/2TlzZs6+36/Xee2c61zXub7fOTNnP/M7v+u6qrUWAABgY75k3AUAAMBuIkADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABRqCqnlFVR9bcv72qvnIj625iX/+9qp6+2e0B6CNAAztCVT2lqt4+DJofHYbCg2Oq5e5V9cmq+raTPPYLVfW63udsrZ3XWrtxC2o7XFVXrnvux7TWXnG2z32GfbaqeuSo9gGwmwjQwNhV1XOS/GKSf5/koiQPTPJfkjzhFOvvHWU9rbW/T/KbSb5/3X73JJlNMrKwutNUVSV5WpJbkmzrKHcN+H8K2HG8MQFjVVUXJHlxkh9qrb2+tfbp1trnWmu/21p77nCdw1X1uqq6sqpuS/KMqrpbVf1iVX1kePvFqrrbcP0Lq+oNw1HkW6rqj04Esar68ar666r6VFW9r6oOnaK0VyR5YlV92Zpl35XB++Z/Hz7X86rq/cPnur6qvuc0fbaqevDw63tX1dVVdVtV/VmSr1q37n+qqg8PH39HVX3LcPnlSX4iyT8bjtS/e7h8pap+YPj1l1TVT1bVB6vqY1X1yuH3OFV1ybCOp1fVh6rq41U1f4aX6FuS3C/JjyR5clXddV2tP1hVq2u+Bw8fLn9AVb2+qm6uqk9U1S8Pl99hBH1NTXvX9LJQVX+c5DNJvrKqnrlmHzdW1b9YV8MTqupdw+/X+6vq8qp6UlW9Y916P1ZVv3OGfgHOSIAGxu2bktw9yW+fYb0nJHldknsleVWS+SSPSvL1Sb4uySOT/ORw3R9LclOS+2Qwov0TSVpVPTTJs5P8o9ba+RkE4g+cbGettT9J8tEk37tm8dOSvLq1dmx4//0ZBMwLkrwoyZVVdd8N9PySJH+f5L5J/vnwttbbhn3tT/LqJK+tqru31n4/g1H63xxOCfm6kzz3M4a3mSRfmeS8JL+8bp2DSR6a5FCSF1TV1GlqfXqS381gRD5JHnfigap6UpLDGYzU3zPJ45N8YjhS/4YkH0xySZL7J7nqNPtY72lJnpXk/OFzfGy433smeWaSX1gT1B+Z5JVJnpvBz8a3ZvCaXp3k0nW9PTXJb3TUAXBSAjQwbvdO8vE1ofRU/rS19juttc+31v4uyfcleXFr7WOttZszCLBPG677uQzC6YOGo9l/1FprSY4nuVuSr6mqu7TWPtBae/9p9vnKDKdxVNU9MwjxX5i+0Vp7bWvtI8OafjPJX2UQ5E9pGC6fmOQFw9H267JuSkhr7crW2idaa8daa/9xWPNDz/D9OeH7kvx8a+3G1trtSZ6fwcjx2mkvL2qt/V1r7d1J3p3BHyAnq/XLkjwpgz8aPpfBHzBrp3H8QJKfaa29rQ3c0Fr74PB7cL8kzx32+PettZ6DJF/eWnvvsP/PtdZ+r7X2/uE+/iDJmzL4wyVJ5pL8WmvtzcPX4a9ba3/RWvs/GYT+pw57eVgGYf4NHXUAnJQADYzbJ5JcuIF5zR9ed/9+GYxOnvDB4bIk+dkkNyR50/Aj/+clSWvthiT/KoNR049V1VVVdb/kC2fJOHF74PB5Xplkpqrun+SfJLmhtfbnJ3ZYVd8/nDrwyar6ZJIDSS48Qx/3SbJ3XT9r+zgx1WC1qm4dPu8FG3jeE072fdmbwUj8CX+z5uvPZDBKfTLfk+RYkmuG91+V5DFVdZ/h/QdkMAq/3gOSfHADfxSdyh1e66p6TFW9dTgd55NJHpsvfj9OVUMy+MPkKVVfmMf9mmGwBjgrAjQwbn+awXSGf3yG9dq6+x9J8qA19x84XJbW2qdaaz/WWvvKJN+d5Dkn5jq31l7dWjs43LYl+enh8vPW3D40XPahJH+Uwaju0zII1EmSqnpQkl/NYErIvVtr90pyXZI6Qx83ZxBKH7Cu9hPP+y1JfjzJP02yb/i8t6553vXfh/VO9n05luRvz7DdyTw9g3D9oar6mySvTXKXDA6kTAZB96tOst2HkzzwFH8UfTrJ2nnlX3GSdb7QYw3mtf9Wkp9LctHw+3FNvvj9OFUNaa29NclnMxitfkpM3wC2iAANjFVr7dYkL0jykqr6x1X1ZVV1l+Go48+cZtOlJD9ZVfepqguHz3FlklTV46rqwcORx9symLpxvKoeWlXfNgxlf5/k74aPnc4rMgjJj85gBPaEe2QQ9G4e7vOZGYxAn6nf40len+TwsNevyR2nRZyfQeC9OcneqnpBBnN/T/jbJJfUqc9OsZTkR6vq0qo6L1+cM901GjwcdT+Uwdzjr88X55r/9Jp6X5bkX1fVI2rgwcM/LP4sg/njP1VV96jBaQEfPdzmXUm+taoeODy48flnKOWuGUxhuTnJsap6TJLvXPP4YpJnVtWhGhxAef+q+gdrHn9lBnPAj3VOIwE4JQEaGLvW2s8neU4GBwHenMGo4rOTnO6MCf8uyduTvCfJ/0ryzuGyJHlIkv+R5PYMRrj/S2ttJYMg9lNJPp7BNIYvz+AAw9N5XZJ9Sd7SWvvompqvT/Ifh8//t0n+YZI/3ki/w97OG9bw8iS/vuaxN2Zwlo+/zGD6xd/njlMaXjv89xNV9c6TPPevZTDS+odJ/vdw+ys2WNdaT0vyrtbam1prf3PiluQ/J/naqjrQWnttkoUMDnT8VAav1/7hHwnfneTBST6UwQGd/yxJWmtvzmBu8nuSvCNnmJPcWvtUkh9O8pokRzMYSb56zeN/luGBhRmM1P9B7jgC/xsZ/GFj9BnYMjU4rgYAJk9VfWkGZ/F4eGvtr8ZdDzAZjEADMMn+nyRvE56BrTTSq3kBwLhU1QcyONjwTAeoAnQxhQMAADqYwgEAAB0EaIAdrKo+UFXfPvz6cFVducHtVqrqB0ZbHcC5SYAG6FRVrao+Pbxq4V9X1c8PL9G9a1TVV1fVa6vq48MrHr6nqp5TVXuq6pJhj7+3bpsrq+rw8Ovp4TovWbfOkap6xvZ1ArD9BGiAzfm61tp5GVxs5ClJfrD3CTZw+fKRqKqvSvI/Mzi/9D9srV2Q5ElJLsvgQi4nPGrNBVBO5tNJvr+qLhlRqQA7kgANcBZaa3+RweW+DyRJVT2vqt5fVZ+qquur6ntOrFtVz6iqP66qX6iqWzK4GuFXVdW1VfWJ4Wjwq6rqXhvZd1U9qqr+pKo+WVXvrqrpDZb9oiR/0lp7zomLw7TW3tdae0pr7ZNr1vuZfPHiNCfzyQwuBPPCDe4XYCII0ABnYXgp7m9J8
ufDRe8f3r8gg6B6ZVXdd80m35jkxgyugriQwWnW/kOS+yWZSvKAJIc3sN/7J/m9DALu/iT/OslvVdV9NlD2t2dwhcUzeUmSrz4xB/sUFpI8saoeuoHnA5gIAjTA5ryzqo4m+d0kL8vwctyttde21j7SWvt8a+03k/xVkkeu2e4jrbVfaq0da639XWvthtbam1tr/6e1dnOSn0/yf21g/09Nck1r7Zrhvt6cwaXNH7uBbe+d5KNnXGtwGfCFnGYUenh57/+a5MUbeD6AieBCKgCb8/DW2g3rF1bV9yd5TpJLhovOS3LhmlU+vG79L0/ynzMYtT4/g4GNoxvY/4OSPKmqvnvNsrskWd7Atp9Ict8zrjXwq0meu24/6/10kvdX1ddt8DkBdjUj0ABbpKoelEHgfHaSe7fW7pXkugymaZyw/upV/2G47Gtba/fMYGS5cmYfTvIbrbV7rbndo7X2UxvY9n8keeIG1ktr7XMZTEX5t6eqq7X2iSS/OFwHYOIJ0ABb5x4ZhOGbk6SqnpnhwYWncX6S25N8cjiv+bkb3NeVSb67qr5reOq5uw9PLXfxBrZ9YZJvrqqfraqvGNb64OFp6k52AONvJLlbkstP85w/n+SbM5jHDTDRBGiALdJauz7Jf0zyp0n+Nsk/TPLHZ9jsRUkenuTWDA4KfP0G9/XhJE9I8hMZBPYPZxC+z/i+3lp7f5JvymCayXur6tYkv5XBHOpPnWT94xmE7v2nec7bMjhrxynXAZgU1dr6TxMBAIBTMQINAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBh77gL6HXhhRe2Sy65ZOT7+fSnP5173OMeI9/PdpikXpLJ6meSekkmq59J6iWZrH4mqZdksvqZpF6SyepnknpJtq+fd7zjHR9vrd1n/fJdF6AvueSSvP3tbx/5flZWVjI9PT3y/WyHSeolmax+JqmXZLL6maReksnqZ5J6SSarn0nqJZmsfiapl2T7+qmqD55suSkcAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCgw8gCdFX9WlV9rKquO8XjVVX/uapuqKr3VNXDR1ULAABslVGOQL88yeWnefwxSR4yvD0rya+MsBYAANgSIwvQrbU/THLLaVZ5QpJXtoG3JrlXVd13VPUAAMBWqNba6J686pIkb2itHTjJY29I8lOttSPD+29J8uOttbefZN1nZTBKnYsuuugRV1111chqPuH222/PeeedN/L9bIdJ6iXZnf3MzMxsarvl5eUtruTsTVIvp7Mbf85OZ5L6maReksnqZ5J6SSarn0nqJdm+fmZmZt7RWrts/fK9I9/zqdVJlp00zbfWXprkpUly2WWXtenp6RGWNbCyspLt2M92mKRekt3Zz6n+UK2qUz62U52u3t3Yz6nsxp+z05mkfiapl2Sy+pmkXpLJ6meSeknG3884z8JxU5IHrLl/cZKPjKkWAADYkHEG6KuTfP/wbByPSnJra+2jY6wHAADOaGRTOKpqKcl0kgur6qYkL0xylyRprf3XJNckeWySG5J8JskzR1ULAABslZEF6Nba7Bkeb0l+aFT7BwCAUXAlQgAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADnvHXQAAAKxXVZvarrW2xZXcmQANAMCOc7ogXFXbEpRPxRQOAADoIEADAEAHARo4qf3796equm5JurfZv3//mDsFgD4CNHBSR48eTWut67a8vNy9zdGjR8fdKgB0EaABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgBgbPbv35+q6rol6d5m//79W1azAA0AwNgcPXo0rbWu2/Lycvc2R48e3bKa927ZMzF2J/4i69Va2+JKAFjL+zNMFgF6gpzqjbaqvAkDjNHp3oO9R8PuYwoHAAB0MAINW2T//v2bml/V+9Huvn37csstt3TvB3Yb0x6AncoINGyR3XgQBOxkp/odON1jwjOwHYxAAyfVXnjP5PAFXdtMJ8nKJvYDALuIAA2c3OFbuzdZWVnJ9PT01tcCADuIKRwAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADnvHXQAAAOeu9sJ7Jocv6NpmOklWNrGfLSJAAwAwPodv7d5kZWUl09PTW1/LBpnCAQAAHQRoAADoIEADwBbZv39/qqrrlqR7m/3794+5Uzi3CdAAsEWOHj2a1lrXbXl5uXubo0ePjrtVOKc5iBAAYJfZv3//yP+Q2rdvX2655ZaR7mO3MgINALDL9H7a4ZOOrWUEGrbIbjyPJfnCHNRerbUtruTctZmRtN7XzUgasJUEaNgi9aLbukPVZs5jWVVph7s24TRO9ZpVlZC8TU6MpG3UZn9vALaKKRwAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBAh73jLoA++/fvz9GjR7u3q6qu9fft25dbbrmlez8A57L2wnsmhy/o2mY6SVY2sR9gbAToXebo0aNprXVts7Kykunp6a5tegM3AEm96LZte49uh7s2AbbQSAN0VV2e5D8l2ZPkZa21n1r3+IOS/FqS+yS5JclTW2s3jbImAIDdrvfTjunEJx1baGQBuqr2JHlJku9IclOSt1XV1a2169es9nNJXtlae0VVfVuS/5DkaaOqCQBgIhy+tWv1qur+dIRTG+VBhI9MckNr7cbW2meTXJXkCevW+Zokbxl+vXySxwEAYEcZ5RSO+yf58Jr7
NyX5xnXrvDvJEzOY5vE9Sc6vqnu31j6xdqWqelaSZyXJRRddlJWVlVHV/AW33377tuxnM3rr2mwvO7X/nfzabMfc8fPPP3/H9f+Wt7wlV155ZT70oQ/lgQ98YJ761Kfm0KFD4y7rCx7/+MfnU5/6VPd2va/n+eefn6uvvrp7P9thJ//eJH3vNzv9Pe1cfo/e6T9nvSatn0nqZeyvTWttJLckT8pg3vOJ+09L8kvr1rlfktcn+fMMQvRNSS443fM+4hGPaNtheXl5W/bTa/CS9dlML5vZz3bZqa/NZkxCL69+9avbpZde2q699tr25je/uV177bXt0ksvba9+9avHXdoX+L3Z2T9rvd+3nfzanOs/azv552wzJqmfnfozs1nb9dokeXs7SR4d5RSOm5I8YM39i5N8ZO0KrbWPtNa+t7X2DUnmh8v6JvUAY7WwsJDFxcXMzMxk7969mZmZyeLiYhYWFsZdGgCMxCgD9NuSPKSqLq2quyZ5cpI7fLZZVRdW1Ykanp/BGTmAXWR1dTUHDx68w7KDBw9mdXV1TBUBwGiNLEC31o4leXaSNyZZTfKa1tp7q+rFVfX44WrTSd5XVX+Z5KIkhqxgl5mamsqRI0fusOzIkSOZmpoaU0UAMFojPQ90a+2aJNesW/aCNV+/LsnrRlkDMFrz8/OZm5vL4uJijh8/nuXl5czNzZnCAcDEciVC4KzMzs4mSa644oqsrq5mamoqCwsLX1gOAJNGgAbO2uzsbGZnZzd1SWK2zmZPo9hcXAGgiwANMCFOF4RdhQxg64zyLBwAADBxzukRaB93AgDQ65wegT7ZlWVO3E73OADwRUtLSzlw4EAOHTqUAwcOZGlpadwlwUid0yPQAMDZWVpayvz8/BdOZblnz57Mzc0libPxMLHO6RFoAODsLCwsZHFxMTMzM9m7d29mZmayuLjoXPBMNAEaANi01dXVHDx48A7LDh48mNXV1TFVBKMnQAMAmzY1NZUjR47cYdmRI0cyNTU1popg9ARoAGDT5ufnMzc3l+Xl5Rw7dizLy8uZm5vL/Pz8uEuDkXEQIQCwaScOFLziiiuyurqaqampLCwsOICQiSZAAwBnZXZ2NrOzs1lZWcn09PS4y4GRM4UDAAA6CNAAANBBgAYAgA4CNAAAdHAQ4S7TXnjP5PAFXdtMJ8nKJvYDAMCdCNC7TL3otrTWurbZzFHRVZV2uGsTAIBzgikcAADQQYAGAIAOpnAAAEyIqtrUY73TQ891RqABACZEa+2kt+Xl5VM+Jjz3E6ABAKCDKRwAjFXv6TmnE6fmBMZKgGbXWFpaysLCQlZXVzM1NZX5+fnMzs6OuyzgbB2+tWv1qvKRMzBWAjS7wtLSUubn57O4uJjjx49nz549mZubSxIhGgDYVuZAsyssLCxkcXExMzMz2bt3b2ZmZrK4uJiFhYVxlwYAnGMEaHaF1dXVHDx48A7LDh48mNXV1TFVBACcqwRodoWpqakcOXLkDsuOHDmSqampMVUEAJyrBGh2hfn5+czNzWV5eTnHjh3L8vJy5ubmMj8/P+7SAIBzjIMI2RVOHCh4xRVXfOEsHAsLCw4gBAC2nQDNrjE7O5vZ2dmsrKxkenp63OXA+HScM/mE3nMtf3FffaeYAzgXCNAAu0y96Lbu8yBv5g/Pqko73LUJwDnBHGgAAOggQAMAQAcBGgAAOpwTAXr//v2pqq5bku5t9u/fP+ZOAQAYtXMiQB89ejStta7b8vJy9zZHjx4dd6sAAIzYORGgAQBgqwjQAADQQYAGAIAOLqQCAJyzTpw4oFfvxYyYLAI0AHDOOl0QripBmZMyhQMAADoI0AAA0MEUjl1os/O1euzbt2/k+wCYRN6jYfIJ0LvMZuZimcMFsD028167srKS6enprS8GGBlTOAAAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADs4DDUy89sJ7Jocv6NpmOklWNrEfACaeAA1MvHrRbd0XuNjMxS2qKu1w1yYA7EKmcAAAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA67B13AWydqtrUY621UZQDADCRjEBPkNbaSW/Ly8unfEx4BgDoI0ADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKBhDJaWlnLgwIEcOnQoBw4cyNLS0rhLAgA2yHmgYZstLS1lfn4+i4uLOX78ePbs2ZO5ubkkyezs7JirAwDOxAg0bLOFhYUsLi5mZmYme/fuzczMTBYXF7OwsDDu0gCADTACDdtsdXU1Bw8evMOygwcPZnV1dUwVwc7k6qrATmUEGrbZ1NRUjhw5codlR44cydTU1Jgqgp3J1VWBnUqAhm02Pz+fubm5LC8v59ixY1leXs7c3Fzm5+fHXRoAsAGmcMA2O3Gg4BVXXJHV1dVMTU1lYWHBAYTAznP4gq7Vp5NkZTP7uXUTG8H4CNAwBrOzs5mdnc3Kykqmp6fHXQ7AyXUGW+9pnCtM4QAAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGACbe/v37U1VdtyTd2+zfv3/MnbIdBGgAYOIdPXo0rbWu2/Lycvc2R48eHXerbAMBGgAAOpwxQFfVs6tq33YUAwAAO93eDazzFUneVlXvTPJrSd7YWmujLWtrtRfeMzl8Qdc200myson9AAAw0c4YoFtrP1lV/2+S70zyzCS/XFWvSbLYWnv/qAvcEodv7d6kqrLL/k4AAGAbbGgO9HDE+W+Gt2NJ9iV5XVX9zAhrAwCAHeeMI9BV9cNJnp7k40leluS5rbXPVdWXJPmrJP9mtCUCAMDOsZE50Bcm+d7W2gfXLmytfb6qHjeasgAAYGfayBSOa5LccuJOVZ1fVd+YJK211VEVBgAAO9FGAvSvJLl9zf1PD5cBAMA5ZyMButaetq619vlsbOoHAABMnI0E6Bur6oer6i7D248kuXHUhQEAwE60kQD9L5N8c5K/TnJTkm9M8qxRFgUAADvVGQN0a+1jrbUnt9a+vLV2UWvtKa21j23kyavq8qp6X1XdUFXPO8njD6yq5ar686p6T1U9djNNAADjs7S0lAMHDuTQoUM5cOBAlpaWxl0SjNRGzgN99yRzSR6W5O4nlrfW/vkZttuT5CVJviODkeu3VdXVrbXr16z2k0le01r7lar6mgzO+HFJbxMAwHgsLS1lfn4+i4uLOX78ePbs2ZO5ubkkyezs7Jirg9HYyBSO30jyFUm+K8kfJLk4yac2sN0jk9zQWruxtfbZJFclecK6dVqSew6/viDJRzZSNACwMywsLGRxcTEzMzPZu3dvZmZmsri4mIWFhXGXBiOzkbNpPLi19qSqekJr7RVV9eokb9zAdvdP8uE190/Mn17rcJI3VdUVSe6R5NtP9kRV9awM511fdNFFWVlZ2cDuz9527WfUbr/99onpJZmsfiapl2R
n99Nb12Z72anvTzu9nx47+edsM3Z7P6urqzl+/HhWVla+0Mvx48ezurq64/rye7My7jK2zNj7aa2d9pbkz4b//mGSAxlcmfDGDWz3pCQvW3P/aUl+ad06z0nyY8OvvynJ9Um+5HTP+4hHPKJth8G3ZjIsLy+Pu4QtNUn9TFIvre3cfjbz+7yZXrbrfWPS+um1U3/ONmu39/Owhz2sXXvtta21L/Zy7bXXtoc97GFjrOrO/N4sj7uELbVd/SR5eztJHt3IFI6XVtW+DOYrXz0MuT+9ge1uSvKANfcvzp2naMwlec0wyP9pBnOsL9zAcwMAO8D8/Hzm5uayvLycY8eOZXl5OXNzc5mfnx93aTAyp53CUVVfkuS21trRDEagv7Ljud+W5CFVdWkGp8B7cpKnrFvnQ0kOJXl5VU1lEKBv7tgHADBGJw4UvOKKK7K6upqpqaksLCw4gJCJdtoR6C7o5GAAABZxSURBVDa46uCzN/PErbVjw23fmGQ1g7NtvLeqXlxVjx+u9mNJfrCq3p1kKckzhsPlAMAuMTs7m+uuuy5vectbct111wnPTLyNHET45qr610l+M8mnTyxsrd1ypg1ba9dkcGq6tctesObr65M8esPVAgDAmG0kQJ843/MPrVnW0jedAwAAJsIZA3Rr7dLtKAQAAHaDjVyJ8PtPtry19sqtLwcAAHa2jUzh+Edrvr57BmfNeGcSARoAgHPORqZwXLH2flVdkMHlvQEA4JyzkRHo9T6T5CFbXQgAwKi0F94zOXxB1zbTSbKyif0w8TYyB/p3MzjrRjI4b/TXZHj1QACA3aBedFt6LzWxsrKS6enpvv1UpR3u2oRdaCMj0D+35utjST7YWrtpRPUAAMCOtpEA/aEkH22t/X2SVNWXVtUlrbUPjLQyAADYgU57Ke+h1yb5/Jr7x4fLAADgnLORAL23tfbZE3eGX991dCUBAMDOtZEAfXNVPf7Enap6QpKPj64kAADYuTYyB/pfJnlVVf3y8P5NSU56dUIAAJh0G7mQyvuTPKqqzktSrbVPjb6s7VFVm3q89zQ4AABMjjNO4aiqf19V92qt3d5a+1RV7auqf7cdxY1aa+2Ut+Xl5VM+BgDAuWsjc6Af01r75Ik7rbWjSR47upIAAGDn2kiA3lNVdztxp6q+NMndTrM+AABMrI0cRHhlkrdU1a8P7z8zyStGVxIAAOxcGzmI8Geq6j1Jvj1JJfn9JA8adWEAALATbWQKR5L8TQZXI3xikkNJVkdWEQAA7GCnHIGuqq9O8uQks0k+keQ3MziN3cw21QYAADvO6aZw/EWSP0ry3a21G5Kkqn50W6oCAIAd6nRTOJ6YwdSN5ar61ao6lMEcaAAAOGedMkC31n67tfbPkvyDJCtJfjTJRVX1K1X1ndtUHwAA7ChnPIiwtfbp1tqrWmuPS3Jxkncled7IKwMAgDWWlpZy4MCBHDp0KAcOHMjS0tJY6tjIeaC/oLV2S5L/b3gDAIBtsbS0lPn5+SwuLub48ePZs2dP5ubmkiSzs7PbWstGT2MHAABjs7CwkMXFxczMzGTv3r2ZmZnJ4uJiFhYWtr0WARoAgB1vdXU1Bw8evMOygwcPZnV1+y9PIkADALDjTU1N5ciRI3dYduTIkUxNTW17LQI0AAA73vz8fObm5rK8vJxjx45leXk5c3NzmZ+f3/Zaug4iBACAcThxoOAVV1yR1dXVTE1NZWFhYdsPIEwEaAAAdonZ2dnMzs5mZWUl09PTY6vDFA4AAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHRwGjuAXaiqRr6Pffv2jXwfALuRAA2wy7TWurepqk1tB8CdmcIBAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBh77gLAADYDlU18n3s27dv5Ptg/ARoxuvwBd2bTCfJymb2desmNgJgErTWurepqk1tx+QToBmretFt3W9OKysrmZ6e7ttPVdrhrk0AAE7KHGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHRwGjuACXGmi0Sc6nHnuQXoYwQaYEK01k55W15ePuVjAPQRoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCgw95xFwAAMC5VtanHW2ujKIddwgg0AHDOaq2d8ra8vHzKxzi3CdAAANBBgAYAgA7mQDN2Z5p/thX27ds38n0AAOcGI9CM1enmnm1mTtqpbrfccsu4WwUAJoQADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCgw0gDdFVdXlXvq6obqup5J3n8F6rqXcPbX1bVJ0dZDwAAnK29o3riqtqT5CVJviPJTUneVlVXt9auP7FOa+1H16x/RZJvGFU9AACwFUY5Av3IJDe01m5srX02yVVJnnCa9WeTLI2wHgAAOGvVWhvNE1f9kySXt9Z+YHj/aUm+sbX27JOs+6Akb01ycWvt+Ekef1aSZyXJRRdd9IirrrpqJDWvdfvtt+e8884b+X62wyT1kkxWP5PUS7Jz+5mZmdmW/Zx//vm5+uqrt2VfvXbqa7MZk9RLMln9TFIvyWT1M0m9JNvXz8zMzDtaa5etXz6yKRxJ6iTLTpXWn5zkdScLz0nSWntpkpcmyWWXXdamp6e3pMDTWVlZyXbsZztMUi/JZPUzSb0kO7efzQwU7NReNmuS+pmkXpLJ6meSekkmq59J6iUZfz+jnMJxU5IHrLl/cZKPnGLdJ8f0DQAAdoFRBui3JXlIVV1aVXfNICTf6bPNqnpokn1J/nSEtQAAwJYYWYBurR1L8uwkb0yymuQ1rbX3VtWLq+rxa1adTXJVG9VkbAAA2EKjnAOd1to1Sa5Zt+wF6+4fHmUNAACwlVyJEAAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0E
GABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAEAHARoAADoI0AAA0EGABgCADgI0AAB0EKABAKCDAA0AAB0EaAAA6CBAAwBAh5EG6Kq6vKreV1U3VNXzTrHOP62q66vqvVX16lHWAwAAZ2vvqJ64qvYkeUmS70hyU5K3VdXVrbXr16zzkCTPT/Lo1trRqvryUdUDAABbYZQj0I9MckNr7cbW2meTXJXkCevW+cEkL2mtHU2S1trHRlgPAACctWqtjeaJq/5Jkstbaz8wvP+0JN/YWnv2mnV+J8lfJnl0kj1JDrfWfv8kz/WsJM9KkosuuugRV1111UhqXuv222/PeeedN/L9bIdJ6iWZrH4mqZdksvqZpF6SyepnknpJJqufSeolmax+JqmXZPv6mZmZeUdr7bL1y0c2hSNJnWTZ+rS+N8lDkkwnuTjJH1XVgdbaJ++wUWsvTfLSJLnsssva9PT0lhe73srKSrZjP9thknpJJqufSeolmax+JqmXZLL6maReksnqZ5J6SSarn0nqJRl/P6OcwnFTkgesuX9xko+cZJ3/1lr7XGvtfyd5XwaBGgAAdqRRBui3JXlIVV1aVXdN8uQkV69b53eSzCRJVV2Y5KuT3DjCmgAA4KyMLEC31o4leXaSNyZZTfKa1tp7q+rFVfX44WpvTPKJqro+yXKS57bWPjGqmgAA4GyNcg50WmvXJLlm3bIXrPm6JXnO8AYAADueKxECAEAHARoAADoI0AAA0EGAZtdYWlrKgQMHcujQoRw4cCBLS0vjLgkAOAeN9CBC2CpLS0uZn5/P4uJijh8/nj179mRubi5JMjs7O+bqAIBziRFodoWFhYUsLi5mZmYme/fuzczMTBYXF7OwsDDu0gCAc4wAza6wurqagwcP3mHZwYMHs7q6OqaKAIBzlQDNrjA1NZUjR47cYdmRI0cyNTU1pooAgHOVAM2uMD8/n7m5uSwvL+fYsWNZXl7O3Nxc5ufnx10aAHCOcRAhu8KJAwWvuOKKrK6uZmpqKgsLCw4gBAC2nQDNrjE7O5vZ2dmsrKxkenp63OUAAOcoUzgAAKCDAA0AAB0EaAAA6CBAAwBABwEaAAA6CNAAANBBgAYAgA4CNAAAdBCgAQCggwANAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAGAIAOAjQAAHQQoAEAoIMADQAAHQRoAADoIEADAECHaq2Nu4YuVXVzkg9uw64uTPLxbdjPdpikXpLJ6meSekkmq59J6iWZrH4mqZdksvqZpF6SyepnknpJtq+fB7XW7rN+4a4L0Nulqt7eWrts3HVshUnqJZmsfiapl2Sy+pmkXpLJ6meSekkmq59J6iWZrH4mqZdk/P2YwgEAAB0EaAAA6CBAn9pLx13AFpqkXpLJ6meSekkmq59J6iWZrH4mqZdksvqZpF6SyepnknpJxtyPOdAAANDBCDQAAHQQoAEAoIMAvU5V/VpVfayqrht3LWerqh5QVctVtVpV762qHxl3TZtVVXevqj+rqncPe3nRuGvaClW1p6r+vKreMO5azkZVfaCq/ldVvauq3j7ues5WVd2rql5XVX8x/P35pnHXtBlV9dDha3LidltV/atx13U2qupHh+8B11XVUlXdfdw1bVZV/ciwj/fuxtflZP9fVtX+qnpzVf3V8N9946yxxyn6edLw9fl8Ve2aU8CdopefHb6nvaeqfruq7jXOGnucop9/O+zlXVX1pqq633bWJEDf2cuTXD7uIrbIsSQ/1lqbSvKoJD9UVV8z5po26/8k+bbW2tcl+fokl1fVo8Zc01b4kSSr4y5ii8y01r5+Qs4z+p+S/H5r7R8k+brs0teotfa+4Wvy9UkekeQzSX57zGVtWlXdP8kPJ7mstXYgyZ4kTx5vVZtTVQeS/GCSR2bwM/a4qnrIeKvq9vLc+f/L5yV5S2vtIUneMry/W7w8d+7nuiTfm+QPt72as/Py3LmXNyc50Fr72iR/meT5213UWXh57tzPz7bWvnb4/vaGJC/YzoIE6HVaa3+Y5JZx17EVWmsfba29c/j1pzIIAfcfb1Wb0wZuH969y/C2q4+AraqLk/zfSV427lr4oqq6Z5JvTbKYJK21z7bWPjneqrbEoSTvb61tx5VcR2lvki+tqr1JvizJR8Zcz2ZNJXlra+0zrbVjSf4gyfeMuaYup/j/8glJXjH8+hVJ/vG2FnUWTtZPa221tfa+MZW0aafo5U3Dn7UkeWuSi7e9sE06RT+3rbl7j2xzJhCgzxFVdUmSb0jyP8dbyeYNpzu8K8nHkry5tbZrexn6xST/Jsnnx13IFmhJ3lRV76iqZ427mLP0lUluTvLrw+k1L6uqe4y7qC3w5CRL4y7ibLTW/jrJzyX5UJKPJrm1tfam8Va1adcl+daqundVfVmSxyZ5wJhr2goXtdY+mgwGcZJ8+Zjr4eT+eZL/Pu4izlZVLVTVh5N8X4xAs9Wq6rwkv5XkX637i21Xaa0dH35Uc3GSRw4/At2VqupxST7WWnvHuGvZIo9urT08yWMymCr0reMu6CzsTfLwJL/SWvuGJJ/O7voY+k6q6q5JHp/kteOu5WwM59M+IcmlSe6X5B5V9dTxVrU5rbXVJD+dwcfqv5/k3RlMu4ORqqr5DH7WXjXuWs5Wa22+tfaADHp59nbuW4CecFV1lwzC86taa68fdz1bYfhx+kp291z1Ryd5fFV9IMlVSb6tqq4cb0mb11r7yPDfj2Uwx/aR463orNyU5KY1n3C8LoNAvZs9Jsk7W2t/O+5CztK3J/nfrbWbW2ufS/L6JN885po2rbW22Fp7eGvtWzP4ePqvxl3TFvjbqrpvkgz//diY62GNqnp6kscl+b42WRcCeXWSJ27nDgXoCVZVlcE8ztXW2s+Pu56zUVX3OXHEcFV9aQb/kf7FeKvavNba81trF7fWLsngo/VrW2u7ciStqu5RVeef+DrJd2bw8fSu1Fr7myQfrqqHDhcdSnL9GEvaCrPZ5dM3hj6U5FFV9WXD97dD2aUHeCZJVX358N8HZnCg2iS8Rlcnefrw66cn+W9jrIU1quryJD+e5PGttc+Mu56zte6g28dnmzPB3u3c2W5QVUtJppNcWFU3JXlha21xvFVt2qOTPC3J/xrOHU6Sn2itXTPGmjbrvkleUVV7MvjD7zWttV196rcJclGS3x7kmexN8urW2u+Pt6SzdkWSVw2nPtyY5JljrmfThvNrvyPJvxh3LWertfY/q+p1Sd6ZwUfQf57df
Xni36qqeyf5XJIfaq0dHXdBPU72/2WSn0rymqqay+APnieNr8I+p+jnliS/lOQ+SX6vqt7VWvuu8VW5Mafo5flJ7pbkzcP367e21v7l2IrscIp+Hjsc6Ph8kg8m2dZeXMobAAA6mMIBAAAdBGgAAOggQAMAQAcBGgAAOgjQAADQQYAG2CGq6nhVvauqrquq1w5PQXe69X9i3f0/OYt9P6Oq7neKx15cVd9+kuXTVeV0ksA5R4AG2Dn+rrX29a21A0k+mzOf1/QOAbq1djZX5XtGBpfHvpPW2gtaa//jLJ4bYKII0AA70x8leXCSVNXvVNU7quq9/397d8waVRBGYfg9ECEBxUIQQYSIBAI2gYhooVhI0MJKG0mXImU6wVYs9A8IahBSqFVaQVNZCAoqxqQwYCH6BxQUo4iMxZ3VNUQ3VwST+D6wsPvdmdm53WGYYZJM1tplYKCuWN+qtQ+dzknOJXmcZCHJhVobTPIiyXQday7JQJIzwAGay2Pm622fdI01U9uQ5ESSpSQPaG7Pk6T/jgFaktaZJH3ASWCxliZKKaM0IXcqyY5Synl+rFiPr+g/BgwBB4ERYDTJ0fp4CLhSStkPvANOl1JmgSfAeB1v+Rfz6gemgVPAEWDX33trSdo4DNCStH4MJJmnCbNvgBu1PpXkOfAI2EMTgn9nrH6e0Vx7PdzV51UpZb5+fwoMtpjfcO3/sjTX2N5s0VeSNo2+fz0BSdJ3y6WUke5CkmPAceBwKeVjkvtAf49xAlwqpVxbMdYg8Lmr9BX4abvGGpSW7SVp03EFWpLWt+3A2xqeh4FDXc++JNmySp97wESSrQBJdifZ2eN/3gPberRZAvYm2Vd/n+09fUnafAzQkrS+3QX6kiwAF2m2cXRcBxY6hwg7SilzwG3gYZJFYJbe4XgGuLraIcKucT8Bk8Cdeojw9R+8jyRteGm2sUmSJElaC1egJUmSpBYM0JIkSVILBmhJkiSpBQO0JEmS1IIBWpIkSWrBAC1JkiS1YICWJEmSWvgGED8z1eXyZtQAAAAASUVORK5CYII=\n", |
|
|
859 |
"text/plain": [ |
|
|
860 |
"<Figure size 864x720 with 1 Axes>" |
|
|
861 |
] |
|
|
862 |
}, |
|
|
863 |
"metadata": { |
|
|
864 |
"needs_background": "light" |
|
|
865 |
}, |
|
|
866 |
"output_type": "display_data" |
|
|
867 |
} |
|
|
868 |
], |
|
|
869 |
"source": [ |
|
|
870 |
"sio.savemat('Result/Res_ParallelCNN.mat',{\"loss\":fold_loss,\"acc\":fold_acc,\"val loss\":fold_vloss,\"val acc\":fold_vacc})\n", |
|
|
871 |
"\n", |
|
|
872 |
"fig = plt.figure(figsize=(12,10))\n", |
|
|
873 |
"plt.grid()\n", |
|
|
874 |
"plt.boxplot(fold_vacc)\n", |
|
|
875 |
"plt.suptitle('Cross-Validation Accuracy\\n Parallel CNN')\n", |
|
|
876 |
"ax = plt.gca()\n", |
|
|
877 |
"plt.xlabel('Patient id')\n", |
|
|
878 |
"plt.ylabel('Accuracy')\n", |
|
|
879 |
"plt.savefig('Result/ParallelCNN.png')\n", |
|
|
880 |
"plt.show()" |
|
|
881 |
] |
|
|
882 |
}, |
|
|
883 |
{ |
|
|
884 |
"cell_type": "markdown", |
|
|
885 |
"metadata": {}, |
|
|
886 |
"source": [ |
|
|
887 |
"## CNN Temporal Model (B)" |
|
|
888 |
] |
|
|
889 |
}, |
|
|
890 |
{ |
|
|
891 |
"cell_type": "code", |
|
|
892 |
"execution_count": 276, |
|
|
893 |
"metadata": {}, |
|
|
894 |
"outputs": [], |
|
|
895 |
"source": [ |
|
|
896 |
"class TemporalCNN(nn.Module):\n", |
|
|
897 |
" def __init__(self):\n", |
|
|
898 |
" super(TemporalCNN, self).__init__()\n", |
|
|
899 |
" self.conv1 = nn.Conv2d(3,3,3)\n", |
|
|
900 |
" self.conv2 = nn.Conv2d(3,3,5)\n", |
|
|
901 |
" self.conv3 = nn.Conv2d(3,3,3)\n", |
|
|
902 |
" self.conv4 = nn.Conv2d(3,3,5)\n", |
|
|
903 |
" self.conv5 = nn.Conv2d(3,3,3)\n", |
|
|
904 |
" self.conv6 = nn.Conv2d(3,3,5)\n", |
|
|
905 |
" self.conv7 = nn.Conv2d(3,3,3)\n", |
|
|
906 |
" self.conv8 = nn.Conv2d(3,3,5)\n", |
|
|
907 |
" self.conv9 = nn.Conv2d(3,3,3)\n", |
|
|
908 |
" self.conv10 = nn.Conv2d(3,3,5)\n", |
|
|
909 |
" self.conv11 = nn.Conv2d(3,3,3)\n", |
|
|
910 |
" self.conv12 = nn.Conv2d(3,3,5)\n", |
|
|
911 |
" self.conv13 = nn.Conv2d(3,3,3)\n", |
|
|
912 |
" self.conv14 = nn.Conv2d(3,3,5) \n", |
|
|
913 |
" self.pool1 = nn.MaxPool2d(2)\n", |
|
|
914 |
" self.pool2 = nn.MaxPool2d(2) \n", |
|
|
915 |
" self.conv15 = nn.Conv2d(3,3,5)\n", |
|
|
916 |
" self.conv16 = nn.Conv2d(3,3,7)\n", |
|
|
917 |
" self.fc1 = nn.Linear(120,512)\n", |
|
|
918 |
" self.fc2 = nn.Linear(512,4)\n", |
|
|
919 |
" self.max = nn.Softmax()\n", |
|
|
920 |
" \n", |
|
|
921 |
" def forward(self, x):\n", |
|
|
922 |
" batch_size = x.shape[0]\n", |
|
|
923 |
" tmp = torch.zeros(batch_size, x.shape[1], x.shape[2],26,26).cuda()\n", |
|
|
924 |
" tmp[:,0] = F.relu(self.conv2(F.relu(self.conv1(x[:,0]))))\n", |
|
|
925 |
" tmp[:,1] = F.relu(self.conv4(F.relu(self.conv3(x[:,1]))))\n", |
|
|
926 |
" tmp[:,2] = F.relu(self.conv6(F.relu(self.conv5(x[:,2]))))\n", |
|
|
927 |
" tmp[:,3] = F.relu(self.conv8(F.relu(self.conv7(x[:,3]))))\n", |
|
|
928 |
" tmp[:,4] = F.relu(self.conv10(F.relu(self.conv9(x[:,4]))))\n", |
|
|
929 |
" tmp[:,5] = F.relu(self.conv12(F.relu(self.conv11(x[:,5]))))\n", |
|
|
930 |
" tmp[:,6] = F.relu(self.conv14(F.relu(self.conv13(x[:,6]))))\n", |
|
|
931 |
" x = torch.zeros(batch_size, x.shape[1], x.shape[2],26,26).cuda()\n", |
|
|
932 |
" for i in range(7):\n", |
|
|
933 |
" x[:,i] = tmp[:,i]\n", |
|
|
934 |
" #x[:,0] = F.relu(self.conv1(x[:,0]))\n", |
|
|
935 |
" #x[:,1] = F.relu(self.conv3(x[:,1]))\n", |
|
|
936 |
" #x[:,2] = F.relu(self.conv5(x[:,2]))\n", |
|
|
937 |
" #x[:,3] = F.relu(self.conv7(x[:,3]))\n", |
|
|
938 |
" #x[:,4] = F.relu(self.conv9(x[:,4]))\n", |
|
|
939 |
" #x[:,5] = F.relu(self.conv11(x[:,5]))\n", |
|
|
940 |
" #x[:,6] = F.relu(self.conv13(x[:,6]))\n", |
|
|
941 |
" #x[:,0] = F.relu(self.conv2(x[:,0]))\n", |
|
|
942 |
" #x[:,1] = F.relu(self.conv4(x[:,1]))\n", |
|
|
943 |
" #x[:,2] = F.relu(self.conv6(x[:,2]))\n", |
|
|
944 |
" #x[:,3] = F.relu(self.conv8(x[:,3]))\n", |
|
|
945 |
" #x[:,4] = F.relu(self.conv10(x[:,4]))\n", |
|
|
946 |
" #x[:,5] = F.relu(self.conv12(x[:,5]))\n", |
|
|
947 |
" #x[:,6] = F.relu(self.conv14(x[:,6]))\n", |
|
|
948 |
" #x = x[:,:,:,3:29,3:29]\n", |
|
|
949 |
" #tmp = torch.zeros(batch_size, x.shape[2], x.shape[1]*x.shape[3],x.shape[4]).cuda()\n", |
|
|
950 |
" #for i in range(x.shape[1]):\n", |
|
|
951 |
" # tmp[:,:,i*x.shape[3]:(i+1)*x.shape[3], :] = x[:,i]\n", |
|
|
952 |
" x = x.reshape(batch_size, x.shape[2], x.shape[1]*x.shape[3],-1) # img reshape\n", |
|
|
953 |
" x = self.pool1(x)\n", |
|
|
954 |
" x = F.relu(self.conv15(x))\n", |
|
|
955 |
" x = F.relu(self.conv16(x))\n", |
|
|
956 |
" x = self.pool2(x)\n", |
|
|
957 |
" x = x.view(batch_size,-1)\n", |
|
|
958 |
" x = self.fc1(x)\n", |
|
|
959 |
" x = self.fc2(x)\n", |
|
|
960 |
" x = self.max(x)\n", |
|
|
961 |
" return x" |
|
|
962 |
] |
|
|
963 |
}, |
|
|
964 |
{ |
|
|
965 |
"cell_type": "code", |
|
|
966 |
"execution_count": 277, |
|
|
967 |
"metadata": {}, |
|
|
968 |
"outputs": [ |
|
|
969 |
{ |
|
|
970 |
"name": "stderr", |
|
|
971 |
"output_type": "stream", |
|
|
972 |
"text": [ |
|
|
973 |
"c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:65: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
974 |
] |
|
|
975 |
}, |
|
|
976 |
{ |
|
|
977 |
"data": { |
|
|
978 |
"text/plain": [ |
|
|
979 |
"tensor([[0.2572, 0.2522, 0.2505, 0.2401],\n", |
|
|
980 |
" [0.2591, 0.2499, 0.2525, 0.2385]], device='cuda:0',\n", |
|
|
981 |
" grad_fn=<SoftmaxBackward>)" |
|
|
982 |
] |
|
|
983 |
}, |
|
|
984 |
"execution_count": 277, |
|
|
985 |
"metadata": {}, |
|
|
986 |
"output_type": "execute_result" |
|
|
987 |
} |
|
|
988 |
], |
|
|
989 |
"source": [ |
|
|
990 |
"net = TemporalCNN().cuda()\n", |
|
|
991 |
"net(torch.from_numpy(tmp[0:2]).to(torch.float32).cuda())" |
|
|
992 |
] |
|
|
993 |
}, |
|
|
994 |
{ |
|
|
995 |
"cell_type": "code", |
|
|
996 |
"execution_count": 280, |
|
|
997 |
"metadata": { |
|
|
998 |
"scrolled": true |
|
|
999 |
}, |
|
|
1000 |
"outputs": [ |
|
|
1001 |
{ |
|
|
1002 |
"name": "stdout", |
|
|
1003 |
"output_type": "stream", |
|
|
1004 |
"text": [ |
|
|
1005 |
"Begin Training Fold 1/5\t of Patient 1\n" |
|
|
1006 |
] |
|
|
1007 |
}, |
|
|
1008 |
{ |
|
|
1009 |
"name": "stderr", |
|
|
1010 |
"output_type": "stream", |
|
|
1011 |
"text": [ |
|
|
1012 |
"c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:65: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
1013 |
] |
|
|
1014 |
}, |
|
|
1015 |
{ |
|
|
1016 |
"name": "stdout", |
|
|
1017 |
"output_type": "stream", |
|
|
1018 |
"text": [ |
|
|
1019 |
"[1, 100] loss: 1.424\tAccuracy : 0.327\t\tval-loss: 1.384\tval-Accuracy : 0.243\n", |
|
|
1020 |
"[11, 100] loss: 1.422\tAccuracy : 0.320\t\tval-loss: 1.384\tval-Accuracy : 0.270\n", |
|
|
1021 |
"[21, 100] loss: 1.420\tAccuracy : 0.320\t\tval-loss: 1.384\tval-Accuracy : 0.270\n", |
|
|
1022 |
"[31, 100] loss: 1.418\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n", |
|
|
1023 |
"[41, 100] loss: 1.417\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n", |
|
|
1024 |
"[51, 100] loss: 1.415\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n", |
|
|
1025 |
"[61, 100] loss: 1.413\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n", |
|
|
1026 |
"[71, 100] loss: 1.411\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n", |
|
|
1027 |
"[81, 100] loss: 1.409\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n", |
|
|
1028 |
"[91, 100] loss: 1.408\tAccuracy : 0.320\t\tval-loss: 1.383\tval-Accuracy : 0.270\n", |
|
|
1029 |
"Finish Training Fold 1/5\t of Patient 1\n", |
|
|
1030 |
"Begin Training Fold 2/5\t of Patient 1\n", |
|
|
1031 |
"[1, 100] loss: 1.427\tAccuracy : 0.265\t\tval-loss: 1.390\tval-Accuracy : 0.216\n" |
|
|
1032 |
] |
|
|
1033 |
}, |
|
|
1034 |
{ |
|
|
1035 |
"ename": "KeyboardInterrupt", |
|
|
1036 |
"evalue": "", |
|
|
1037 |
"output_type": "error", |
|
|
1038 |
"traceback": [ |
|
|
1039 |
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", |
|
|
1040 |
"\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", |
|
|
1041 |
"\u001b[1;32m<ipython-input-280-7232a97298b5>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 37\u001b[0m \u001b[0moutputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mCNN\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 38\u001b[0m \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 39\u001b[1;33m \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 40\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 41\u001b[0m \u001b[0mrunning_loss\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1042 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(self, gradient, retain_graph, create_graph)\u001b[0m\n\u001b[0;32m 164\u001b[0m \u001b[0mproducts\u001b[0m\u001b[1;33m.\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 165\u001b[0m \"\"\"\n\u001b[1;32m--> 166\u001b[1;33m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 167\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 168\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1043 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\autograd\\__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables)\u001b[0m\n\u001b[0;32m 97\u001b[0m Variable._execution_engine.run_backward(\n\u001b[0;32m 98\u001b[0m \u001b[0mtensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgrad_tensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 99\u001b[1;33m allow_unreachable=True) # allow_unreachable flag\n\u001b[0m\u001b[0;32m 100\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 101\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1044 |
"\u001b[1;31mKeyboardInterrupt\u001b[0m: " |
|
|
1045 |
] |
|
|
1046 |
} |
|
|
1047 |
], |
|
|
1048 |
"source": [ |
|
|
1049 |
" if opti=='SGD':\n", |
|
|
1050 |
" optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)\n", |
|
|
1051 |
" elif opti =='Adam':\n", |
|
|
1052 |
" optimizer = optip = 0\n", |
|
|
1053 |
"n_fold = 5 \n", |
|
|
1054 |
"n_patient = len(np.unique(Patient))\n", |
|
|
1055 |
"fold_vloss = np.zeros((n_fold,n_patient))\n", |
|
|
1056 |
"fold_loss = np.zeros((n_fold,n_patient))\n", |
|
|
1057 |
"fold_vacc = np.zeros((n_fold,n_patient))\n", |
|
|
1058 |
"fold_acc = np.zeros((n_fold,n_patient))\n", |
|
|
1059 |
"for patient in np.unique(Patient):\n", |
|
|
1060 |
" id_patient = np.arange(len(tmp))[Patient==patient]\n", |
|
|
1061 |
"\n", |
|
|
1062 |
" length = len(id_patient)\n", |
|
|
1063 |
" \n", |
|
|
1064 |
" train_id, test_id = kfold(length,n_fold)\n", |
|
|
1065 |
" \n", |
|
|
1066 |
" for fold in range(n_fold):\n", |
|
|
1067 |
" X_train = tmp[id_patient[train_id[fold]]]\n", |
|
|
1068 |
" X_test = tmp[id_patient[test_id[fold]]]\n", |
|
|
1069 |
" y_train = Label[id_patient[train_id[fold]]]\n", |
|
|
1070 |
" y_test = Label[id_patient[test_id[fold]]] \n", |
|
|
1071 |
"\n", |
|
|
1072 |
" print(\"Begin Training Fold %d/%d\\t of Patient %d\" % \n", |
|
|
1073 |
" (fold+1,n_fold, patient))\n", |
|
|
1074 |
"\n", |
|
|
1075 |
" CNN = TemporalCNN().cuda(0)\n", |
|
|
1076 |
" criterion = nn.CrossEntropyLoss()\n", |
|
|
1077 |
" optimizer = optim.SGD(CNN.parameters(), lr=0.001)\n", |
|
|
1078 |
"# optimizer = optim.SGD(CNN.parameters(), lr=0.001, momentum=0.9)\n", |
|
|
1079 |
"\n", |
|
|
1080 |
" n_epochs = 100\n", |
|
|
1081 |
" for epoch in range(n_epochs):\n", |
|
|
1082 |
" running_loss = 0.0\n", |
|
|
1083 |
" batchsize = 4\n", |
|
|
1084 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
1085 |
" optimizer.zero_grad()\n", |
|
|
1086 |
"\n", |
|
|
1087 |
" # forward + backward + optimize\n", |
|
|
1088 |
" outputs = CNN(torch.from_numpy(X_train[i:i+batchsize]).to(torch.float32).cuda())\n", |
|
|
1089 |
" loss = criterion(outputs, torch.from_numpy(y_train[i:i+batchsize]).to(torch.long).cuda())\n", |
|
|
1090 |
" loss.backward()\n", |
|
|
1091 |
" optimizer.step()\n", |
|
|
1092 |
" running_loss += loss.item()\n", |
|
|
1093 |
"\n", |
|
|
1094 |
" #acc\n", |
|
|
1095 |
" _, idx = torch.max(CNN(torch.from_numpy(X_train[:]).to(torch.float32).cuda()).data,1)\n", |
|
|
1096 |
" acc = (idx == torch.from_numpy(y_train).cuda()).sum().item()/len(y_train)\n", |
|
|
1097 |
"\n", |
|
|
1098 |
" #val Loss\n", |
|
|
1099 |
" val_outputs = CNN(torch.from_numpy(X_test[:]).to(torch.float32).cuda())\n", |
|
|
1100 |
" val_loss = criterion(val_outputs, torch.from_numpy(y_test[:]).to(torch.long).cuda())\n", |
|
|
1101 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
1102 |
" val_acc = (idx == torch.from_numpy(y_test).cuda()).sum().item()/len(y_test)\n", |
|
|
1103 |
"\n", |
|
|
1104 |
" if epoch%10==0:\n", |
|
|
1105 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
1106 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
1107 |
" fold_vloss[fold, p ] = val_loss.item()\n", |
|
|
1108 |
" fold_loss[fold, p] = running_loss/i\n", |
|
|
1109 |
" fold_vacc[fold, p] = val_acc\n", |
|
|
1110 |
" fold_acc[fold, p] = acc\n", |
|
|
1111 |
" print('Finish Training Fold %d/%d\\t of Patient %d' % \n", |
|
|
1112 |
" (fold+1,n_fold, patient))\n", |
|
|
1113 |
" print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
1114 |
" (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n", |
|
|
1115 |
" \n", |
|
|
1116 |
" p = p + 1" |
|
|
1117 |
] |
|
|
1118 |
}, |
|
|
1119 |
{ |
|
|
1120 |
"cell_type": "markdown", |
|
|
1121 |
"metadata": {}, |
|
|
1122 |
"source": [ |
|
|
1123 |
"### Peresented Results" |
|
|
1124 |
] |
|
|
1125 |
}, |
|
|
1126 |
{ |
|
|
1127 |
"cell_type": "code", |
|
|
1128 |
"execution_count": 35, |
|
|
1129 |
"metadata": { |
|
|
1130 |
"collapsed": true, |
|
|
1131 |
"jupyter": { |
|
|
1132 |
"outputs_hidden": true |
|
|
1133 |
} |
|
|
1134 |
}, |
|
|
1135 |
"outputs": [ |
|
|
1136 |
{ |
|
|
1137 |
"data": { |
|
|
1138 |
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAtAAAAKUCAYAAAAtng/mAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOzde5hdeVkn+u9rIqI0NIlgq9DSqOiEiTfoabxET2I7Y+MoqIhDVBQn2Mfz2K0j6gxOHGhwMuP9MiMz56hhvIBpAS9P6/QIjFOlxvFCo+gIEWmQSwvKpQNN4wXSvOePvQPVRSWpX6V27aqdz+d59pNaa6+11/vWrqp861e/tVZ1dwAAgPX5sHkXAAAAO4kADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABpiBqnpKVZ1csXx3VX3ierbdwLH+R1V9w0b3B2CMAA1sC1X1NVV12zRovmUaCg/MqZb7VtU7q+oL13juR6vqRaOv2d2XdffrNqG2m6rqeate+7Hd/bMX+9oXOGZX1TWzOgbATiJAA3NXVU9L8mNJ/kOSK5J8QpL/kuTx59h+9yzr6e6/T/KLSb5+1XF3JTmcZGZhdbupqkry5CR3JtnSUe6a8P8UsO34wQTMVVVdnuTZSb6lu3+5u9/T3e/r7l/r7u+abnNTVb2oqp5XVXcleUpVfURV/VhVvXn6+LGq+ojp9g+qql+fjiLfWVW/czaIVdW/qaq/qqp3V9Wrq+rac5T2s0meUFUftWLdF2fyc/N/TF/r6VX12ulrvaqqvuI8fXZVffL044+uqluq6q6q+sMkn7Rq2x+vqjdNn395VX3+dP11Sf5tkn8xHan/k+n65ap66vTjD6uq76mqN1TVW6vq56af41TVVdM6vqGq3lhVb6+qoxd4iz4/yccn+bYkT6qq+6yq9Zuq6tSKz8GjpuuvrKpfrqq3VdU7quonpuvvNYK+oqbdK3o5VlW/m+Rvk3xiVX3jimO8rqr+71U1PL6qXjH9fL22qq6rqidW1ctXbfcdVfWrF+gX4IIEaGDePifJfZP8ygW2e3ySFyV5YJLnJzma5LOTfGaSz0hyTZLvmW77HUnuSPLgTEa0/22SrqpPTXJDkn/S3ffPJBC/fq2Ddff/TvKWJF+5YvWTk/xCd5+ZLr82k4B5eZJnJXleVX3cOnp+TpK/T/JxSf7l9LHSy6Z97U3yC0leWFX37e7fyGSU/henU0I+Y43Xfsr0cSjJJya5LMlPrNrmQJJPTXJtkmdU1b7z1PoNSX4tkxH5JPnSs09U1ROT3JTJSP0DkjwuyTumI/W/nuQNSa5K8pAkN5/nGKs9Ocn1Se4/fY23To/7gCTfmORHVwT1a5L8XJLvyuRr4wsyeU9vSfLwVb19XZKfH6gDYE0CNDBvH53k7StC6bn8Xnf/ane/v7v/LsnXJnl2d7+1u9+WSYB98nTb92USTh82Hc3+ne7uJPck+Ygkj6yqD+/u13f3a89zzJ/LdBpHVT0gkxD/gekb3f3C7n7ztKZfTPKaTIL8OU3D5ROSPGM62v5nWTUlpLuf193v6O4z3f3D05o/9QKfn7O+NsmPdPfruvvuJN+dycjxymkvz+ruv+vuP0nyJ5n8ArJWrR+V5ImZ/NLwvkx+gVk5jeOpSX6gu1/WE7d39xumn4OPT/Jd0x7/vrtHTpL8me5+5bT/93X3f+/u106P8VtJXpLJLy5JciTJc7v7pdP34a+6+8+7+x8yCf1fN+3lH2cS5n99oA6ANQnQwLy9I8mD1jGv+U2rlj8+k9HJs94wXZckP5jk9iQvmf7J/+lJ0t23J/lXmYyavrWqbq6qj08+cJWMs49PmL7OzyU5VFUPSfJVSW7v7j8+e8Cq+vrp1IF3VtU7k+xP8qAL9PHgJLtX9bOyj7NTDU5V1bumr3v5Ol73rLU+L7szGYk/669XfPy3mYxSr+UrkpxJcut0+flJHltVD54uX5nJKPxqVyZ5wzp+KTqXe73XVfXYqvr96XScdyb5knzw83GuGpLJLyZfU/WBedwvmAZrgIsiQAPz9nuZTGf48gts16uW35zkYSuWP2G6Lt397u7+ju7+xCRfluRpZ+c6d/cvdPeB6b6d5Pun6y9b8XjjdN0bk/xOJqO6T84kUCdJquphSX4qkykhH93dD0zyZ0nqAn28LZNQeuWq2s++7ucn+TdJvjrJnunrvmvF667+PKy21uflTJK/ucB+a/mGTML1G6vqr5O8MMmHZ3IiZTIJup+0xn5vSvIJ5/il6D1JVs4r/9g1tvlAjzWZ1/5LSX4oyRXTz8et+eDn41w1pLt/P8l7Mxmt/pqYvgFsEgEamKvufleSZyR5TlV9eVV9VFV9+HTU8QfOs+uJJN9TVQ+uqgdNX+N5SVJVX1pVnzwdebwrk6kb91TVp1bVF05D2d8n+bvpc+fzs5mE5M/LZAT2rPtlEvTeNj3mN2YyAn2hfu9J8stJbpr2+sjce1rE/TMJvG9LsruqnpHJ3N+z/ibJVXXuq1OcSPLtVfXwqrosH5wzPTQaPB11vzaTucefmQ/ONf/+FfX+dJLvrKpH18QnT3+x+MNM5o9/X1XdryaXBfy86T6vSPIFVfUJ05Mbv/sCpdwnkyksb0typqoem+SfrXj+eJJvrKpra3IC5UOq6h+teP7nMpkDfmZwGgnAOQnQwNx1948keVomJwG+LZNRxRuSnO+KCf8+yW1J/jTJ/0nyR9N1SfKIJP8zyd2ZjHD/l+5eziSIfV+St2cyjeFjMjnB8HxelGRPkt/s7resqPlVSX54+vp/k+TTkvzuevqd9nbZtIafSfLfVjz34kyu8vEXmUy/+Pvce0rDC6f/vqOq/miN135uJiOtv53kL6f737jOulZ6cpJXdPdLuvuvzz6S/Kckn15V+7v7hUmOZXKi47szeb/2Tn9J+LIkn5zkjZmc0PkvkqS7X5rJ3OQ/TfLyXGBOcne/O8m3JnlBktOZjCTfsuL5P8z0xMJMRup/K/cegf/5TH6xMfoMbJqanFcDAIunqj4yk6t4PKq7XzPveoDFYAQagEX2/yR5mfAMbKaZ3s0LAOalql6fycmGFzpBFWCIKRwAADDAFA4AABggQANsY1X1+qr6ounHN1XV89a533JVPXW21QFcmgRogEFV1VX1nuldC/+qqn5keovuHaOqPqWqXlhVb5/e8fBPq+ppVbWrqq6a9vjfV+3zvKq6afrxwek2z1m1zcmqesrWdQKw9QRogI35jO6+LJObjXxNkm8afYF13L58Jqrqk5L8QSbXl/607r48yROTXJ3JjVzO+uwVN0BZy3uSfH1VXTWjUgG2JQEa4CJ0959ncrvv/UlSVU+vqtdW1bur6lVV9RVnt62qp1TV71bVj1bVnZncjfCTqup/VdU7pqPBz6+qB67n2FX12VX1v6vqnVX1J1V1cJ1lPyvJ/+7up529OUx3v7q7v6a737liux/IB29Os5Z3ZnIjmGeu87gAC0GABrgI01txf36SP56ueu10+fJMgurzq
urjVuzymCSvy+QuiMcyuczaf0zy8Un2JbkyyU3rOO5Dkvz3TALu3iTfmeSXqurB6yj7izK5w+KFPCfJp5ydg30Ox5I8oao+dR2vB7AQBGiAjfmjqjqd5NeS/HSmt+Pu7hd295u7+/3d/YtJXpPkmhX7vbm7/3N3n+nuv+vu27v7pd39D939tiQ/kuT/Wsfxvy7Jrd196/RYL83k1uZfso59PzrJWy641eQ24MdynlHo6e29/98kz17H6wEsBDdSAdiYR3X37atXVtXXJ3lakqumqy5L8qAVm7xp1fYfk+Q/ZTJqff9MBjZOr+P4D0vyxKr6shXrPjzJ0jr2fUeSj7vgVhM/leS7Vh1nte9P8tqq+ox1vibAjmYEGmCTVNXDMgmcNyT56O5+YJI/y2Saxlmr7171H6frPr27H5DJyHLlwt6U5Oe7+4ErHvfr7u9bx77/M8kT1rFduvt9mUxF+d5z1dXd70jyY9NtABaeAA2wee6XSRh+W5JU1TdmenLhedw/yd1J3jmd1/xd6zzW85J8WVV98fTSc/edXlruoevY95lJPreqfrCqPnZa6ydPL1O31gmMP5/kI5Jcd57X/JEkn5vJPG6AhSZAA2yS7n5Vkh9O8ntJ/ibJpyX53Qvs9qwkj0ryrkxOCvzldR7rTUken+TfZhLY35RJ+L7gz/Xufm2Sz8lkmskrq+pdSX4pkznU715j+3syCd17z/Oad2Vy1Y5zbgOwKKp79V8TAQCAczECDQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwYPe8Cxj1oAc9qK+66qqZH+c973lP7ne/+838OFthkXpJFqufReolWax+FqmXZLH6WaReksXqZ5F6SRarn0XqJdm6fl7+8pe/vbsfvHr9jgvQV111VW677baZH2d5eTkHDx6c+XG2wiL1kixWP4vUS7JY/SxSL8li9bNIvSSL1c8i9ZIsVj+L1Euydf1U1RvWWm8KBwAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBg97wLYPNU1Yb26+5NrgQAYHHNdAS6qq6rqldX1e1V9fQ1nn9YVf1mVf1pVS1X1UNnWc+i6+41H+d7TngGABgzswBdVbuSPCfJY5M8Msnhqnrkqs1+KMnPdfenJ3l2kv84q3oAAGAzzHIE+pokt3f367r7vUluTvL4Vds8MslvTj9eWuN5AADYVmpWf8Kvqq9Kcl13P3W6/OQkj+nuG1Zs8wtJ/qC7f7yqvjLJLyV5UHe/Y9VrXZ/k+iS54oorHn3zzTfPpOaV7r777lx22WUzP85WOHToUJaWluZdxqZZpPdmkXpJFqufReolWax+FqmXZLH6WaReksXqZ5F6Sbaun0OHDr28u69evX6WJxGudUbb6rT+nUl+oqqekuS3k/xVkjMfslP3Tyb5ySS5+uqr++DBg5ta6FqWl5ezFcfZKovUyyK9Nzuxl0vlZNWd+N6czyL1s0i9JIvVzyL1kixWP4vUSzL/fmYZoO9IcuWK5YcmefPKDbr7zUm+Mkmq6rIkT+jud82wJuAinS8IV9WOC8oAMGqWc6BfluQRVfXwqrpPkicluWXlBlX1oKo6W8N3J3nuDOsBAICLNrMA3d1nktyQ5MVJTiV5QXe/sqqeXVWPm252MMmrq+ovklyR5Nis6gEAgM0w0xupdPetSW5dte4ZKz5+UZIXzbIGAADYTG7lDQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGDDTW3kDALB1qmpD+3X3Jley2ARoAIAFca4gXFVC8iYyhQMAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAgEv6Riru1gMAwKhLOkCfLwi7Yw8AwPxs54HOSzpAAwCwPW3ngU5zoAEAYIAADQAAAwRoAAAYYA70TnPT5cO79DMfsKH9ctO7xvcBAFhwAvQOU8+6a3jS/PLycg4ePDh2nKr0TUO7AABcEkzhAACAAQI0AAAMEKABAGCAOdAAbEvb+S5kwKVNgAZgWzpXEJ73HcgATOEAAIABAjSwpr1796aqhh5JhvfZu3fvnDsFgDECNLCm06dPp7uHHktLS8P7nD59et6tAsAQARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNMzBiRMnsn///lx77bXZv39/Tpw4Me+SAIB12j3vAuBSc+LEiRw9ejTHjx/PPffck127duXIkSNJksOHD8+5OgDgQoxAwxY7duxYjh8/nkOHDmX37t05dOhQjh8/nmPHjs27NABgHQRo2GKnTp3KgQMH7rXuwIEDOXXq1Jwqgvnau3dvqmrdjyRD21dV9u7dO+cugUUiQMMW27dvX06ePHmvdSdPnsy+ffvmVBHM1+nTp9Pd634sLS0Nbd/dOX369LzbBBaIAA1b7OjRozly5EiWlpZy5syZLC0t5ciRIzl69Oi8SwMA1sFJhLDFzp4oeOONN+bUqVPZt29fjh075gRCANghBGiYg8OHD+fw4cNZXl7OwYMH510OADDAFA4AABggQAMAwABTOAAWxNlLvI3q7k2uBGCxCdAAC+J8QbiqBGWATWIKBwAADBCgAQBgwCURoEdvE+tWsQAAnMslMQf67G1iR2zk+rwbPYFn1FYcZ8+ePTM/BgDATnRJjEAvku4efiwtLQ3vc+edd867VQCAbUmABgCAAQI0AMAOM3p+V+Lcrs000wBdVddV1aur6vaqevoaz39CVS1V1R9X1Z9W1ZfMsh4AgEVw9vyuWU7nPH369Lzb3LZmFqCraleS5yR5bJJHJjlcVY9ctdn3JHlBd39Wkicl+S+zqgcAADbDLEegr0lye3e/rrvfm+TmJI9ftU0necD048uTvHmG9QAAwEWb5WXsHpLkTSuW70jymFXb3JTkJVV1Y5L7JfmitV6oqq5Pcn2SXHHFFVleXh4uZnSfu+++e0uOsxU22st2tUj9bPdefN8sz7uMTbWd+xmpbZG+zpLF+lpbpF6S7d/Ppfx9k8y3thq9PvK6X7jqiUm+uLufOl1+cpJruvvGFds8bVrDD1fV5yQ5nmR/d7//XK97
9dVX92233TZay5ZdB3pWn8+LsZFetrNF6mc79+L7Zvu+NxuxXT/PyXhti/R1lizW19oi9ZJs734u9e+braqtql7e3VevXj/LKRx3JLlyxfJD86FTNI4keUGSdPfvJblvkgfNsCYAALgoswzQL0vyiKp6eFXdJ5OTBG9Ztc0bk1ybJFW1L5MA/bYZ1gQAABdlZnOgu/tMVd2Q5MVJdiV5bne/sqqeneS27r4lyXck+amq+vZMTih8Sm/XvxWwpTZ6u3JfPgDArM3yJMJ0961Jbl217hkrPn5Vks+bZQ3sTOcLwtt5ThYAsPjciRAAAAYI0AAAMECABgCAAQI0AAAMEKABAGDATK/CAQC4NCcsGgEaAGbMpTlhsZjCAQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AwNzs3bs3VTX0SDK8z969ezetZgEaAIC5OX36dLp76LG0tDS8z+nTpzetZgGaudqJv3UCAJc2AZq52om/dQIAl7bd8y4AAGBezv5lc1R3b3Il7CQCNABwyTpfEK4qQZk1mcIBAAADBGiAHcbJtwDzJUAD7DBOvt2+/HIDlwYBGgA2iV9u4NIgQAMAwAABGgAABriMHQDADtPPfEBy0+Xr3v5gkixv4BisSYAGAIa4+cj81bPuGvp8Li8v5+DBg2PHqErfNFbXpUKABgCGnCu4ufEIlwpzoAEAYIAADQAAA0zhAGCunAwF7DQCNADzddO7hjbfyMlQAJvJFA4AABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIDrQAPAJhm9KUzixjCwEwnQADuMkLZ91bPuSncP7bORG8NUVfqmoV2ATSRAA+wwQhrAfJkDDQCsae/evamqdT+SDG1fVdm7d++cu4RxAjQAsKbTp0+nu9f9WFpaGtq+u3P69Ol5twnDBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABrgMNm2Tv3r1bcjb5nj17cuedd878OADA2oxAwyYZvdyTSz4BwM4kQAMAwAABGgBYeKN3VXRnRc5HgAYAFp5pdmwmJxEusBMnTuTYsWM5depU9u3bl6NHj+bw4cPzLosdop/5gOSmy4f2OZgkyxs4DgDsIAL0gjpx4kSOHj2a48eP55577smuXbty5MiRJBGiWZd61l3p7qF9lpeXc/DgwbHjVKVvGtoFAObKFI4FdezYsRw/fjyHDh3K7t27c+jQoRw/fjzHjh2bd2kAADuaEegFderUqRw4cOBe6w4cOJBTp07NqSKAS8PZk89mac+ePTM/BnBuRqAX1L59+3Ly5Ml7rTt58mT27ds3p4oAFt/oCWcbPVHNzZRgvgToBXX06NEcOXIkS0tLOXPmTJaWlnLkyJEcPXp03qUBAOxopnAsqLMnCt54440fuArHsWPHnEAIAHCRBOgFdvjw4Rw+fHhDV0YAAGBtpnAAAMAAARoAAAbMdApHVV2X5MeT7Ery0939faue/9Ekh6aLH5XkY7r7gbOsCQCA7WMn3vl2ZgG6qnYleU6Sf5rkjiQvq6pbuvtVZ7fp7m9fsf2NST5rVvUAALD97MQ7385yCsc1SW7v7td193uT3Jzk8efZ/nCSEzOsBwAALtosp3A8JMmbVizfkeQxa21YVQ9L8vAk/+scz1+f5PokueKKK7K8vDxczOg+d99995YcZytstJetskjvzSL1spHjbPd+Rvi+2dhxtsJ2f29Gbfd+Rmrb7l9ni/Z9470Z22cjxzmnjdw1aT2PJE/MZN7z2eUnJ/nP59j235zrudWPRz/60T1q0uaYpaWlLTnOVthIL1tlkd6bReplo8fZzv2M8n3jvdkq27mf0a+B7fx1tmjfN96bpS05TpLbeo08OsspHHckuXLF8kOTvPkc2z4ppm8AALADzDJAvyzJI6rq4VV1n0xC8i2rN6qqT02yJ8nvzbAWAADYFDML0N19JskNSV6c5FSSF3T3K6vq2VX1uBWbHk5y83SYHAAAtrWZXge6u29Ncuuqdc9YtXzTLGsAAIDNNNMADReyEy+eDgBc2gRo5uumdw3vspGLpwMAbJZZnkQIAAALR4AGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAM2D3vAmBR9DMfkNx0+dA+B5NkeQPHYdNU1Yb26+5NrgSAnUKAhs1y07uGd1leXs7Bgwc3vxbW7VxBuKqEZADWZAoHAAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADds+7AAAAxlXVTF9/z549M339nUyABgDYYbp7aPuqGt6HczOFAwAABgjQAAAwQIAGAIAB5kADAAuvn/mA5KbLh/Y5mCTLGzgOC0+ABgAWXj3rruGT6JaXl3Pw4MGx41SlbxrahR3IFA4AABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAbvnXQAA46pq5sfYs2fPzI8BsBMJ0AA7THcP71NVG9oPgA9lCgcAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAV+EAANbUz3xActPl697+YJIsb+AYsMMI0ADAmupZdw1d/nB5eTkHDx4cO0ZV+qaxumDeTOEAAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGDA7nkXAACwFapq5sfYs2fPzI/B/F1wBLqqbqiqDX01VNV1VfXqqrq9qp5+jm2+uqpeVVWvrKpf2MhxAADOp7uHHxvZ784775xzp2yF9YxAf2ySl1XVHyV5bpIX99mvqvOoql1JnpPknya5Y/oat3T3q1Zs84gk353k87r7dFV9zEaaAACArXLBEeju/p4kj0hyPMlTkrymqv5DVX3SBXa9Jsnt3f267n5vkpuTPH7VNt+U5DndfXp6rLcO1g8AAFtqXXOgu7ur6q+T/HWSM0n2JHlRVb20u//1OXZ7SJI3rVi+I8ljVm3zKUlSVb+bZFeSm7r7N1a/UFVdn+T6JLniiiuyvLy8nrLvZXSfu+++e0uOsxU22st2tUj9bPdeLuXvm2T71rVRi9LPdv++GbXd+xmpbdF+BiTbu7ZR27mXnfb/TV1oNkZVfWuSb0jy9iQ/neRXu/t9VfVhSV7T3WuORFfVE5N8cXc/dbr85CTXdPeNK7b59STvS/LVSR6a5HeS7O/ud56rnquvvrpvu+22gRYnJw2sY9bJvSwvL+fgwYMzP85W2Egv29ki9bOde7nUv2+2a10btUj9bOfvm43Yzv2Mft0s0s+AZHvXNmo797Kd/7+pqpd399Wr169nBPpBSb6yu9+wcmV3v7+qvvQ8+92R5MoVyw9N8uY1tvn97n5fkr+sqldnMl3kZeuoCwAAttx6rgN9a5IPnFJaVfevqsckSXefOs9+L0vyiKp6eFXdJ8mTktyyapt
fTXJo+roPymRKx+vWXz4AAGyt9YxA/9ckj1qx/J411n2I7j5TVTckeXEm85uf292vrKpnJ7mtu2+ZPvfPqupVSe5J8l3d/Y4N9HFe/cwHJDddPrTPwSRZ3sBxAABYaOsJ0LXysnXTqRvrPfnw1kxGsFeue8aKjzvJ06aPmaln3bV1c2tuGtoFAIAdZj1TOF5XVd9aVR8+fXxbTLMAAOAStZ4A/c1JPjfJX+WDl6K7fpZFAQDAdnXBqRjTm5s8aQtqAbaZqpr5Mfbs2TPzYwDAZrpggK6q+yY5kuQfJ7nv2fXd/S9nWBcwZxu5Xuh2vs4oAGyW9Uzh+PkkH5vki5P8VibXc373LIsCAIDtaj0B+pO7+98leU93/2ySf57k02ZbFgAAo6pqzcf5ntuK6XqLZj0B+n3Tf99ZVfuTXJ7kqplVBADAhnT3mo+lpaVzPmfq3bj1XM/5J6tqT5LvyVKoQsEAABjzSURBVOROgpcl+XczrQoAALap8wboqvqwJHd19+kkv53kE7ekKgCALXCh6Qvnet6o7aXtvFM4uvv9SW7YoloAALbU+aY1nG/aA5e29cyBfmlVfWdVXVlVe88+Zl4ZAABsQ+uZA332es/fsmJdx3QOAAAuQeu5E+HDt6IQAADYCdZzJ8KvX2t9d//c5pcDsPn27t2b06dPD+83em3UPXv25M477xw+DgA7y3qmcPyTFR/fN8m1Sf4oiQAN7AinT58ePulneXk5Bw8eHNrHzQgALg3rmcJx48rlqro8k9t7AwDAJWc9V+FY7W+TPGKzCwEAgJ1gPXOgfy2Tq24kk8D9yCQvmGVRAACwXa1nDvQPrfj4TJI3dPcdM6oHAAC2tfUE6DcmeUt3/32SVNVHVtVV3f36mVYGAADb0HrmQL8wyftXLN8zXQcAAJec9QTo3d393rML04/vM7uSAABg+1pPgH5bVT3u7EJVPT7J22dXEgAAbF/rmQP9zUmeX1U/MV2+I8madycEAIBFt54bqbw2yWdX1WVJqrvfPfuyAABge7rgFI6q+g9V9cDuvru7311Ve6rq329FcQAAsN2sZw70Y7v7nWcXuvt0ki+ZXUkAALB9rSdA76qqjzi7UFUfmeQjzrM9AAAsrPWcRPi8JL9ZVf9tuvyNSX52diUBAMD2dcER6O7+gST/Psm+JI9M8htJHjbjugDgXk6cOJH9+/fn2muvzf79+3PixIl5lwRcotYzAp0kf53J3Qi/OslfJvmlmVUEAKucOHEiR48ezfHjx3PPPfdk165dOXLkSJLk8OHDc64OuNSccwS6qj6lqp5RVaeS/ESSN2VyGbtD3f0T59oPADbbsWPHcvz48Rw6dCi7d+/OoUOHcvz48Rw7dmzepQGXoPONQP95kt9J8mXdfXuSVNW3b0lVAJuon/mA5KbLh/Y5mCTLGzgOM3Hq1KkcOHDgXusOHDiQU6dOzaki4FJ2vgD9hCRPSrJUVb+R5OYktSVVAWyietZd6e6hfZaXl3Pw4MGx41SlbxrahXXat29fTp48mUOHDn1g3cmTJ7Nv3745VgVcqs45haO7f6W7/0WSf5TJOMy3J7miqv5rVf2zLaoPAHL06NEcOXIkS0tLOXPmTJaWlnLkyJEcPXp03qUBl6D13Mr7PUmen+T5VbU3yROTPD3JS2ZcGwAk+eCJgjfeeGNOnTqVffv25dixY04gBOZivVfhSJJ0951J/r/pAwC2zOHDh3P48OENTa8B2EzruRMhAAAwJUADAMCAoSkcO1nV7C8gsmfPnpkfAwCA+bokAvTo5auS6eWoNrAfAACLzRQOAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0Ac3DixIns378/1157bfbv358TJ07MuyRgnS6JG6kAwHZy4sSJHD16NMePH88999yTXbt25ciRI0mSw4cPz7k64EKMQAPAFjt27FiOHz+eQ4cOZffu3Tl06FCOHz+eY8eOzbs0YB0EaADYYqdOncqBAwfute7AgQM5derUnCoCRgjQALDF9u3bl5MnT95r3cmTJ7Nv3745VQSMEKABYIsdPXo0R44cydLSUs6cOZOlpaUcOXIkR48enXdpwDo4iRAAttjZEwVvvPHGnDp1Kvv27cuxY8ecQAg7hAANAHNw+PDhHD58OMvLyzl48OC8ywEGmMIBAAADBGgAABggQAMAwIBLeg50VW3o+e6eRTkAAOwAl/QIdHef87G0tHTO5wAAuHRd0gEaAABGCdAAADBAgAYAgAECNAAADBCgAQBgwEwDdFVdV1Wvrqrbq+rpazz/lKp6W1W9Yvp46izrAQCAizWz60BX1a4kz0nyT5PckeRlVXVLd79q1aa/2N03zKoOAADYTLMcgb4mye3d/brufm+Sm5M8fobHAwCAmZvlnQgfkuRNK5bvSPKYNbZ7QlV9QZK/SPLt3f2m1RtU1fVJrk+SK664IsvLy5tf7Sp33333lhxnKyxSL8li9bNIvZy1XfsZrWuj7808+z906NB5nz/X3VWXlpZmUc7MLNr3zXbvZ6S2nfh9cz7b/b0Zsd172XE/o893N76LeSR5YpKfXrH85CT/edU2H53kI6Yff3OS/3Wh1330ox/dW2FpaWlLjrMVFqmX7sXqZ5F66e6e/EjZfjZS10bem+3af/difa0tUi/d27uf0a9p3zfb13buZTv/jE5yW6+RR2c5heOOJFeuWH5okjev3KC739Hd/zBd/Kkkj55hPQAAcNFmGaBfluQRVfXwqrpPkicluWXlBlX1cSsWH5fk1AzrAQCAizazOdDdfaaqbkjy4iS7kjy3u19ZVc/OZDj8liTfWlWPS3ImyZ1JnjKregAAYDPM8iTCdPetSW5dte4ZKz7+7iTfPcsaAABgM7kTIQAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBg97wLAHaWqtrQ8909i3IAYMsZgQaGdPc5H0tLS+d8DgAWhQANAAADBGgAABggQAMAwAABGgAABrgKBwAAc3WhKzxthj179mzaawnQAADMzUau1FRVc73CkykcAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAA1yFAwA4p1lfXmwzLy0GW0WABgDWNHqZsHlfWgy2iikcAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAa4kQoAANvOhe6Cea7nt+JmPkagAQDYdrr7nI+lpaVzPrcVBGgAABggQAMAwAABGgAABjiJELgkXOhklM2wZ8+emR8DgPkToIGFt5GTSqpqy05GAWBnMYUDAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAA2YaoKvquqp6dVXdXlVPP892X1VVXV
VXz7IeAAC4WDML0FW1K8lzkjw2ySOTHK6qR66x3f2TfGuSP5hVLQAAsFlmOQJ9TZLbu/t13f3eJDcnefwa231vkh9I8vczrAUAADZFdfdsXrjqq5Jc191PnS4/OcljuvuGFdt8VpLv6e4nVNVyku/s7tvWeK3rk1yfJFdcccWjb7755pnUvNLdd9+dyy67bObH2QqL1EuyWP0sUi/JYvVz6NChLC0tzbuMTbNI780i9ZIsVj++b7avReol2bp+Dh069PLu/pApxrtneMxaY90H0npVfViSH03ylAu9UHf/ZJKfTJKrr766Dx48uDkVnsfy8nK24jhbYZF6SRarn0XqJVm8fhapl0V6bxapl2Tx+lmkXhbpvVmkXpL59zPLKRx3JLlyxfJDk7x5xfL9k+xPslxVr0/y2UlucSIhAADb2SwD9MuSPKKqHl5V90nypCS3nH2yu9/V3Q/q7qu6+6okv5/kcWtN4QAAgO1iZgG6u88kuSHJi5OcSvKC7n5lVT27qh43q+MCAMAszXIOdLr71iS3rlr3jHNse3CWtQAAwGZwJ0IAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABuyedwEAwM5SVRt6rrtnUQ5sOSPQAMCQ7l7zsbS0dM7nhGcWiQANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMGCmAbqqrquqV1fV7VX19DWe/+aq+j9V9YqqOllVj5xlPQAAcLFmFqCraleS5yR5bJJHJjm8RkD+he7+tO7+zCQ/kORHZlUPAABshlmOQF+T5Pbufl13vzfJzUkev3KD7r5rxeL9kvQM6wEAgItW3bPJrFX1VUmu6+6nTpefnOQx3X3Dqu2+JcnTktwnyRd292vWeK3rk1yfJFdcccWjb7755pnUvNLdd9+dyy67bObH2QqL1EuyWP0sUi/JYvVz6NChLC0tzbuMTbNI780i9ZIsVj+L1EuyWP0sUi/J1vVz6NChl3f31avXzzJAPzHJF68K0Nd0943n2P5rptt/w/le9+qrr+7bbrtt0+tdbXl5OQcPHpz5cbbCIvWSLFY/i9RLslj9VFVm9fNxHhbpvVmkXpLF6meRekkWq59F6iXZun6qas0APcspHHckuXLF8kOTvPk829+c5MtnWA8AAFy0WQbolyV5RFU9vKruk+RJSW5ZuUFVPWLF4j9P8iHTNwAAYDvZPasX7u4zVXVDkhcn2ZXkud39yqp6dpLbuvuWJDdU1RcleV+S00nOO30DAADmbWYBOkm6+9Ykt65a94wVH3/bLI8PAACbzZ0IAQBggAANAAADBGgAABggQAMAwAABGgAABgjQAAAwQIAGAIABAjQAAAwQoAEAYIAADQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABgjQMAcnTpzI/v37c+2112b//v05ceLEvEsCANZp97wLgEvNiRMncvTo0Rw/fjz33HNPdu3alSNHjiRJDh8+POfqAIALMQINW+zYsWM5fvx4Dh06lN27d+fQoUM5fvx4jh07Nu/SAIB1MAINW+zUqVM5cODAvdYdOHAgp06dmlNFl7aq2tBz3T2LcgDYAYxAwxbbt29fTp48ea91J0+ezL59++ZU0aWtu9d8LC0tnfM54Rng0iZAwxY7evRojhw5kqWlpZw5cyZLS0s5cuRIjh49Ou/SAIB1MIUDttjZEwVvvPHGnDp1Kvv27cuxY8ecQAgAO4QADXNw+PDhHD58OMvLyzl48OC8ywEABpjCAQAAAwRoAAAYIEADAMAAARoAAAYI0AAAMECABgCAAQI0AAAMEKABAGCAAA0AAAMEaAAAGCBAAwDAAAEaAAAGCNAAADBAgAYAgAECNAAADBCgAQBggAANAAADBGgAABggQAMAwAABGgAABlR3z7uGIVX1tiRv2IJDPSjJ27fgOFthkXpJFqufReolWax+FqmXZLH6WaReksXqZ5F6SRarn0XqJdm6fh7W3Q9evXLHBeitUlW3dffV865jMyxSL8li9bNIvSSL1c8i9ZIsVj+L1EuyWP0sUi/JYvWzSL0k8+/HFA4AABggQAMAwAAB+tx+ct4FbKJF6iVZrH4WqZdksfpZpF6SxepnkXpJFqufReolWax+FqmXZM79mAMNAAADjEADAMAAARoAAAYI0KtU1XOr6q1V9WfzruViVdWVVbVUVaeq6pVV9W3zrmmjquq+VfWHVfUn016eNe+aNkNV7aqqP66qX593LRejql5fVf+nql5RVbfNu56LVVUPrKoXVdWfT79/PmfeNW1EVX3q9D05+7irqv7VvOu6GFX17dOfAX9WVSeq6r7zrmmjqurbpn28cie+L2v9f1lVe6vqpVX1mum/e+ZZ44hz9PPE6fvz/qraMZeAO0cvPzj9mfanVfUrVfXAedY44hz9fO+0l1dU1Uuq6uO3siYB+kP9TJLr5l3EJjmT5Du6e1+Sz07yLVX1yDnXtFH/kOQLu/szknxmkuuq6rPnXNNm+LYkp+ZdxCY51N2fuSDXGf3xJL/R3f8oyWdkh75H3f3q6XvymUkeneRvk/zKnMvasKp6SJJvTXJ1d+9PsivJk+Zb1cZU1f4k35Tkmky+xr60qh4x36qG/Uw+9P/Lpyf5ze5+RJLfnC7vFD+TD+3nz5J8ZZLf3vJqLs7P5EN7eWmS/d396Un+Isl3b3VRF+Fn8qH9/GB3f/r059uvJ3nGVhYkQK/S3b+d5M5517EZuvst3f1H04/fnUkIeMh8q9qYnrh7uvjh08eOPgO2qh6a5J8n+el518IHVdUDknxBkuNJ0t3v7e53zreqTXFtktd291bcyXWWdif5yKraneSjkrx5zvVs1L4kv9/df9vdZ5L8VpKvmHNNQ87x/+Xjk/zs9OOfTfLlW1rURVirn+4+1d2vnlNJG3aOXl4y/VpLkt9P8tAtL2yDztHPXSsW75ctzgQC9CWiqq5K8llJ/mC+lWzcdLrDK5K8NclLu3vH9jL1Y0n+dZL3z7uQTdBJXlJVL6+q6+ddzEX6xCRvS/LfptNrfrqq7jfvojbBk5KcmHcRF6O7/yrJDyV5Y5K3J
HlXd79kvlVt2J8l+YKq+uiq+qgkX5LkyjnXtBmu6O63JJNBnCQfM+d6WNu/TPI/5l3ExaqqY1X1piRfGyPQbLaquizJLyX5V6t+Y9tRuvue6Z9qHprkmumfQHekqvrSJG/t7pfPu5ZN8nnd/agkj81kqtAXzLugi7A7yaOS/Nfu/qwk78nO+jP0h6iq+yR5XJIXzruWizGdT/v4JA9P8vFJ7ldVXzffqjamu08l+f5M/qz+G0n+JJNpdzBTVXU0k6+158+7lovV3Ue7+8pMerlhK48tQC+4qvrwTMLz87v7l+ddz2aY/jl9OTt7rvrnJXlcVb0+yc1JvrCqnjffkjauu988/fetmcyxvWa+FV2UO5LcseIvHC/KJFDvZI9N8kfd/TfzLuQifVGSv+zut3X3+5L8cpLPnXNNG9bdx7v7Ud39BZn8efo1865pE/xNVX1ckkz/feuc62GFqvqGJF+a5Gt7sW4E8gtJnrCVBxSgF1hVVSbzOE9194/Mu56LUVUPPnvGcFV9ZCb/kf75fKvauP7/27u3EKuqOI7j3x9NpF3owbJ7TJQk1INghBGFoJkF+VI9RIRWUEHkW5Q+FNVDvkdQiWCQBmZ0gcIsIii6UOakXYSg0HroKYNCK7F/D3tPjdOM43bCM2f8fmCYfdbZa+3/YuDM/6y91l5Vq6rq/KoapLm1/m5V9eVIWpJTkpw2fAwsobk93Zeq6ifghySXtkWLgK97GNL/4Tb6fPpGaw+wIMnJ7efbIvp0gSdAktnt7wtpFqpNh7/R68Dy9ng58FoPY9EISZYCDwHLqmpfr+OZrFGLbpdxjHOCgWN5sX6Q5EVgIXBGkh+BR6tqXW+jOmpXA3cAO9u5wwCrq+rNHsZ0tM4Bnk9yAs0Xv01V1dePfptGzgJeafIZBoCNVbWltyFN2gPAhnbqw3fAnT2O56i182uvA+7tdSyTVVWfJNkMfE5zC3o7/b098ctJZgEHgPuram+vA+pirP+XwBpgU5K7ab7w3Nq7CLsZpz8/A08BZwJvJBmqqut7F+WRGacvq4CTgLfbz+uPq+q+ngXZwTj9ubEd6PgL2A0c0764lbckSZLUgVM4JEmSpA5MoCVJkqQOTKAlSZKkDkygJUmSpA5MoCVJkqQOTKAlaYpIcjDJUJIvk7zUPoLucOevHvX6w0lce0WSc8d57/Eki8coX5jEx0lKOu6YQEvS1LG/quZV1eXAn0z8XNNDEuiqmsyufCtotsf+j6p6pKremUTbkjStmEBL0tT0PnAJQJJXk2xL8lWSe9qyNcDMdsR6Q1v223DlJA8m+TTJjiSPtWWDSb5JsrZta2uSmUluAa6g2TxmqN3tkxFtrW/PIcnSJLuSfECze54kHXdMoCVpikkyANwA7GyL7qqq+TRJ7soks6rqYf4dsb59VP0lwBzgSmAeMD/Jte3bc4Cnq+oy4Bfg5qraDHwG3N62t3+cuGYAa4GbgGuAs/+/XktS/zCBlqSpY2aSIZpkdg+wri1fmeQL4GPgApok+HCWtD/baba9njuizvdVNdQebwMGO8Q3t63/bTXb2L7Qoa4kTRsDvQ5AkvSP/VU1b2RBkoXAYuCqqtqX5D1gxgTtBHiyqp4d1dYg8MeIooPAIdM1jkB1PF+Sph1HoCVpajsd2Nsmz3OBBSPeO5DkxDHqvAXcleRUgCTnJZk9wXV+BU6b4JxdwEVJLm5f3zZx+JI0/ZhAS9LUtgUYSLIDeIJmGsew54Adw4sIh1XVVmAj8FGSncBmJk6O1wPPjLWIcES7vwP3AG+0iwh3H0V/JKnvpZnGJkmSJOlIOAItSZIkdWACLUmSJHVgAi1JkiR1YAItSZIkdWACLUmSJHVgAi1JkiR1YAItSZIkdfA3wy2FYBAw2qsAAAAASUVORK5CYII=\n", |
|
|
1139 |
"text/plain": [ |
|
|
1140 |
"<Figure size 864x720 with 1 Axes>" |
|
|
1141 |
] |
|
|
1142 |
}, |
|
|
1143 |
"metadata": { |
|
|
1144 |
"needs_background": "light" |
|
|
1145 |
}, |
|
|
1146 |
"output_type": "display_data" |
|
|
1147 |
} |
|
|
1148 |
], |
|
|
1149 |
"source": [ |
|
|
1150 |
"sio.savemat('Result/Res_TemporalCNN.mat',{\"loss\":fold_loss,\"acc\":fold_acc,\"val loss\":fold_vloss,\"val acc\":fold_vacc})\n", |
|
|
1151 |
"\n", |
|
|
1152 |
"fig = plt.figure(figsize=(12,10))\n", |
|
|
1153 |
"plt.grid()\n", |
|
|
1154 |
"plt.boxplot(fold_vacc)\n", |
|
|
1155 |
"plt.suptitle('Cross-Validation Accuracy\\n Parallel CNN')\n", |
|
|
1156 |
"ax = plt.gca()\n", |
|
|
1157 |
"plt.xlabel('Patient id')\n", |
|
|
1158 |
"plt.ylabel('Accuracy')\n", |
|
|
1159 |
"plt.savefig('Result/ParallelCNN.png')\n", |
|
|
1160 |
"plt.show()" |
|
|
1161 |
] |
|
|
1162 |
}, |
|
|
1163 |
{ |
|
|
1164 |
"cell_type": "code", |
|
|
1165 |
"execution_count": null, |
|
|
1166 |
"metadata": {}, |
|
|
1167 |
"outputs": [], |
|
|
1168 |
"source": [ |
|
|
1169 |
"sio.savemat('Result/Res_2dTemporalCNN.mat',{\"loss\":fold_loss,\"acc\":fold_acc,\"val loss\":fold_vloss,\"val acc\":fold_vacc})\n", |
|
|
1170 |
"\n", |
|
|
1171 |
"fig = plt.figure(figsize=(12,10))\n", |
|
|
1172 |
"plt.grid()\n", |
|
|
1173 |
"plt.boxplot(fold_vacc)\n", |
|
|
1174 |
"plt.suptitle('Cross-Validation Accuracy\\n Parallel CNN')\n", |
|
|
1175 |
"ax = plt.gca()\n", |
|
|
1176 |
"plt.xlabel('Patient id')\n", |
|
|
1177 |
"plt.ylabel('Accuracy')\n", |
|
|
1178 |
"plt.savefig('Result/2dParallelCNN.png')\n", |
|
|
1179 |
"plt.show()" |
|
|
1180 |
] |
|
|
1181 |
}, |
|
|
1182 |
{ |
|
|
1183 |
"cell_type": "markdown", |
|
|
1184 |
"metadata": {}, |
|
|
1185 |
"source": [ |
|
|
1186 |
"## LSTM Model (C)" |
|
|
1187 |
] |
|
|
1188 |
}, |
|
|
1189 |
{ |
|
|
1190 |
"cell_type": "code", |
|
|
1191 |
"execution_count": 238, |
|
|
1192 |
"metadata": {}, |
|
|
1193 |
"outputs": [], |
|
|
1194 |
"source": [ |
|
|
1195 |
"class LSTM(nn.Module):\n", |
|
|
1196 |
" def __init__(self):\n", |
|
|
1197 |
" super(LSTM, self).__init__()\n", |
|
|
1198 |
" self.conv1 = nn.Conv2d(3,3,3)\n", |
|
|
1199 |
" self.conv2 = nn.Conv2d(3,3,5)\n", |
|
|
1200 |
" self.conv3 = nn.Conv2d(3,3,3)\n", |
|
|
1201 |
" self.conv4 = nn.Conv2d(3,3,5)\n", |
|
|
1202 |
" self.conv5 = nn.Conv2d(3,3,3)\n", |
|
|
1203 |
" self.conv6 = nn.Conv2d(3,3,5)\n", |
|
|
1204 |
" self.conv7 = nn.Conv2d(3,3,3)\n", |
|
|
1205 |
" self.conv8 = nn.Conv2d(3,3,5)\n", |
|
|
1206 |
" self.conv9 = nn.Conv2d(3,3,3)\n", |
|
|
1207 |
" self.conv10 = nn.Conv2d(3,3,5)\n", |
|
|
1208 |
" self.conv11 = nn.Conv2d(3,3,3)\n", |
|
|
1209 |
" self.conv12 = nn.Conv2d(3,3,5)\n", |
|
|
1210 |
" self.conv13 = nn.Conv2d(3,3,3)\n", |
|
|
1211 |
" self.conv14 = nn.Conv2d(3,3,5) \n", |
|
|
1212 |
" self.pool1 = nn.MaxPool2d(2)\n", |
|
|
1213 |
" self.pool2 = nn.MaxPool2d(2) \n", |
|
|
1214 |
" self.rnn1 = nn.LSTMCell(507,2)\n", |
|
|
1215 |
" self.fc1 = nn.Linear(120,512)\n", |
|
|
1216 |
" self.fc2 = nn.Linear(512,4)\n", |
|
|
1217 |
" self.max = nn.Softmax()\n", |
|
|
1218 |
" \n", |
|
|
1219 |
" def forward(self, x):\n", |
|
|
1220 |
" batch_size = x.shape[0]\n", |
|
|
1221 |
" tmp = torch.zeros(batch_size, x.shape[1], x.shape[2],26,26).cuda()\n", |
|
|
1222 |
" tmp[:,0] = F.relu(self.conv2(F.relu(self.conv1(x[:,0]))))\n", |
|
|
1223 |
" tmp[:,1] = F.relu(self.conv4(F.relu(self.conv3(x[:,1]))))\n", |
|
|
1224 |
" tmp[:,2] = F.relu(self.conv6(F.relu(self.conv5(x[:,2]))))\n", |
|
|
1225 |
" tmp[:,3] = F.relu(self.conv8(F.relu(self.conv7(x[:,3]))))\n", |
|
|
1226 |
" tmp[:,4] = F.relu(self.conv10(F.relu(self.conv9(x[:,4]))))\n", |
|
|
1227 |
" tmp[:,5] = F.relu(self.conv12(F.relu(self.conv11(x[:,5]))))\n", |
|
|
1228 |
" tmp[:,6] = F.relu(self.conv14(F.relu(self.conv13(x[:,6]))))\n", |
|
|
1229 |
" x = torch.zeros(batch_size, x.shape[1], x.shape[2],13,13).cuda()\n", |
|
|
1230 |
" for i in range(7):\n", |
|
|
1231 |
" x[:,i] = self.pool1(tmp[:,i])\n", |
|
|
1232 |
" x = x.view(batch_size,x.shape[1],-1)\n", |
|
|
1233 |
" hx = torch.randn(1,batch_size)\n", |
|
|
1234 |
" print(hx.size())\n", |
|
|
1235 |
" print(x[:,0].size())\n", |
|
|
1236 |
" self.rnn1(x[:,0],hx)\n", |
|
|
1237 |
" x = x.reshape(batch_size, x.shape[2], x.shape[1]*x.shape[3],-1) # img reshape\n", |
|
|
1238 |
" x = F.relu(self.conv15(x))\n", |
|
|
1239 |
" x = F.relu(self.conv16(x))\n", |
|
|
1240 |
" x = self.pool2(x)\n", |
|
|
1241 |
" x = x.view(batch_size,-1)\n", |
|
|
1242 |
" x = self.fc1(x)\n", |
|
|
1243 |
" x = self.fc2(x)\n", |
|
|
1244 |
" x = self.max(x)\n", |
|
|
1245 |
" return x" |
|
|
1246 |
] |
|
|
1247 |
}, |
|
|
1248 |
{ |
|
|
1249 |
"cell_type": "code", |
|
|
1250 |
"execution_count": 239, |
|
|
1251 |
"metadata": { |
|
|
1252 |
"collapsed": true, |
|
|
1253 |
"jupyter": { |
|
|
1254 |
"outputs_hidden": true |
|
|
1255 |
} |
|
|
1256 |
}, |
|
|
1257 |
"outputs": [ |
|
|
1258 |
{ |
|
|
1259 |
"name": "stdout", |
|
|
1260 |
"output_type": "stream", |
|
|
1261 |
"text": [ |
|
|
1262 |
"torch.Size([1, 2])\n", |
|
|
1263 |
"torch.Size([2, 507])\n" |
|
|
1264 |
] |
|
|
1265 |
}, |
|
|
1266 |
{ |
|
|
1267 |
"ename": "IndexError", |
|
|
1268 |
"evalue": "Dimension out of range (expected to be in range of [-1, 0], but got 1)", |
|
|
1269 |
"output_type": "error", |
|
|
1270 |
"traceback": [ |
|
|
1271 |
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", |
|
|
1272 |
"\u001b[1;31mIndexError\u001b[0m Traceback (most recent call last)", |
|
|
1273 |
"\u001b[1;32m<ipython-input-239-37f3230988bf>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[0mnet\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mLSTM\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mnet\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtmp\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m", |
|
|
1274 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 539\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 540\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 542\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 543\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1275 |
"\u001b[1;32m<ipython-input-238-e8a90c20b4cc>\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m 40\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mhx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 41\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 42\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrnn1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mhx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 43\u001b[0m \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m3\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m# img reshape\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 44\u001b[0m \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv15\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1276 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 539\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 540\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 542\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 543\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1277 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\rnn.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input, hx)\u001b[0m\n\u001b[0;32m 938\u001b[0m \u001b[0mzeros\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mhidden_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 939\u001b[0m \u001b[0mhx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mzeros\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 940\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcheck_forward_hidden\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'[0]'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 941\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcheck_forward_hidden\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'[1]'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 942\u001b[0m return _VF.lstm_cell(\n", |
|
|
1278 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\rnn.py\u001b[0m in \u001b[0;36mcheck_forward_hidden\u001b[1;34m(self, input, hx, hidden_label)\u001b[0m\n\u001b[0;32m 769\u001b[0m input.size(0), hidden_label, hx.size(0)))\n\u001b[0;32m 770\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 771\u001b[1;33m \u001b[1;32mif\u001b[0m \u001b[0mhx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m!=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mhidden_size\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 772\u001b[0m raise RuntimeError(\n\u001b[0;32m 773\u001b[0m \"hidden{} has inconsistent hidden_size: got {}, expected {}\".format(\n", |
|
|
1279 |
"\u001b[1;31mIndexError\u001b[0m: Dimension out of range (expected to be in range of [-1, 0], but got 1)" |
|
|
1280 |
] |
|
|
1281 |
} |
|
|
1282 |
], |
|
|
1283 |
"source": [ |
|
|
1284 |
"net = LSTM().cuda()\n", |
|
|
1285 |
"net(torch.from_numpy(tmp[0:2]).to(torch.float32).cuda())" |
|
|
1286 |
] |
|
|
1287 |
}, |
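{ |
"cell_type": "markdown", |
"metadata": {}, |
"source": [ |
"Minimal sketch of the `nn.LSTMCell` calling convention (the sizes and variable names below just mirror `rnn1` above and are illustrative only): the hidden state must be a tuple `(h_0, c_0)`, each of shape `(batch, hidden_size)`, on the same device and dtype as the input. The `IndexError` above comes from passing a single `(1, batch)` tensor instead." |
] |
}, |
{ |
"cell_type": "code", |
"execution_count": null, |
"metadata": {}, |
"outputs": [], |
"source": [ |
"# Sketch: expected nn.LSTMCell usage (sizes mirror rnn1 above; illustrative only)\n", |
"cell = nn.LSTMCell(507, 2).cuda()   # input_size=507, hidden_size=2\n", |
"inp = torch.randn(2, 507).cuda()    # one time step for a batch of 2\n", |
"h0 = torch.zeros(2, 2).cuda()       # (batch, hidden_size)\n", |
"c0 = torch.zeros(2, 2).cuda()       # (batch, hidden_size)\n", |
"h1, c1 = cell(inp, (h0, c0))        # hx is the tuple (h_0, c_0)\n", |
"print(h1.shape, c1.shape)" |
] |
}, |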
|
|
1288 |
{ |
|
|
1289 |
"cell_type": "code", |
|
|
1290 |
"execution_count": 499, |
|
|
1291 |
"metadata": {}, |
|
|
1292 |
"outputs": [], |
|
|
1293 |
"source": [ |
|
|
1294 |
"tot_img = Images[:,0,:,:]" |
|
|
1295 |
] |
|
|
1296 |
}, |
|
|
1297 |
{ |
|
|
1298 |
"cell_type": "code", |
|
|
1299 |
"execution_count": 393, |
|
|
1300 |
"metadata": { |
|
|
1301 |
"collapsed": true, |
|
|
1302 |
"jupyter": { |
|
|
1303 |
"outputs_hidden": true |
|
|
1304 |
} |
|
|
1305 |
}, |
|
|
1306 |
"outputs": [ |
|
|
1307 |
{ |
|
|
1308 |
"name": "stdout", |
|
|
1309 |
"output_type": "stream", |
|
|
1310 |
"text": [ |
|
|
1311 |
"Begin Training Fold 1/5\n" |
|
|
1312 |
] |
|
|
1313 |
}, |
|
|
1314 |
{ |
|
|
1315 |
"name": "stderr", |
|
|
1316 |
"output_type": "stream", |
|
|
1317 |
"text": [ |
|
|
1318 |
"c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:19: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
1319 |
] |
|
|
1320 |
}, |
|
|
1321 |
{ |
|
|
1322 |
"name": "stdout", |
|
|
1323 |
"output_type": "stream", |
|
|
1324 |
"text": [ |
|
|
1325 |
"[1, 50] loss: 1.385\tAccuracy : 0.314\t\tval-loss: 1.352\tval-Accuracy : 0.309\n", |
|
|
1326 |
"[11, 50] loss: 0.841\tAccuracy : 0.834\t\tval-loss: 0.899\tval-Accuracy : 0.845\n", |
|
|
1327 |
"[21, 50] loss: 0.826\tAccuracy : 0.865\t\tval-loss: 0.877\tval-Accuracy : 0.867\n", |
|
|
1328 |
"[31, 50] loss: 0.804\tAccuracy : 0.875\t\tval-loss: 0.876\tval-Accuracy : 0.869\n", |
|
|
1329 |
"[41, 50] loss: 0.797\tAccuracy : 0.883\t\tval-loss: 0.876\tval-Accuracy : 0.867\n", |
|
|
1330 |
"Finish Training Fold 1/5\n", |
|
|
1331 |
"Begin Training Fold 2/5\n", |
|
|
1332 |
"[1, 50] loss: 1.386\tAccuracy : 0.317\t\tval-loss: 1.370\tval-Accuracy : 0.277\n", |
|
|
1333 |
"[11, 50] loss: 0.869\tAccuracy : 0.831\t\tval-loss: 0.918\tval-Accuracy : 0.828\n", |
|
|
1334 |
"[21, 50] loss: 0.842\tAccuracy : 0.827\t\tval-loss: 0.918\tval-Accuracy : 0.824\n", |
|
|
1335 |
"[31, 50] loss: 0.825\tAccuracy : 0.831\t\tval-loss: 0.911\tval-Accuracy : 0.826\n", |
|
|
1336 |
"[41, 50] loss: 0.824\tAccuracy : 0.832\t\tval-loss: 0.907\tval-Accuracy : 0.833\n", |
|
|
1337 |
"Finish Training Fold 2/5\n", |
|
|
1338 |
"Begin Training Fold 3/5\n", |
|
|
1339 |
"[1, 50] loss: 1.384\tAccuracy : 0.482\t\tval-loss: 1.330\tval-Accuracy : 0.485\n", |
|
|
1340 |
"[11, 50] loss: 0.871\tAccuracy : 0.837\t\tval-loss: 0.913\tval-Accuracy : 0.824\n", |
|
|
1341 |
"[21, 50] loss: 0.830\tAccuracy : 0.862\t\tval-loss: 0.906\tval-Accuracy : 0.841\n", |
|
|
1342 |
"[31, 50] loss: 0.815\tAccuracy : 0.881\t\tval-loss: 0.880\tval-Accuracy : 0.861\n", |
|
|
1343 |
"[41, 50] loss: 0.813\tAccuracy : 0.878\t\tval-loss: 0.882\tval-Accuracy : 0.861\n", |
|
|
1344 |
"Finish Training Fold 3/5\n", |
|
|
1345 |
"Begin Training Fold 4/5\n", |
|
|
1346 |
"[1, 50] loss: 1.392\tAccuracy : 0.318\t\tval-loss: 1.384\tval-Accuracy : 0.348\n", |
|
|
1347 |
"[11, 50] loss: 0.927\tAccuracy : 0.791\t\tval-loss: 0.941\tval-Accuracy : 0.800\n", |
|
|
1348 |
"[21, 50] loss: 0.840\tAccuracy : 0.839\t\tval-loss: 0.917\tval-Accuracy : 0.828\n", |
|
|
1349 |
"[31, 50] loss: 0.823\tAccuracy : 0.848\t\tval-loss: 0.899\tval-Accuracy : 0.845\n", |
|
|
1350 |
"[41, 50] loss: 0.813\tAccuracy : 0.845\t\tval-loss: 0.897\tval-Accuracy : 0.846\n", |
|
|
1351 |
"Finish Training Fold 4/5\n", |
|
|
1352 |
"Begin Training Fold 5/5\n", |
|
|
1353 |
"[1, 50] loss: 1.335\tAccuracy : 0.418\t\tval-loss: 1.265\tval-Accuracy : 0.436\n", |
|
|
1354 |
"[11, 50] loss: 0.880\tAccuracy : 0.821\t\tval-loss: 0.935\tval-Accuracy : 0.807\n", |
|
|
1355 |
"[21, 50] loss: 0.832\tAccuracy : 0.850\t\tval-loss: 0.907\tval-Accuracy : 0.835\n", |
|
|
1356 |
"[31, 50] loss: 0.823\tAccuracy : 0.837\t\tval-loss: 0.905\tval-Accuracy : 0.833\n", |
|
|
1357 |
"[41, 50] loss: 0.810\tAccuracy : 0.837\t\tval-loss: 0.906\tval-Accuracy : 0.833\n", |
|
|
1358 |
"Finish Training Fold 5/5\n" |
|
|
1359 |
] |
|
|
1360 |
} |
|
|
1361 |
], |
|
|
1362 |
"source": [ |
|
|
1363 |
"id_patient = \n", |
|
|
1364 |
"\n", |
|
|
1365 |
"n_fold = 5\n", |
|
|
1366 |
"length = len(Mean_Images)\n", |
|
|
1367 |
"\n", |
|
|
1368 |
"fold_vloss = np.zeros((n_fold,n_patient))\n", |
|
|
1369 |
"fold_loss = np.zeros((n_fold,n_patient))\n", |
|
|
1370 |
"fold_vacc = np.zeros((n_fold,n_patient))\n", |
|
|
1371 |
"fold_acc = np.zeros((n_fold,n_patient))\n", |
|
|
1372 |
"\n", |
|
|
1373 |
"train_id, test_id = kfold(length,n_fold)\n", |
|
|
1374 |
"for fold in range(n_fold):\n", |
|
|
1375 |
" X_train = Mean_Images[train_id[fold]]\n", |
|
|
1376 |
" X_test = Mean_Images[test_id[fold]] \n", |
|
|
1377 |
" y_train = Label[train_id[fold]]\n", |
|
|
1378 |
" y_test = Label[test_id[fold]] \n", |
|
|
1379 |
" \n", |
|
|
1380 |
" print(\"Begin Training Fold %d/%d\" % \n", |
|
|
1381 |
" (fold+1,n_fold))\n", |
|
|
1382 |
" \n", |
|
|
1383 |
" CNN = BasicCNN().cuda(0)\n", |
|
|
1384 |
" criterion = nn.CrossEntropyLoss()\n", |
|
|
1385 |
" optimizer = optim.SGD(CNN.parameters(), lr=0.001, momentum=0.9)\n", |
|
|
1386 |
" \n", |
|
|
1387 |
" n_epochs = 50\n", |
|
|
1388 |
" for epoch in range(n_epochs):\n", |
|
|
1389 |
" running_loss = 0.0\n", |
|
|
1390 |
" batchsize = 10\n", |
|
|
1391 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
1392 |
" optimizer.zero_grad()\n", |
|
|
1393 |
"\n", |
|
|
1394 |
" # forward + backward + optimize\n", |
|
|
1395 |
" outputs = CNN(torch.from_numpy(X_train[i:i+batchsize]).to(torch.float32).cuda())\n", |
|
|
1396 |
" loss = criterion(outputs, torch.from_numpy(y_train[i:i+batchsize]).to(torch.long).cuda())\n", |
|
|
1397 |
" loss.backward()\n", |
|
|
1398 |
" optimizer.step()\n", |
|
|
1399 |
" running_loss += loss.item()\n", |
|
|
1400 |
"\n", |
|
|
1401 |
" #acc\n", |
|
|
1402 |
" _, idx = torch.max(CNN(torch.from_numpy(X_train[:]).to(torch.float32).cuda()).data,1)\n", |
|
|
1403 |
" acc = (idx == torch.from_numpy(y_train).cuda()).sum().item()/len(y_train)\n", |
|
|
1404 |
"\n", |
|
|
1405 |
" #val Loss\n", |
|
|
1406 |
" val_outputs = CNN(torch.from_numpy(X_test[:]).to(torch.float32).cuda())\n", |
|
|
1407 |
" val_loss = criterion(val_outputs, torch.from_numpy(y_test[:]).to(torch.long).cuda())\n", |
|
|
1408 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
1409 |
" val_acc = (idx == torch.from_numpy(y_test).cuda()).sum().item()/len(y_test)\n", |
|
|
1410 |
"\n", |
|
|
1411 |
" if epoch%10==0:\n", |
|
|
1412 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
1413 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
1414 |
" fold_vloss.append(val_loss)\n", |
|
|
1415 |
" fold_loss.append(running_loss/i)\n", |
|
|
1416 |
" fold_vacc.append(val_acc)\n", |
|
|
1417 |
" fold_acc.append(acc)\n", |
|
|
1418 |
" print('Finish Training Fold %d/%d' % \n", |
|
|
1419 |
" (fold+1,n_fold))" |
|
|
1420 |
] |
|
|
1421 |
}, |
|
|
1422 |
{ |
|
|
1423 |
"cell_type": "code", |
|
|
1424 |
"execution_count": 10, |
|
|
1425 |
"metadata": {}, |
|
|
1426 |
"outputs": [], |
|
|
1427 |
"source": [ |
|
|
1428 |
"class BasicCNN(nn.Module):\n", |
|
|
1429 |
" def __init__(self):\n", |
|
|
1430 |
" super(BasicCNN, self).__init__()\n", |
|
|
1431 |
" self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n", |
|
|
1432 |
" self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
1433 |
" self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
1434 |
" self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
1435 |
" self.pool = nn.MaxPool2d((2,2))\n", |
|
|
1436 |
" self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
1437 |
" self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
1438 |
" self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n", |
|
|
1439 |
" self.fc1 = nn.Linear(507,512)\n", |
|
|
1440 |
" self.fc2 = nn.Linear(512,4)\n", |
|
|
1441 |
" self.max = nn.Softmax()\n", |
|
|
1442 |
" \n", |
|
|
1443 |
" def forward(self, x):\n", |
|
|
1444 |
" batch_size = x.shape[0]\n", |
|
|
1445 |
" x = F.relu(self.conv1(x))\n", |
|
|
1446 |
" x = F.relu(self.conv2(x))\n", |
|
|
1447 |
" x = F.relu(self.conv3(x))\n", |
|
|
1448 |
" x = F.relu(self.conv4(x))\n", |
|
|
1449 |
" x = self.pool(x)\n", |
|
|
1450 |
" x = F.relu(self.conv5(x))\n", |
|
|
1451 |
" x = F.relu(self.conv6(x))\n", |
|
|
1452 |
" x = self.pool(x)\n", |
|
|
1453 |
" x = F.relu(self.conv7(x))\n", |
|
|
1454 |
" x = self.pool(x)\n", |
|
|
1455 |
" return x" |
|
|
1456 |
] |
|
|
1457 |
}, |
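{ |
"cell_type": "markdown", |
"metadata": {}, |
"source": [ |
"Size check for `BasicCNN` on the 3x32x32 mean images: the padded 3x3 convolutions keep the 32x32 spatial size, the three 2x2 max-pools reduce it 32 -> 16 -> 8 -> 4, and `conv7` outputs 128 channels, so the flattened vector fed to `fc1` has `128*4*4 = 2048` entries." |
] |
}, |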
|
|
1458 |
{ |
|
|
1459 |
"cell_type": "code", |
|
|
1460 |
"execution_count": null, |
|
|
1461 |
"metadata": {}, |
|
|
1462 |
"outputs": [], |
|
|
1463 |
"source": [] |
|
|
1464 |
}, |
|
|
1465 |
{ |
|
|
1466 |
"cell_type": "code", |
|
|
1467 |
"execution_count": 51, |
|
|
1468 |
"metadata": {}, |
|
|
1469 |
"outputs": [ |
|
|
1470 |
{ |
|
|
1471 |
"data": { |
|
|
1472 |
"text/plain": [ |
|
|
1473 |
"192" |
|
|
1474 |
] |
|
|
1475 |
}, |
|
|
1476 |
"execution_count": 51, |
|
|
1477 |
"metadata": {}, |
|
|
1478 |
"output_type": "execute_result" |
|
|
1479 |
} |
|
|
1480 |
], |
|
|
1481 |
"source": [ |
|
|
1482 |
"64*3" |
|
|
1483 |
] |
|
|
1484 |
}, |
|
|
1485 |
{ |
|
|
1486 |
"cell_type": "code", |
|
|
1487 |
"execution_count": 36, |
|
|
1488 |
"metadata": {}, |
|
|
1489 |
"outputs": [], |
|
|
1490 |
"source": [ |
|
|
1491 |
"class MaxCNN(nn.Module):\n", |
|
|
1492 |
" def __init__(self, input_image, kernel=(3,3), stride=1, padding=1,max_kernel=(2,2)):\n", |
|
|
1493 |
" super(MaxCNN, self).__init__()\n", |
|
|
1494 |
" \n", |
|
|
1495 |
" \n", |
|
|
1496 |
" n_window = input_image.shape[1]\n", |
|
|
1497 |
" n_channel = input_image.shape[2]\n", |
|
|
1498 |
" \n", |
|
|
1499 |
" self.conv1 = nn.Conv2d(n_channel,32,kernel,stride=stride, padding=padding)\n", |
|
|
1500 |
" self.conv2 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n", |
|
|
1501 |
" self.conv3 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n", |
|
|
1502 |
" self.conv4 = nn.Conv2d(32,32,kernel,stride=stride, padding=padding)\n", |
|
|
1503 |
" self.pool1 = nn.MaxPool2d(max_kernel)\n", |
|
|
1504 |
" self.conv5 = nn.Conv2d(32,64,kernel,stride=stride,padding=padding)\n", |
|
|
1505 |
" self.conv6 = nn.Conv2d(64,64,kernel,stride=stride,padding=padding)\n", |
|
|
1506 |
" self.conv7 = nn.Conv2d(64,128,kernel,stride=stride,padding=padding)\n", |
|
|
1507 |
" \n", |
|
|
1508 |
" self.pool = nn.MaxPool2d((n_window,1))\n", |
|
|
1509 |
" self.drop = nn.Dropout(p=0.5)\n", |
|
|
1510 |
" \n", |
|
|
1511 |
" self.fc = nn.Linear(n_window*int(4*4*128/n_window),512)\n", |
|
|
1512 |
" self.fc2 = nn.Linear(512,4)\n", |
|
|
1513 |
" self.max = nn.LogSoftmax()\n", |
|
|
1514 |
"\n", |
|
|
1515 |
" def forward(self, x):\n", |
|
|
1516 |
" if x.get_device() == 0:\n", |
|
|
1517 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n", |
|
|
1518 |
" else:\n", |
|
|
1519 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n", |
|
|
1520 |
" for i in range(7):\n", |
|
|
1521 |
" tmp[:,i] = self.pool1( F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n", |
|
|
1522 |
" x = tmp.reshape(x.shape[0], x.shape[1],4*128*4,1)\n", |
|
|
1523 |
" x = self.pool(x)\n", |
|
|
1524 |
" x = x.view(x.shape[0],-1)\n", |
|
|
1525 |
" x = self.fc2(self.fc(x))\n", |
|
|
1526 |
" x = self.max(x)\n", |
|
|
1527 |
" return x" |
|
|
1528 |
] |
|
|
1529 |
}, |
|
|
1530 |
{ |
|
|
1531 |
"cell_type": "code", |
|
|
1532 |
"execution_count": 38, |
|
|
1533 |
"metadata": {}, |
|
|
1534 |
"outputs": [], |
|
|
1535 |
"source": [ |
|
|
1536 |
"class MaxCNN(nn.Module):\n", |
|
|
1537 |
" def __init__(self):\n", |
|
|
1538 |
" super(MaxCNN, self).__init__()\n", |
|
|
1539 |
" \n", |
|
|
1540 |
" \n", |
|
|
1541 |
" self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n", |
|
|
1542 |
" self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
1543 |
" self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
1544 |
" self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
1545 |
" self.pool1 = nn.MaxPool2d((2,2))\n", |
|
|
1546 |
" self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
1547 |
" self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
1548 |
" self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n", |
|
|
1549 |
" \n", |
|
|
1550 |
" \n", |
|
|
1551 |
" self.pool = nn.MaxPool2d((7,1))\n", |
|
|
1552 |
" self.drop = nn.Dropout(p=0.5)\n", |
|
|
1553 |
" self.fc = nn.Linear(2044,512)\n", |
|
|
1554 |
" self.fc2 = nn.Linear(512,4)\n", |
|
|
1555 |
" self.max = nn.LogSoftmax()\n", |
|
|
1556 |
" \n", |
|
|
1557 |
" def forward(self, x):\n", |
|
|
1558 |
" if x.get_device() == 0:\n", |
|
|
1559 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n", |
|
|
1560 |
" else:\n", |
|
|
1561 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n", |
|
|
1562 |
" for i in range(7):\n", |
|
|
1563 |
" tmp[:,i] = self.pool1( F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n", |
|
|
1564 |
" x = tmp.reshape(x.shape[0], x.shape[1],4*128*4,1)\n", |
|
|
1565 |
" x = self.pool(x)\n", |
|
|
1566 |
" x = x.view(x.shape[0],-1)\n", |
|
|
1567 |
" #x = self.drop(x)\n", |
|
|
1568 |
" x = self.fc2(self.fc(x))\n", |
|
|
1569 |
" x = self.max(x)\n", |
|
|
1570 |
" return x" |
|
|
1571 |
] |
|
|
1572 |
}, |
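{ |
"cell_type": "markdown", |
"metadata": {}, |
"source": [ |
"Where the 2044 in `nn.Linear(2044,512)` comes from: each of the 7 windows yields a `128*4*4 = 2048`-dimensional feature vector, the reshape stacks them into a `(batch, 7, 2048, 1)` tensor, and `nn.MaxPool2d((7,1))` pools the 2048-long axis in non-overlapping windows of 7, giving `floor(2048/7) = 292` values per window and `7*292 = 2044` after flattening. The parametrized `MaxCNN` above computes the same number as `n_window*int(4*4*128/n_window)`." |
] |
}, |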
|
|
1573 |
{ |
|
|
1574 |
"cell_type": "code", |
|
|
1575 |
"execution_count": 43, |
|
|
1576 |
"metadata": { |
|
|
1577 |
"collapsed": true, |
|
|
1578 |
"jupyter": { |
|
|
1579 |
"outputs_hidden": true |
|
|
1580 |
} |
|
|
1581 |
}, |
|
|
1582 |
"outputs": [ |
|
|
1583 |
{ |
|
|
1584 |
"name": "stdout", |
|
|
1585 |
"output_type": "stream", |
|
|
1586 |
"text": [ |
|
|
1587 |
"Begin Training rep 1/5\t of Patient 1\n" |
|
|
1588 |
] |
|
|
1589 |
}, |
|
|
1590 |
{ |
|
|
1591 |
"name": "stderr", |
|
|
1592 |
"output_type": "stream", |
|
|
1593 |
"text": [ |
|
|
1594 |
"c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:34: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
1595 |
] |
|
|
1596 |
}, |
|
|
1597 |
{ |
|
|
1598 |
"ename": "KeyboardInterrupt", |
|
|
1599 |
"evalue": "", |
|
|
1600 |
"output_type": "error", |
|
|
1601 |
"traceback": [ |
|
|
1602 |
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", |
|
|
1603 |
"\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", |
|
|
1604 |
"\u001b[1;32m<ipython-input-43-18f565b68b5f>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 38\u001b[0m \u001b[0moutputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mCNN\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 39\u001b[0m \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 40\u001b[1;33m \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 41\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 42\u001b[0m \u001b[0mrunning_loss\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1605 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(self, gradient, retain_graph, create_graph)\u001b[0m\n\u001b[0;32m 164\u001b[0m \u001b[0mproducts\u001b[0m\u001b[1;33m.\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 165\u001b[0m \"\"\"\n\u001b[1;32m--> 166\u001b[1;33m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 167\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 168\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1606 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\autograd\\__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables)\u001b[0m\n\u001b[0;32m 97\u001b[0m Variable._execution_engine.run_backward(\n\u001b[0;32m 98\u001b[0m \u001b[0mtensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgrad_tensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 99\u001b[1;33m allow_unreachable=True) # allow_unreachable flag\n\u001b[0m\u001b[0;32m 100\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 101\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1607 |
"\u001b[1;31mKeyboardInterrupt\u001b[0m: " |
|
|
1608 |
] |
|
|
1609 |
} |
|
|
1610 |
], |
|
|
1611 |
"source": [ |
|
|
1612 |
"p = 0\n", |
|
|
1613 |
"n_rep = 5 \n", |
|
|
1614 |
"n_patient = len(np.unique(Patient))\n", |
|
|
1615 |
"fold_vloss = np.zeros((n_rep,n_patient))\n", |
|
|
1616 |
"fold_loss = np.zeros((n_rep,n_patient))\n", |
|
|
1617 |
"fold_vacc = np.zeros((n_rep,n_patient))\n", |
|
|
1618 |
"fold_acc = np.zeros((n_rep,n_patient))\n", |
|
|
1619 |
"\n", |
|
|
1620 |
"for patient in np.unique(Patient):\n", |
|
|
1621 |
" id_patient = np.arange(len(tmp))[Patient==patient]\n", |
|
|
1622 |
" id_train = np.arange(len(tmp))[Patient!=patient]\n", |
|
|
1623 |
" \n", |
|
|
1624 |
" for rep in range(n_rep):\n", |
|
|
1625 |
" np.random.shuffle(id_patient)\n", |
|
|
1626 |
" np.random.shuffle(id_train)\n", |
|
|
1627 |
" \n", |
|
|
1628 |
" X_train = tmp[id_train]\n", |
|
|
1629 |
" X_test = tmp[id_patient]\n", |
|
|
1630 |
" y_train = Label[id_train]\n", |
|
|
1631 |
" y_test = Label[id_patient]\n", |
|
|
1632 |
" \n", |
|
|
1633 |
" print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n", |
|
|
1634 |
" (rep+1,n_rep, patient))\n", |
|
|
1635 |
" \n", |
|
|
1636 |
" CNN = MaxCNN().cuda()\n", |
|
|
1637 |
" criterion = nn.NLLLoss()\n", |
|
|
1638 |
" optimizer = optim.SGD(CNN.parameters(), lr=0.01)\n", |
|
|
1639 |
" \n", |
|
|
1640 |
" n_epochs = 45\n", |
|
|
1641 |
" for epoch in range(n_epochs):\n", |
|
|
1642 |
" running_loss = 0.0\n", |
|
|
1643 |
" batchsize = 32\n", |
|
|
1644 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
1645 |
" \n", |
|
|
1646 |
" CNN.to(torch.device(\"cuda\"))\n", |
|
|
1647 |
" optimizer.zero_grad()\n", |
|
|
1648 |
" # forward + backward + optimize\n", |
|
|
1649 |
" outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda())\n", |
|
|
1650 |
" loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n", |
|
|
1651 |
" loss.backward()\n", |
|
|
1652 |
" optimizer.step()\n", |
|
|
1653 |
" running_loss += loss.item()\n", |
|
|
1654 |
" \n", |
|
|
1655 |
" if epoch==50:\n", |
|
|
1656 |
" cnn_cpu = CNN.to(torch.device(\"cuda\"))\n", |
|
|
1657 |
"\n", |
|
|
1658 |
" check_id = np.arange(2000)\n", |
|
|
1659 |
" np.random.shuffle(check_id)\n", |
|
|
1660 |
" #acc\n", |
|
|
1661 |
" acc = np.zeros(len(y_train))\n", |
|
|
1662 |
" for j in range(int(len(acc)/batchsize)+1):\n", |
|
|
1663 |
" _, idx = torch.max(cnn_cpu(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32)).data,1)\n", |
|
|
1664 |
" acc[j*batchsize:(j+1)*batchsize] = (idx == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n", |
|
|
1665 |
" acc = np.mean(acc)\n", |
|
|
1666 |
" \n", |
|
|
1667 |
" #validation\n", |
|
|
1668 |
" val_acc = np.zeros(len(y_test))\n", |
|
|
1669 |
" val_loss = []\n", |
|
|
1670 |
" for j in range(int(len(val_acc)/batchsize)+1):\n", |
|
|
1671 |
" val_outputs = cnn_cpu(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32))\n", |
|
|
1672 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
1673 |
" val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long)).item())\n", |
|
|
1674 |
" val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n", |
|
|
1675 |
" val_acc = np.mean(val_acc)\n", |
|
|
1676 |
" val_loss = np.mean(val_loss)\n", |
|
|
1677 |
"\n", |
|
|
1678 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
1679 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
1680 |
"\n", |
|
|
1681 |
" fold_vloss[rep, p ] = val_loss\n", |
|
|
1682 |
" fold_loss[rep, p] = running_loss/i\n", |
|
|
1683 |
" fold_vacc[rep, p] = val_acc\n", |
|
|
1684 |
" fold_acc[rep, p] = acc\n", |
|
|
1685 |
" \n", |
|
|
1686 |
" \n", |
|
|
1687 |
" print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
1688 |
" (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n", |
|
|
1689 |
" \n", |
|
|
1690 |
" p = p + 1" |
|
|
1691 |
] |
|
|
1692 |
}, |
|
|
1693 |
{ |
|
|
1694 |
"cell_type": "code", |
|
|
1695 |
"execution_count": 8, |
|
|
1696 |
"metadata": { |
|
|
1697 |
"scrolled": true |
|
|
1698 |
}, |
|
|
1699 |
"outputs": [ |
|
|
1700 |
{ |
|
|
1701 |
"name": "stdout", |
|
|
1702 |
"output_type": "stream", |
|
|
1703 |
"text": [ |
|
|
1704 |
"Begin Training rep 1/5\t of Patient 11\n" |
|
|
1705 |
] |
|
|
1706 |
}, |
|
|
1707 |
{ |
|
|
1708 |
"name": "stderr", |
|
|
1709 |
"output_type": "stream", |
|
|
1710 |
"text": [ |
|
|
1711 |
"c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:34: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
1712 |
] |
|
|
1713 |
}, |
|
|
1714 |
{ |
|
|
1715 |
"name": "stdout", |
|
|
1716 |
"output_type": "stream", |
|
|
1717 |
"text": [ |
|
|
1718 |
"[1, 45] loss: 1.402\tAccuracy : 0.286\t\tval-loss: 1.379\tval-Accuracy : 0.258\n", |
|
|
1719 |
"[6, 45] loss: 1.397\tAccuracy : 0.286\t\tval-loss: 1.368\tval-Accuracy : 0.258\n", |
|
|
1720 |
"[11, 45] loss: 1.207\tAccuracy : 0.482\t\tval-loss: 0.958\tval-Accuracy : 0.516\n", |
|
|
1721 |
"[16, 45] loss: 1.054\tAccuracy : 0.578\t\tval-loss: 0.800\tval-Accuracy : 0.613\n", |
|
|
1722 |
"[21, 45] loss: 0.941\tAccuracy : 0.656\t\tval-loss: 0.727\tval-Accuracy : 0.689\n", |
|
|
1723 |
"[26, 45] loss: 0.793\tAccuracy : 0.741\t\tval-loss: 0.627\tval-Accuracy : 0.787\n", |
|
|
1724 |
"[31, 45] loss: 0.565\tAccuracy : 0.845\t\tval-loss: 0.306\tval-Accuracy : 0.902\n", |
|
|
1725 |
"[36, 45] loss: 0.332\tAccuracy : 0.887\t\tval-loss: 0.214\tval-Accuracy : 0.933\n", |
|
|
1726 |
"[41, 45] loss: 0.232\tAccuracy : 0.912\t\tval-loss: 0.218\tval-Accuracy : 0.920\n", |
|
|
1727 |
"Begin Training rep 2/5\t of Patient 11\n", |
|
|
1728 |
"[1, 45] loss: 1.401\tAccuracy : 0.358\t\tval-loss: 1.379\tval-Accuracy : 0.387\n", |
|
|
1729 |
"[6, 45] loss: 1.389\tAccuracy : 0.286\t\tval-loss: 1.365\tval-Accuracy : 0.258\n", |
|
|
1730 |
"[11, 45] loss: 1.136\tAccuracy : 0.548\t\tval-loss: 1.012\tval-Accuracy : 0.578\n", |
|
|
1731 |
"[16, 45] loss: 1.019\tAccuracy : 0.606\t\tval-loss: 0.904\tval-Accuracy : 0.582\n", |
|
|
1732 |
"[21, 45] loss: 0.908\tAccuracy : 0.665\t\tval-loss: 0.820\tval-Accuracy : 0.640\n", |
|
|
1733 |
"[26, 45] loss: 0.771\tAccuracy : 0.598\t\tval-loss: 0.890\tval-Accuracy : 0.622\n", |
|
|
1734 |
"[31, 45] loss: 0.457\tAccuracy : 0.858\t\tval-loss: 0.644\tval-Accuracy : 0.840\n", |
|
|
1735 |
"[36, 45] loss: 0.326\tAccuracy : 0.889\t\tval-loss: 0.718\tval-Accuracy : 0.871\n", |
|
|
1736 |
"[41, 45] loss: 0.234\tAccuracy : 0.923\t\tval-loss: 0.713\tval-Accuracy : 0.907\n", |
|
|
1737 |
"Begin Training rep 3/5\t of Patient 11\n", |
|
|
1738 |
"[1, 45] loss: 1.403\tAccuracy : 0.286\t\tval-loss: 1.385\tval-Accuracy : 0.258\n", |
|
|
1739 |
"[6, 45] loss: 1.397\tAccuracy : 0.286\t\tval-loss: 1.392\tval-Accuracy : 0.258\n", |
|
|
1740 |
"[11, 45] loss: 1.378\tAccuracy : 0.286\t\tval-loss: 1.351\tval-Accuracy : 0.258\n", |
|
|
1741 |
"[16, 45] loss: 1.115\tAccuracy : 0.550\t\tval-loss: 0.995\tval-Accuracy : 0.587\n", |
|
|
1742 |
"[21, 45] loss: 0.998\tAccuracy : 0.599\t\tval-loss: 0.896\tval-Accuracy : 0.604\n", |
|
|
1743 |
"[26, 45] loss: 0.867\tAccuracy : 0.667\t\tval-loss: 0.762\tval-Accuracy : 0.680\n", |
|
|
1744 |
"[31, 45] loss: 0.635\tAccuracy : 0.816\t\tval-loss: 0.418\tval-Accuracy : 0.880\n", |
|
|
1745 |
"[36, 45] loss: 0.361\tAccuracy : 0.889\t\tval-loss: 0.248\tval-Accuracy : 0.916\n", |
|
|
1746 |
"[41, 45] loss: 0.245\tAccuracy : 0.907\t\tval-loss: 0.197\tval-Accuracy : 0.933\n", |
|
|
1747 |
"Begin Training rep 4/5\t of Patient 11\n", |
|
|
1748 |
"[1, 45] loss: 1.404\tAccuracy : 0.268\t\tval-loss: 1.392\tval-Accuracy : 0.262\n", |
|
|
1749 |
"[6, 45] loss: 1.396\tAccuracy : 0.286\t\tval-loss: 1.410\tval-Accuracy : 0.258\n", |
|
|
1750 |
"[11, 45] loss: 1.218\tAccuracy : 0.483\t\tval-loss: 1.115\tval-Accuracy : 0.520\n", |
|
|
1751 |
"[16, 45] loss: 1.053\tAccuracy : 0.578\t\tval-loss: 0.800\tval-Accuracy : 0.600\n", |
|
|
1752 |
"[21, 45] loss: 0.927\tAccuracy : 0.655\t\tval-loss: 0.742\tval-Accuracy : 0.662\n", |
|
|
1753 |
"[26, 45] loss: 0.795\tAccuracy : 0.767\t\tval-loss: 0.661\tval-Accuracy : 0.787\n", |
|
|
1754 |
"[31, 45] loss: 0.509\tAccuracy : 0.793\t\tval-loss: 0.645\tval-Accuracy : 0.804\n", |
|
|
1755 |
"[36, 45] loss: 0.381\tAccuracy : 0.884\t\tval-loss: 0.299\tval-Accuracy : 0.920\n", |
|
|
1756 |
"[41, 45] loss: 0.299\tAccuracy : 0.897\t\tval-loss: 0.265\tval-Accuracy : 0.907\n", |
|
|
1757 |
"Begin Training rep 5/5\t of Patient 11\n", |
|
|
1758 |
"[1, 45] loss: 1.404\tAccuracy : 0.285\t\tval-loss: 1.393\tval-Accuracy : 0.258\n", |
|
|
1759 |
"[6, 45] loss: 1.397\tAccuracy : 0.286\t\tval-loss: 1.407\tval-Accuracy : 0.258\n", |
|
|
1760 |
"[11, 45] loss: 1.396\tAccuracy : 0.286\t\tval-loss: 1.408\tval-Accuracy : 0.258\n", |
|
|
1761 |
"[16, 45] loss: 1.238\tAccuracy : 0.454\t\tval-loss: 1.161\tval-Accuracy : 0.498\n", |
|
|
1762 |
"[21, 45] loss: 1.053\tAccuracy : 0.570\t\tval-loss: 1.031\tval-Accuracy : 0.578\n", |
|
|
1763 |
"[26, 45] loss: 0.924\tAccuracy : 0.627\t\tval-loss: 0.993\tval-Accuracy : 0.604\n", |
|
|
1764 |
"[31, 45] loss: 0.763\tAccuracy : 0.742\t\tval-loss: 0.782\tval-Accuracy : 0.751\n", |
|
|
1765 |
"[36, 45] loss: 0.576\tAccuracy : 0.750\t\tval-loss: 0.740\tval-Accuracy : 0.769\n", |
|
|
1766 |
"[41, 45] loss: 0.390\tAccuracy : 0.881\t\tval-loss: 0.282\tval-Accuracy : 0.929\n", |
|
|
1767 |
"loss: 0.203\tAccuracy : 0.904\t\tval-loss: 0.335\tval-Accuracy : 0.919\n", |
|
|
1768 |
"Begin Training rep 1/5\t of Patient 12\n", |
|
|
1769 |
"[1, 45] loss: 1.402\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n", |
|
|
1770 |
"[6, 45] loss: 1.262\tAccuracy : 0.450\t\tval-loss: 1.258\tval-Accuracy : 0.493\n", |
|
|
1771 |
"[11, 45] loss: 1.069\tAccuracy : 0.572\t\tval-loss: 1.008\tval-Accuracy : 0.622\n", |
|
|
1772 |
"[16, 45] loss: 0.983\tAccuracy : 0.614\t\tval-loss: 1.032\tval-Accuracy : 0.622\n", |
|
|
1773 |
"[21, 45] loss: 0.852\tAccuracy : 0.650\t\tval-loss: 1.417\tval-Accuracy : 0.631\n", |
|
|
1774 |
"[26, 45] loss: 0.659\tAccuracy : 0.766\t\tval-loss: 1.111\tval-Accuracy : 0.700\n", |
|
|
1775 |
"[31, 45] loss: 0.426\tAccuracy : 0.883\t\tval-loss: 0.603\tval-Accuracy : 0.843\n", |
|
|
1776 |
"[36, 45] loss: 0.291\tAccuracy : 0.898\t\tval-loss: 0.524\tval-Accuracy : 0.829\n", |
|
|
1777 |
"[41, 45] loss: 0.191\tAccuracy : 0.915\t\tval-loss: 0.421\tval-Accuracy : 0.894\n", |
|
|
1778 |
"Begin Training rep 2/5\t of Patient 12\n", |
|
|
1779 |
"[1, 45] loss: 1.402\tAccuracy : 0.285\t\tval-loss: 1.385\tval-Accuracy : 0.263\n", |
|
|
1780 |
"[6, 45] loss: 1.397\tAccuracy : 0.285\t\tval-loss: 1.385\tval-Accuracy : 0.263\n", |
|
|
1781 |
"[11, 45] loss: 1.238\tAccuracy : 0.468\t\tval-loss: 1.232\tval-Accuracy : 0.488\n", |
|
|
1782 |
"[16, 45] loss: 1.052\tAccuracy : 0.534\t\tval-loss: 1.095\tval-Accuracy : 0.562\n", |
|
|
1783 |
"[21, 45] loss: 0.943\tAccuracy : 0.623\t\tval-loss: 0.969\tval-Accuracy : 0.631\n", |
|
|
1784 |
"[26, 45] loss: 0.790\tAccuracy : 0.736\t\tval-loss: 0.996\tval-Accuracy : 0.705\n", |
|
|
1785 |
"[31, 45] loss: 0.522\tAccuracy : 0.852\t\tval-loss: 0.756\tval-Accuracy : 0.742\n", |
|
|
1786 |
"[36, 45] loss: 0.351\tAccuracy : 0.882\t\tval-loss: 0.699\tval-Accuracy : 0.779\n", |
|
|
1787 |
"[41, 45] loss: 0.274\tAccuracy : 0.899\t\tval-loss: 0.653\tval-Accuracy : 0.788\n", |
|
|
1788 |
"Begin Training rep 3/5\t of Patient 12\n", |
|
|
1789 |
"[1, 45] loss: 1.403\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n", |
|
|
1790 |
"[6, 45] loss: 1.396\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n", |
|
|
1791 |
"[11, 45] loss: 1.215\tAccuracy : 0.490\t\tval-loss: 1.180\tval-Accuracy : 0.544\n", |
|
|
1792 |
"[16, 45] loss: 1.044\tAccuracy : 0.551\t\tval-loss: 0.995\tval-Accuracy : 0.576\n", |
|
|
1793 |
"[21, 45] loss: 0.947\tAccuracy : 0.605\t\tval-loss: 0.976\tval-Accuracy : 0.581\n", |
|
|
1794 |
"[26, 45] loss: 0.819\tAccuracy : 0.723\t\tval-loss: 0.926\tval-Accuracy : 0.691\n", |
|
|
1795 |
"[31, 45] loss: 0.541\tAccuracy : 0.843\t\tval-loss: 0.697\tval-Accuracy : 0.760\n", |
|
|
1796 |
"[36, 45] loss: 0.323\tAccuracy : 0.851\t\tval-loss: 1.152\tval-Accuracy : 0.673\n", |
|
|
1797 |
"[41, 45] loss: 0.197\tAccuracy : 0.887\t\tval-loss: 1.182\tval-Accuracy : 0.742\n", |
|
|
1798 |
"Begin Training rep 4/5\t of Patient 12\n", |
|
|
1799 |
"[1, 45] loss: 1.404\tAccuracy : 0.287\t\tval-loss: 1.385\tval-Accuracy : 0.263\n", |
|
|
1800 |
"[6, 45] loss: 1.392\tAccuracy : 0.285\t\tval-loss: 1.380\tval-Accuracy : 0.263\n", |
|
|
1801 |
"[11, 45] loss: 1.132\tAccuracy : 0.528\t\tval-loss: 1.179\tval-Accuracy : 0.581\n", |
|
|
1802 |
"[16, 45] loss: 0.999\tAccuracy : 0.591\t\tval-loss: 1.066\tval-Accuracy : 0.618\n", |
|
|
1803 |
"[21, 45] loss: 0.879\tAccuracy : 0.673\t\tval-loss: 1.006\tval-Accuracy : 0.599\n", |
|
|
1804 |
"[26, 45] loss: 0.689\tAccuracy : 0.733\t\tval-loss: 1.066\tval-Accuracy : 0.641\n", |
|
|
1805 |
"[31, 45] loss: 0.460\tAccuracy : 0.790\t\tval-loss: 0.968\tval-Accuracy : 0.691\n", |
|
|
1806 |
"[36, 45] loss: 0.288\tAccuracy : 0.889\t\tval-loss: 0.541\tval-Accuracy : 0.843\n", |
|
|
1807 |
"[41, 45] loss: 0.218\tAccuracy : 0.917\t\tval-loss: 0.455\tval-Accuracy : 0.871\n", |
|
|
1808 |
"Begin Training rep 5/5\t of Patient 12\n", |
|
|
1809 |
"[1, 45] loss: 1.403\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n", |
|
|
1810 |
"[6, 45] loss: 1.252\tAccuracy : 0.456\t\tval-loss: 1.223\tval-Accuracy : 0.530\n", |
|
|
1811 |
"[11, 45] loss: 1.046\tAccuracy : 0.583\t\tval-loss: 1.013\tval-Accuracy : 0.618\n", |
|
|
1812 |
"[16, 45] loss: 0.926\tAccuracy : 0.628\t\tval-loss: 0.999\tval-Accuracy : 0.622\n", |
|
|
1813 |
"[21, 45] loss: 0.741\tAccuracy : 0.743\t\tval-loss: 0.754\tval-Accuracy : 0.719\n", |
|
|
1814 |
"[26, 45] loss: 0.724\tAccuracy : 0.669\t\tval-loss: 1.009\tval-Accuracy : 0.664\n", |
|
|
1815 |
"[31, 45] loss: 0.281\tAccuracy : 0.892\t\tval-loss: 0.760\tval-Accuracy : 0.816\n", |
|
|
1816 |
"[36, 45] loss: 0.236\tAccuracy : 0.905\t\tval-loss: 0.652\tval-Accuracy : 0.839\n", |
|
|
1817 |
"[41, 45] loss: 0.227\tAccuracy : 0.910\t\tval-loss: 0.666\tval-Accuracy : 0.848\n", |
|
|
1818 |
"loss: 0.229\tAccuracy : 0.906\t\tval-loss: 0.675\tval-Accuracy : 0.829\n", |
|
|
1819 |
"Begin Training rep 1/5\t of Patient 13\n" |
|
|
1820 |
] |
|
|
1821 |
}, |
|
|
1822 |
{ |
|
|
1823 |
"ename": "RuntimeError", |
|
|
1824 |
"evalue": "non-empty 3D or 4D input tensor expected but got ndim: 4", |
|
|
1825 |
"output_type": "error", |
|
|
1826 |
"traceback": [ |
|
|
1827 |
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", |
|
|
1828 |
"\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)", |
|
|
1829 |
"\u001b[1;32m<ipython-input-8-57c097f46e05>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 60\u001b[0m \u001b[0mval_loss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 61\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mval_acc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m/\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 62\u001b[1;33m \u001b[0mval_outputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcnn_cpu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_test\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 63\u001b[0m \u001b[0m_\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0midx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mval_outputs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 64\u001b[0m \u001b[0mval_loss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcriterion\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mval_outputs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_test\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1830 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 539\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 540\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 542\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 543\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1831 |
"\u001b[1;32m<ipython-input-7-a97bbd16b095>\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m 26\u001b[0m \u001b[0mtmp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m128\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 27\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m7\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 28\u001b[1;33m \u001b[0mtmp\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv7\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv6\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv5\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv4\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv3\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 29\u001b[0m \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m 
\u001b[0mtmp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m*\u001b[0m\u001b[1;36m128\u001b[0m\u001b[1;33m*\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1832 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 539\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 540\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 542\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 543\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1833 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\pooling.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m 139\u001b[0m return F.max_pool2d(input, self.kernel_size, self.stride,\n\u001b[0;32m 140\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpadding\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdilation\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mceil_mode\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 141\u001b[1;33m self.return_indices)\n\u001b[0m\u001b[0;32m 142\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 143\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1834 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\_jit_internal.py\u001b[0m in \u001b[0;36mfn\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 136\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mif_true\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 137\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 138\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mif_false\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 139\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 140\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mif_true\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__doc__\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mif_false\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__doc__\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
1835 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\functional.py\u001b[0m in \u001b[0;36m_max_pool2d\u001b[1;34m(input, kernel_size, stride, padding, dilation, ceil_mode, return_indices)\u001b[0m\n\u001b[0;32m 486\u001b[0m \u001b[0mstride\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjit\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mannotate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mList\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mint\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 487\u001b[0m return torch.max_pool2d(\n\u001b[1;32m--> 488\u001b[1;33m input, kernel_size, stride, padding, dilation, ceil_mode)\n\u001b[0m\u001b[0;32m 489\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 490\u001b[0m max_pool2d = boolean_dispatch(\n", |
|
|
1836 |
"\u001b[1;31mRuntimeError\u001b[0m: non-empty 3D or 4D input tensor expected but got ndim: 4" |
|
|
1837 |
] |
|
|
1838 |
} |
|
|
1839 |
], |
|
|
1840 |
"source": [ |
|
|
1841 |
"p = 12\n", |
|
|
1842 |
"n_rep = 5 \n", |
|
|
1843 |
"n_patient = len(np.unique(Patient))\n", |
|
|
1844 |
"fold_vloss = np.zeros((n_rep,n_patient))\n", |
|
|
1845 |
"fold_loss = np.zeros((n_rep,n_patient))\n", |
|
|
1846 |
"fold_vacc = np.zeros((n_rep,n_patient))\n", |
|
|
1847 |
"fold_acc = np.zeros((n_rep,n_patient))\n", |
|
|
1848 |
"\n", |
|
|
1849 |
"for patient in np.unique(Patient):\n", |
|
|
1850 |
" patient = patient + 13\n", |
|
|
1851 |
" id_patient = np.arange(len(tmp))[Patient==patient]\n", |
|
|
1852 |
" id_train = np.arange(len(tmp))[Patient!=patient]\n", |
|
|
1853 |
" \n", |
|
|
1854 |
" for rep in range(n_rep):\n", |
|
|
1855 |
" np.random.shuffle(id_patient)\n", |
|
|
1856 |
" np.random.shuffle(id_train)\n", |
|
|
1857 |
" \n", |
|
|
1858 |
" X_train = tmp[id_train]\n", |
|
|
1859 |
" X_test = tmp[id_patient]\n", |
|
|
1860 |
" y_train = Label[id_train]\n", |
|
|
1861 |
" y_test = Label[id_patient]\n", |
|
|
1862 |
" \n", |
|
|
1863 |
" print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n", |
|
|
1864 |
" (rep+1,n_rep, patient))\n", |
|
|
1865 |
" \n", |
|
|
1866 |
" CNN = MaxCNN().cuda()\n", |
|
|
1867 |
" criterion = nn.NLLLoss()\n", |
|
|
1868 |
" optimizer = optim.SGD(CNN.parameters(), lr=0.01)\n", |
|
|
1869 |
" \n", |
|
|
1870 |
" n_epochs = 45\n", |
|
|
1871 |
" for epoch in range(n_epochs):\n", |
|
|
1872 |
" running_loss = 0.0\n", |
|
|
1873 |
" batchsize = 32\n", |
|
|
1874 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
1875 |
" \n", |
|
|
1876 |
" CNN.to(torch.device(\"cuda\"))\n", |
|
|
1877 |
" optimizer.zero_grad()\n", |
|
|
1878 |
" # forward + backward + optimize\n", |
|
|
1879 |
" outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda())\n", |
|
|
1880 |
" loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n", |
|
|
1881 |
" loss.backward()\n", |
|
|
1882 |
" optimizer.step()\n", |
|
|
1883 |
" running_loss += loss.item()\n", |
|
|
1884 |
" \n", |
|
|
1885 |
" if epoch%5==0:\n", |
|
|
1886 |
" cnn_cpu = CNN.to(torch.device(\"cpu\"))\n", |
|
|
1887 |
"\n", |
|
|
1888 |
" check_id = np.arange(2000)\n", |
|
|
1889 |
" np.random.shuffle(check_id)\n", |
|
|
1890 |
" \n", |
|
|
1891 |
" #acc\n", |
|
|
1892 |
" acc = np.zeros(len(y_train))\n", |
|
|
1893 |
" for j in range(int(len(acc)/batchsize)+1):\n", |
|
|
1894 |
" _, idx = torch.max(cnn_cpu(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32)).data,1)\n", |
|
|
1895 |
" acc[j*batchsize:(j+1)*batchsize] = (idx == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n", |
|
|
1896 |
" acc = np.mean(acc)\n", |
|
|
1897 |
" \n", |
|
|
1898 |
" #validation\n", |
|
|
1899 |
" val_acc = np.zeros(len(y_test))\n", |
|
|
1900 |
" val_loss = []\n", |
|
|
1901 |
" for j in range(int(len(val_acc)/batchsize)+1):\n", |
|
|
1902 |
" val_outputs = cnn_cpu(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32))\n", |
|
|
1903 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
1904 |
" val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long)).item())\n", |
|
|
1905 |
" val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n", |
|
|
1906 |
" val_acc = np.mean(val_acc)\n", |
|
|
1907 |
" val_loss = np.mean(val_loss)\n", |
|
|
1908 |
"\n", |
|
|
1909 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
1910 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
1911 |
"\n", |
|
|
1912 |
" fold_vloss[rep, p ] = val_loss\n", |
|
|
1913 |
" fold_loss[rep, p] = running_loss/i\n", |
|
|
1914 |
" fold_vacc[rep, p] = val_acc\n", |
|
|
1915 |
" fold_acc[rep, p] = acc\n", |
|
|
1916 |
" \n", |
|
|
1917 |
" \n", |
|
|
1918 |
" print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
1919 |
" (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n", |
|
|
1920 |
" \n", |
|
|
1921 |
" p = p + 1" |
|
|
1922 |
] |
|
|
1923 |
}, |
|
|
1924 |
{ |
|
|
1925 |
"cell_type": "code", |
|
|
1926 |
"execution_count": 14, |
|
|
1927 |
"metadata": { |
|
|
1928 |
"scrolled": true |
|
|
1929 |
}, |
|
|
1930 |
"outputs": [ |
|
|
1931 |
{ |
|
|
1932 |
"name": "stdout", |
|
|
1933 |
"output_type": "stream", |
|
|
1934 |
"text": [ |
|
|
1935 |
"Begin Training rep 1/5\t of Patient 14\n" |
|
|
1936 |
] |
|
|
1937 |
}, |
|
|
1938 |
{ |
|
|
1939 |
"name": "stderr", |
|
|
1940 |
"output_type": "stream", |
|
|
1941 |
"text": [ |
|
|
1942 |
"c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:34: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
1943 |
] |
|
|
1944 |
}, |
|
|
1945 |
{ |
|
|
1946 |
"name": "stdout", |
|
|
1947 |
"output_type": "stream", |
|
|
1948 |
"text": [ |
|
|
1949 |
"[1, 45] loss: 1.403\tAccuracy : 0.286\t\tval-loss: 1.383\tval-Accuracy : 0.273\n", |
|
|
1950 |
"[6, 45] loss: 1.397\tAccuracy : 0.284\t\tval-loss: 1.380\tval-Accuracy : 0.278\n", |
|
|
1951 |
"[11, 45] loss: 1.243\tAccuracy : 0.464\t\tval-loss: 1.235\tval-Accuracy : 0.464\n", |
|
|
1952 |
"[16, 45] loss: 1.071\tAccuracy : 0.552\t\tval-loss: 1.099\tval-Accuracy : 0.565\n", |
|
|
1953 |
"[21, 45] loss: 0.970\tAccuracy : 0.621\t\tval-loss: 0.996\tval-Accuracy : 0.612\n", |
|
|
1954 |
"[26, 45] loss: 0.838\tAccuracy : 0.715\t\tval-loss: 0.963\tval-Accuracy : 0.641\n", |
|
|
1955 |
"[31, 45] loss: 0.609\tAccuracy : 0.831\t\tval-loss: 0.720\tval-Accuracy : 0.785\n", |
|
|
1956 |
"[36, 45] loss: 0.392\tAccuracy : 0.906\t\tval-loss: 0.601\tval-Accuracy : 0.656\n", |
|
|
1957 |
"[41, 45] loss: 0.243\tAccuracy : 0.912\t\tval-loss: 0.485\tval-Accuracy : 0.675\n", |
|
|
1958 |
"Begin Training rep 2/5\t of Patient 14\n", |
|
|
1959 |
"[1, 45] loss: 1.402\tAccuracy : 0.284\t\tval-loss: 1.383\tval-Accuracy : 0.278\n", |
|
|
1960 |
"[6, 45] loss: 1.398\tAccuracy : 0.284\t\tval-loss: 1.381\tval-Accuracy : 0.278\n", |
|
|
1961 |
"[11, 45] loss: 1.338\tAccuracy : 0.398\t\tval-loss: 1.318\tval-Accuracy : 0.469\n", |
|
|
1962 |
"[16, 45] loss: 1.081\tAccuracy : 0.574\t\tval-loss: 1.017\tval-Accuracy : 0.608\n", |
|
|
1963 |
"[21, 45] loss: 0.981\tAccuracy : 0.605\t\tval-loss: 0.967\tval-Accuracy : 0.646\n", |
|
|
1964 |
"[26, 45] loss: 0.912\tAccuracy : 0.650\t\tval-loss: 0.921\tval-Accuracy : 0.656\n", |
|
|
1965 |
"[31, 45] loss: 0.715\tAccuracy : 0.749\t\tval-loss: 0.909\tval-Accuracy : 0.742\n", |
|
|
1966 |
"[36, 45] loss: 0.417\tAccuracy : 0.836\t\tval-loss: 0.597\tval-Accuracy : 0.828\n", |
|
|
1967 |
"[41, 45] loss: 0.338\tAccuracy : 0.889\t\tval-loss: 0.527\tval-Accuracy : 0.833\n", |
|
|
1968 |
"Begin Training rep 3/5\t of Patient 14\n", |
|
|
1969 |
"[1, 45] loss: 1.405\tAccuracy : 0.284\t\tval-loss: 1.384\tval-Accuracy : 0.278\n", |
|
|
1970 |
"[6, 45] loss: 1.262\tAccuracy : 0.446\t\tval-loss: 1.263\tval-Accuracy : 0.474\n", |
|
|
1971 |
"[11, 45] loss: 1.065\tAccuracy : 0.576\t\tval-loss: 1.061\tval-Accuracy : 0.589\n", |
|
|
1972 |
"[16, 45] loss: 0.951\tAccuracy : 0.636\t\tval-loss: 1.003\tval-Accuracy : 0.636\n", |
|
|
1973 |
"[21, 45] loss: 0.815\tAccuracy : 0.670\t\tval-loss: 0.894\tval-Accuracy : 0.651\n", |
|
|
1974 |
"[26, 45] loss: 0.668\tAccuracy : 0.762\t\tval-loss: 0.686\tval-Accuracy : 0.756\n", |
|
|
1975 |
"[31, 45] loss: 0.453\tAccuracy : 0.873\t\tval-loss: 0.530\tval-Accuracy : 0.837\n", |
|
|
1976 |
"[36, 45] loss: 0.275\tAccuracy : 0.909\t\tval-loss: 0.680\tval-Accuracy : 0.665\n", |
|
|
1977 |
"[41, 45] loss: 0.197\tAccuracy : 0.936\t\tval-loss: 0.713\tval-Accuracy : 0.689\n", |
|
|
1978 |
"Begin Training rep 4/5\t of Patient 14\n", |
|
|
1979 |
"[1, 45] loss: 1.401\tAccuracy : 0.283\t\tval-loss: 1.383\tval-Accuracy : 0.282\n", |
|
|
1980 |
"[6, 45] loss: 1.398\tAccuracy : 0.284\t\tval-loss: 1.382\tval-Accuracy : 0.278\n", |
|
|
1981 |
"[11, 45] loss: 1.397\tAccuracy : 0.284\t\tval-loss: 1.382\tval-Accuracy : 0.278\n", |
|
|
1982 |
"[16, 45] loss: 1.395\tAccuracy : 0.284\t\tval-loss: 1.380\tval-Accuracy : 0.278\n", |
|
|
1983 |
"[21, 45] loss: 1.186\tAccuracy : 0.495\t\tval-loss: 1.156\tval-Accuracy : 0.502\n", |
|
|
1984 |
"[26, 45] loss: 1.032\tAccuracy : 0.592\t\tval-loss: 1.023\tval-Accuracy : 0.627\n", |
|
|
1985 |
"[31, 45] loss: 0.933\tAccuracy : 0.656\t\tval-loss: 0.981\tval-Accuracy : 0.665\n", |
|
|
1986 |
"[36, 45] loss: 0.709\tAccuracy : 0.796\t\tval-loss: 0.710\tval-Accuracy : 0.799\n", |
|
|
1987 |
"[41, 45] loss: 0.409\tAccuracy : 0.861\t\tval-loss: 0.602\tval-Accuracy : 0.665\n", |
|
|
1988 |
"Begin Training rep 5/5\t of Patient 14\n", |
|
|
1989 |
"[1, 45] loss: 1.403\tAccuracy : 0.284\t\tval-loss: 1.383\tval-Accuracy : 0.278\n", |
|
|
1990 |
"[6, 45] loss: 1.397\tAccuracy : 0.284\t\tval-loss: 1.380\tval-Accuracy : 0.278\n", |
|
|
1991 |
"[11, 45] loss: 1.194\tAccuracy : 0.482\t\tval-loss: 1.203\tval-Accuracy : 0.478\n", |
|
|
1992 |
"[16, 45] loss: 1.020\tAccuracy : 0.572\t\tval-loss: 1.169\tval-Accuracy : 0.565\n", |
|
|
1993 |
"[21, 45] loss: 0.914\tAccuracy : 0.645\t\tval-loss: 1.014\tval-Accuracy : 0.612\n", |
|
|
1994 |
"[26, 45] loss: 0.781\tAccuracy : 0.718\t\tval-loss: 0.916\tval-Accuracy : 0.708\n", |
|
|
1995 |
"[31, 45] loss: 0.610\tAccuracy : 0.809\t\tval-loss: 0.580\tval-Accuracy : 0.842\n", |
|
|
1996 |
"[36, 45] loss: 0.403\tAccuracy : 0.870\t\tval-loss: 0.667\tval-Accuracy : 0.646\n", |
|
|
1997 |
"[41, 45] loss: 0.295\tAccuracy : 0.898\t\tval-loss: 0.687\tval-Accuracy : 0.675\n", |
|
|
1998 |
"loss: 0.411\tAccuracy : 0.899\t\tval-loss: 0.603\tval-Accuracy : 0.707\n", |
|
|
1999 |
"Begin Training rep 1/5\t of Patient 15\n", |
|
|
2000 |
"[1, 45] loss: 1.400\tAccuracy : 0.284\t\tval-loss: 1.382\tval-Accuracy : 0.273\n", |
|
|
2001 |
"[6, 45] loss: 1.397\tAccuracy : 0.284\t\tval-loss: 1.381\tval-Accuracy : 0.273\n", |
|
|
2002 |
"[11, 45] loss: 1.222\tAccuracy : 0.477\t\tval-loss: 1.399\tval-Accuracy : 0.309\n", |
|
|
2003 |
"[16, 45] loss: 1.019\tAccuracy : 0.573\t\tval-loss: 1.598\tval-Accuracy : 0.364\n", |
|
|
2004 |
"[21, 45] loss: 0.895\tAccuracy : 0.623\t\tval-loss: 1.710\tval-Accuracy : 0.418\n", |
|
|
2005 |
"[26, 45] loss: 0.726\tAccuracy : 0.690\t\tval-loss: 1.776\tval-Accuracy : 0.414\n", |
|
|
2006 |
"[31, 45] loss: 0.423\tAccuracy : 0.859\t\tval-loss: 2.021\tval-Accuracy : 0.295\n", |
|
|
2007 |
"[36, 45] loss: 0.241\tAccuracy : 0.894\t\tval-loss: 2.203\tval-Accuracy : 0.377\n", |
|
|
2008 |
"[41, 45] loss: 0.184\tAccuracy : 0.940\t\tval-loss: 2.258\tval-Accuracy : 0.395\n" |
|
|
2009 |
] |
|
|
2010 |
}, |
|
|
2011 |
{ |
|
|
2012 |
"ename": "IndexError", |
|
|
2013 |
"evalue": "index 13 is out of bounds for axis 1 with size 13", |
|
|
2014 |
"output_type": "error", |
|
|
2015 |
"traceback": [ |
|
|
2016 |
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", |
|
|
2017 |
"\u001b[1;31mIndexError\u001b[0m Traceback (most recent call last)", |
|
|
2018 |
"\u001b[1;32m<ipython-input-14-7a4d43dcb058>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 70\u001b[0m (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n\u001b[0;32m 71\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 72\u001b[1;33m \u001b[0mfold_vloss\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mrep\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mp\u001b[0m \u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mval_loss\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 73\u001b[0m \u001b[0mfold_loss\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mrep\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mp\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mrunning_loss\u001b[0m\u001b[1;33m/\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 74\u001b[0m \u001b[0mfold_vacc\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mrep\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mp\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mval_acc\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
2019 |
"\u001b[1;31mIndexError\u001b[0m: index 13 is out of bounds for axis 1 with size 13" |
|
|
2020 |
] |
|
|
2021 |
} |
|
|
2022 |
], |
|
|
2023 |
"source": [ |
|
|
2024 |
"p = 12\n", |
|
|
2025 |
"n_rep = 5 \n", |
|
|
2026 |
"n_patient = len(np.unique(Patient))\n", |
|
|
2027 |
"fold_vloss = np.zeros((n_rep,n_patient))\n", |
|
|
2028 |
"fold_loss = np.zeros((n_rep,n_patient))\n", |
|
|
2029 |
"fold_vacc = np.zeros((n_rep,n_patient))\n", |
|
|
2030 |
"fold_acc = np.zeros((n_rep,n_patient))\n", |
|
|
2031 |
"\n", |
|
|
2032 |
"for patient in np.unique(Patient):\n", |
|
|
2033 |
" patient = patient + 13\n", |
|
|
2034 |
" id_patient = np.arange(len(tmp))[Patient==patient]\n", |
|
|
2035 |
" id_train = np.arange(len(tmp))[Patient!=patient]\n", |
|
|
2036 |
" \n", |
|
|
2037 |
" for rep in range(n_rep):\n", |
|
|
2038 |
" np.random.shuffle(id_patient)\n", |
|
|
2039 |
" np.random.shuffle(id_train)\n", |
|
|
2040 |
" \n", |
|
|
2041 |
" X_train = tmp[id_train]\n", |
|
|
2042 |
" X_test = tmp[id_patient]\n", |
|
|
2043 |
" y_train = Label[id_train]\n", |
|
|
2044 |
" y_test = Label[id_patient]\n", |
|
|
2045 |
" \n", |
|
|
2046 |
" print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n", |
|
|
2047 |
" (rep+1,n_rep, patient))\n", |
|
|
2048 |
" \n", |
|
|
2049 |
" CNN = MaxCNN().cuda()\n", |
|
|
2050 |
" criterion = nn.NLLLoss()\n", |
|
|
2051 |
" optimizer = optim.SGD(CNN.parameters(), lr=0.01)\n", |
|
|
2052 |
" \n", |
|
|
2053 |
" n_epochs = 45\n", |
|
|
2054 |
" for epoch in range(n_epochs):\n", |
|
|
2055 |
" running_loss = 0.0\n", |
|
|
2056 |
" batchsize = 32\n", |
|
|
2057 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
2058 |
" \n", |
|
|
2059 |
" CNN.to(torch.device(\"cuda\"))\n", |
|
|
2060 |
" optimizer.zero_grad()\n", |
|
|
2061 |
" # forward + backward + optimize\n", |
|
|
2062 |
" outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda())\n", |
|
|
2063 |
" loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n", |
|
|
2064 |
" loss.backward()\n", |
|
|
2065 |
" optimizer.step()\n", |
|
|
2066 |
" running_loss += loss.item()\n", |
|
|
2067 |
" \n", |
|
|
2068 |
" if epoch%5==0:\n", |
|
|
2069 |
" cnn_cpu = CNN.to(torch.device(\"cpu\"))\n", |
|
|
2070 |
"\n", |
|
|
2071 |
" check_id = np.arange(2000)\n", |
|
|
2072 |
" np.random.shuffle(check_id)\n", |
|
|
2073 |
" \n", |
|
|
2074 |
" #acc\n", |
|
|
2075 |
" acc = np.zeros(len(y_train))\n", |
|
|
2076 |
" for j in range(int(len(acc)/batchsize)+1):\n", |
|
|
2077 |
" _, idx = torch.max(cnn_cpu(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32)).data,1)\n", |
|
|
2078 |
" acc[j*batchsize:(j+1)*batchsize] = (idx == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n", |
|
|
2079 |
" acc = np.mean(acc)\n", |
|
|
2080 |
" \n", |
|
|
2081 |
" #validation\n", |
|
|
2082 |
" val_acc = np.zeros(len(y_test))\n", |
|
|
2083 |
" val_loss = []\n", |
|
|
2084 |
" for j in range(int(len(val_acc)/batchsize)+1):\n", |
|
|
2085 |
" val_outputs = cnn_cpu(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32))\n", |
|
|
2086 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
2087 |
" val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long)).item())\n", |
|
|
2088 |
" val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n", |
|
|
2089 |
" val_acc = np.mean(val_acc)\n", |
|
|
2090 |
" val_loss = np.mean(val_loss)\n", |
|
|
2091 |
"\n", |
|
|
2092 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
2093 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
2094 |
"\n", |
|
|
2095 |
" fold_vloss[rep, p ] = val_loss\n", |
|
|
2096 |
" fold_loss[rep, p] = running_loss/i\n", |
|
|
2097 |
" fold_vacc[rep, p] = val_acc\n", |
|
|
2098 |
" fold_acc[rep, p] = acc\n", |
|
|
2099 |
" \n", |
|
|
2100 |
" \n", |
|
|
2101 |
" print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
2102 |
" (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n", |
|
|
2103 |
" \n", |
|
|
2104 |
" p = p + 1" |
|
|
2105 |
] |
|
|
2106 |
}, |
|
|
2107 |
{ |
|
|
2108 |
"cell_type": "code", |
|
|
2109 |
"execution_count": null, |
|
|
2110 |
"metadata": {}, |
|
|
2111 |
"outputs": [], |
|
|
2112 |
"source": [ |
|
|
2113 |
"sio.savemat('Result/Res_MaxPoolCNN.mat',{\"loss\":fold_loss,\"acc\":fold_acc,\"val loss\":fold_vloss,\"val acc\":fold_vacc})\n", |
|
|
2114 |
"\n", |
|
|
2115 |
"fig = plt.figure(figsize=(12,10))\n", |
|
|
2116 |
"plt.grid()\n", |
|
|
2117 |
"plt.boxplot(fold_vacc)\n", |
|
|
2118 |
"plt.suptitle('Cross-Validation Accuracy\\n MaxPool CNN')\n", |
|
|
2119 |
"ax = plt.gca()\n", |
|
|
2120 |
"plt.xlabel('Patient id')\n", |
|
|
2121 |
"plt.ylabel('Accuracy')\n", |
|
|
2122 |
"plt.savefig('Result/MaxPoolCNN.png')\n", |
|
|
2123 |
"plt.show()" |
|
|
2124 |
] |
|
|
2125 |
}, |
|
|
2126 |
{ |
|
|
2127 |
"cell_type": "code", |
|
|
2128 |
"execution_count": 15, |
|
|
2129 |
"metadata": {}, |
|
|
2130 |
"outputs": [], |
|
|
2131 |
"source": [ |
|
|
2132 |
"class TempCNN(nn.Module):\n", |
|
|
2133 |
" def __init__(self):\n", |
|
|
2134 |
" super(TempCNN, self).__init__()\n", |
|
|
2135 |
" \n", |
|
|
2136 |
" \n", |
|
|
2137 |
" self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n", |
|
|
2138 |
" self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2139 |
" self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2140 |
" self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2141 |
" self.pool1 = nn.MaxPool2d((2,2))\n", |
|
|
2142 |
" self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
2143 |
" self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
2144 |
" self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n", |
|
|
2145 |
" \n", |
|
|
2146 |
" self.conv8 = nn.Conv1d(7,64,(4*4*128,3),stride=(1,1),padding=1)\n", |
|
|
2147 |
" \n", |
|
|
2148 |
" self.pool = nn.MaxPool2d((7,1))\n", |
|
|
2149 |
" self.drop = nn.Dropout(p=0.5)\n", |
|
|
2150 |
" self.fc = nn.Linear(192,4)\n", |
|
|
2151 |
" self.max = nn.LogSoftmax()\n", |
|
|
2152 |
" \n", |
|
|
2153 |
" def forward(self, x):\n", |
|
|
2154 |
" if x.get_device() == 0:\n", |
|
|
2155 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n", |
|
|
2156 |
" else:\n", |
|
|
2157 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n", |
|
|
2158 |
" for i in range(7):\n", |
|
|
2159 |
" tmp[:,i] = self.pool1( F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n", |
|
|
2160 |
" x = tmp.reshape(x.shape[0], x.shape[1],4*128*4,1)\n", |
|
|
2161 |
" x = F.relu(self.conv8(x))\n", |
|
|
2162 |
" x = x.view(x.shape[0],-1)\n", |
|
|
2163 |
" x = self.fc(x)\n", |
|
|
2164 |
" x = self.max(x)\n", |
|
|
2165 |
" return x" |
|
|
2166 |
] |
|
|
2167 |
}, |
|
|
2168 |
{ |
|
|
2169 |
"cell_type": "code", |
|
|
2170 |
"execution_count": 36, |
|
|
2171 |
"metadata": {}, |
|
|
2172 |
"outputs": [ |
|
|
2173 |
{ |
|
|
2174 |
"name": "stderr", |
|
|
2175 |
"output_type": "stream", |
|
|
2176 |
"text": [ |
|
|
2177 |
"c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:33: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
2178 |
] |
|
|
2179 |
}, |
|
|
2180 |
{ |
|
|
2181 |
"data": { |
|
|
2182 |
"text/plain": [ |
|
|
2183 |
"tensor([[-1.3924, -1.3307, -1.3637, -1.4631],\n", |
|
|
2184 |
" [-1.3927, -1.3307, -1.3638, -1.4627]], device='cuda:0',\n", |
|
|
2185 |
" grad_fn=<LogSoftmaxBackward>)" |
|
|
2186 |
] |
|
|
2187 |
}, |
|
|
2188 |
"execution_count": 36, |
|
|
2189 |
"metadata": {}, |
|
|
2190 |
"output_type": "execute_result" |
|
|
2191 |
} |
|
|
2192 |
], |
|
|
2193 |
"source": [ |
|
|
2194 |
"net = TempCNN().cuda()\n", |
|
|
2195 |
"net(torch.from_numpy(tmp[0:2]).to(torch.float32).cuda())" |
|
|
2196 |
] |
|
|
2197 |
}, |
|
|
2198 |
{ |
|
|
2199 |
"cell_type": "code", |
|
|
2200 |
"execution_count": 41, |
|
|
2201 |
"metadata": { |
|
|
2202 |
"scrolled": true |
|
|
2203 |
}, |
|
|
2204 |
"outputs": [ |
|
|
2205 |
{ |
|
|
2206 |
"name": "stdout", |
|
|
2207 |
"output_type": "stream", |
|
|
2208 |
"text": [ |
|
|
2209 |
"Begin Training rep 1/1\t of Patient 9\n" |
|
|
2210 |
] |
|
|
2211 |
}, |
|
|
2212 |
{ |
|
|
2213 |
"name": "stderr", |
|
|
2214 |
"output_type": "stream", |
|
|
2215 |
"text": [ |
|
|
2216 |
"c:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\ipykernel_launcher.py:33: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
2217 |
] |
|
|
2218 |
}, |
|
|
2219 |
{ |
|
|
2220 |
"name": "stdout", |
|
|
2221 |
"output_type": "stream", |
|
|
2222 |
"text": [ |
|
|
2223 |
"[1, 45] loss: 1.404\tAccuracy : 0.283\t\tval-loss: 1.380\tval-Accuracy : 0.287\n", |
|
|
2224 |
"[2, 45] loss: 1.401\tAccuracy : 0.283\t\tval-loss: 1.376\tval-Accuracy : 0.287\n", |
|
|
2225 |
"[3, 45] loss: 1.399\tAccuracy : 0.283\t\tval-loss: 1.373\tval-Accuracy : 0.287\n", |
|
|
2226 |
"[4, 45] loss: 1.398\tAccuracy : 0.283\t\tval-loss: 1.371\tval-Accuracy : 0.287\n", |
|
|
2227 |
"[5, 45] loss: 1.397\tAccuracy : 0.283\t\tval-loss: 1.370\tval-Accuracy : 0.287\n", |
|
|
2228 |
"[6, 45] loss: 1.396\tAccuracy : 0.283\t\tval-loss: 1.366\tval-Accuracy : 0.287\n", |
|
|
2229 |
"[7, 45] loss: 1.390\tAccuracy : 0.283\t\tval-loss: 1.347\tval-Accuracy : 0.287\n", |
|
|
2230 |
"[8, 45] loss: 1.328\tAccuracy : 0.427\t\tval-loss: 1.156\tval-Accuracy : 0.307\n", |
|
|
2231 |
"[9, 45] loss: 1.217\tAccuracy : 0.519\t\tval-loss: 1.032\tval-Accuracy : 0.485\n", |
|
|
2232 |
"[10, 45] loss: 1.113\tAccuracy : 0.565\t\tval-loss: 0.905\tval-Accuracy : 0.604\n", |
|
|
2233 |
"[11, 45] loss: 1.030\tAccuracy : 0.598\t\tval-loss: 0.829\tval-Accuracy : 0.634\n", |
|
|
2234 |
"[12, 45] loss: 0.970\tAccuracy : 0.628\t\tval-loss: 0.782\tval-Accuracy : 0.678\n", |
|
|
2235 |
"[13, 45] loss: 0.921\tAccuracy : 0.655\t\tval-loss: 0.774\tval-Accuracy : 0.688\n", |
|
|
2236 |
"[14, 45] loss: 0.840\tAccuracy : 0.702\t\tval-loss: 0.624\tval-Accuracy : 0.787\n", |
|
|
2237 |
"[15, 45] loss: 0.756\tAccuracy : 0.751\t\tval-loss: 0.581\tval-Accuracy : 0.807\n", |
|
|
2238 |
"[16, 45] loss: 0.757\tAccuracy : 0.783\t\tval-loss: 0.499\tval-Accuracy : 0.851\n", |
|
|
2239 |
"[17, 45] loss: 0.627\tAccuracy : 0.810\t\tval-loss: 0.366\tval-Accuracy : 0.906\n", |
|
|
2240 |
"[18, 45] loss: 0.556\tAccuracy : 0.840\t\tval-loss: 0.308\tval-Accuracy : 0.936\n", |
|
|
2241 |
"[19, 45] loss: 0.448\tAccuracy : 0.868\t\tval-loss: 0.163\tval-Accuracy : 0.960\n", |
|
|
2242 |
"[20, 45] loss: 0.399\tAccuracy : 0.868\t\tval-loss: 0.170\tval-Accuracy : 0.960\n", |
|
|
2243 |
"[21, 45] loss: 0.293\tAccuracy : 0.905\t\tval-loss: 0.132\tval-Accuracy : 0.970\n", |
|
|
2244 |
"[22, 45] loss: 0.242\tAccuracy : 0.925\t\tval-loss: 0.093\tval-Accuracy : 0.980\n", |
|
|
2245 |
"[23, 45] loss: 0.205\tAccuracy : 0.931\t\tval-loss: 0.086\tval-Accuracy : 0.980\n", |
|
|
2246 |
"[24, 45] loss: 0.175\tAccuracy : 0.942\t\tval-loss: 0.078\tval-Accuracy : 0.980\n", |
|
|
2247 |
"[25, 45] loss: 0.159\tAccuracy : 0.941\t\tval-loss: 0.100\tval-Accuracy : 0.975\n", |
|
|
2248 |
"[26, 45] loss: 0.169\tAccuracy : 0.943\t\tval-loss: 0.065\tval-Accuracy : 0.980\n", |
|
|
2249 |
"[27, 45] loss: 0.982\tAccuracy : 0.836\t\tval-loss: 0.336\tval-Accuracy : 0.916\n", |
|
|
2250 |
"[28, 45] loss: 0.310\tAccuracy : 0.915\t\tval-loss: 0.066\tval-Accuracy : 0.990\n", |
|
|
2251 |
"[29, 45] loss: 0.195\tAccuracy : 0.931\t\tval-loss: 0.091\tval-Accuracy : 0.980\n", |
|
|
2252 |
"[30, 45] loss: 0.159\tAccuracy : 0.940\t\tval-loss: 0.096\tval-Accuracy : 0.980\n", |
|
|
2253 |
"[31, 45] loss: 0.155\tAccuracy : 0.933\t\tval-loss: 0.089\tval-Accuracy : 0.975\n", |
|
|
2254 |
"[32, 45] loss: 0.160\tAccuracy : 0.949\t\tval-loss: 0.077\tval-Accuracy : 0.970\n", |
|
|
2255 |
"[33, 45] loss: 0.117\tAccuracy : 0.953\t\tval-loss: 0.090\tval-Accuracy : 0.965\n", |
|
|
2256 |
"[34, 45] loss: 0.109\tAccuracy : 0.950\t\tval-loss: 0.069\tval-Accuracy : 0.980\n", |
|
|
2257 |
"[35, 45] loss: 0.101\tAccuracy : 0.955\t\tval-loss: 0.078\tval-Accuracy : 0.975\n", |
|
|
2258 |
"[36, 45] loss: 0.094\tAccuracy : 0.957\t\tval-loss: 0.080\tval-Accuracy : 0.970\n", |
|
|
2259 |
"[37, 45] loss: 0.090\tAccuracy : 0.958\t\tval-loss: 0.078\tval-Accuracy : 0.975\n", |
|
|
2260 |
"[38, 45] loss: 0.086\tAccuracy : 0.959\t\tval-loss: 0.079\tval-Accuracy : 0.970\n", |
|
|
2261 |
"[39, 45] loss: 0.083\tAccuracy : 0.958\t\tval-loss: 0.080\tval-Accuracy : 0.970\n", |
|
|
2262 |
"[40, 45] loss: 0.080\tAccuracy : 0.960\t\tval-loss: 0.082\tval-Accuracy : 0.965\n", |
|
|
2263 |
"[41, 45] loss: 0.078\tAccuracy : 0.962\t\tval-loss: 0.085\tval-Accuracy : 0.970\n", |
|
|
2264 |
"[42, 45] loss: 0.074\tAccuracy : 0.962\t\tval-loss: 0.086\tval-Accuracy : 0.975\n", |
|
|
2265 |
"[43, 45] loss: 0.072\tAccuracy : 0.963\t\tval-loss: 0.090\tval-Accuracy : 0.970\n", |
|
|
2266 |
"[44, 45] loss: 0.071\tAccuracy : 0.964\t\tval-loss: 0.094\tval-Accuracy : 0.970\n", |
|
|
2267 |
"[45, 45] loss: 0.069\tAccuracy : 0.963\t\tval-loss: 0.099\tval-Accuracy : 0.970\n", |
|
|
2268 |
"loss: 0.069\tAccuracy : 0.963\t\tval-loss: 0.099\tval-Accuracy : 0.970\n", |
|
|
2269 |
"Begin Training rep 1/1\t of Patient 12\n", |
|
|
2270 |
"[1, 45] loss: 1.404\tAccuracy : 0.240\t\tval-loss: 1.385\tval-Accuracy : 0.240\n", |
|
|
2271 |
"[2, 45] loss: 1.401\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n", |
|
|
2272 |
"[3, 45] loss: 1.399\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n", |
|
|
2273 |
"[4, 45] loss: 1.398\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n", |
|
|
2274 |
"[5, 45] loss: 1.397\tAccuracy : 0.285\t\tval-loss: 1.384\tval-Accuracy : 0.263\n", |
|
|
2275 |
"[6, 45] loss: 1.396\tAccuracy : 0.285\t\tval-loss: 1.383\tval-Accuracy : 0.263\n", |
|
|
2276 |
"[7, 45] loss: 1.393\tAccuracy : 0.285\t\tval-loss: 1.378\tval-Accuracy : 0.263\n", |
|
|
2277 |
"[8, 45] loss: 1.358\tAccuracy : 0.344\t\tval-loss: 1.328\tval-Accuracy : 0.318\n", |
|
|
2278 |
"[9, 45] loss: 1.249\tAccuracy : 0.497\t\tval-loss: 1.229\tval-Accuracy : 0.530\n", |
|
|
2279 |
"[10, 45] loss: 1.139\tAccuracy : 0.607\t\tval-loss: 1.209\tval-Accuracy : 0.571\n", |
|
|
2280 |
"[11, 45] loss: 1.048\tAccuracy : 0.633\t\tval-loss: 1.135\tval-Accuracy : 0.553\n", |
|
|
2281 |
"[12, 45] loss: 0.962\tAccuracy : 0.654\t\tval-loss: 1.078\tval-Accuracy : 0.562\n", |
|
|
2282 |
"[13, 45] loss: 0.865\tAccuracy : 0.687\t\tval-loss: 1.236\tval-Accuracy : 0.581\n", |
|
|
2283 |
"[14, 45] loss: 0.781\tAccuracy : 0.735\t\tval-loss: 1.049\tval-Accuracy : 0.636\n", |
|
|
2284 |
"[15, 45] loss: 0.681\tAccuracy : 0.764\t\tval-loss: 1.046\tval-Accuracy : 0.608\n", |
|
|
2285 |
"[16, 45] loss: 0.588\tAccuracy : 0.788\t\tval-loss: 1.112\tval-Accuracy : 0.641\n", |
|
|
2286 |
"[17, 45] loss: 0.558\tAccuracy : 0.821\t\tval-loss: 0.808\tval-Accuracy : 0.760\n", |
|
|
2287 |
"[18, 45] loss: 0.467\tAccuracy : 0.837\t\tval-loss: 1.016\tval-Accuracy : 0.747\n", |
|
|
2288 |
"[19, 45] loss: 0.453\tAccuracy : 0.851\t\tval-loss: 0.793\tval-Accuracy : 0.816\n", |
|
|
2289 |
"[20, 45] loss: 0.519\tAccuracy : 0.819\t\tval-loss: 0.884\tval-Accuracy : 0.770\n", |
|
|
2290 |
"[21, 45] loss: 0.449\tAccuracy : 0.867\t\tval-loss: 0.907\tval-Accuracy : 0.751\n", |
|
|
2291 |
"[22, 45] loss: 0.298\tAccuracy : 0.885\t\tval-loss: 0.991\tval-Accuracy : 0.779\n", |
|
|
2292 |
"[23, 45] loss: 0.231\tAccuracy : 0.888\t\tval-loss: 1.303\tval-Accuracy : 0.760\n", |
|
|
2293 |
"[24, 45] loss: 0.354\tAccuracy : 0.890\t\tval-loss: 0.884\tval-Accuracy : 0.760\n", |
|
|
2294 |
"[25, 45] loss: 0.228\tAccuracy : 0.915\t\tval-loss: 1.024\tval-Accuracy : 0.802\n", |
|
|
2295 |
"[26, 45] loss: 0.277\tAccuracy : 0.888\t\tval-loss: 0.807\tval-Accuracy : 0.765\n", |
|
|
2296 |
"[27, 45] loss: 0.247\tAccuracy : 0.859\t\tval-loss: 0.909\tval-Accuracy : 0.751\n", |
|
|
2297 |
"[28, 45] loss: 0.177\tAccuracy : 0.930\t\tval-loss: 1.099\tval-Accuracy : 0.829\n", |
|
|
2298 |
"[29, 45] loss: 0.139\tAccuracy : 0.936\t\tval-loss: 1.224\tval-Accuracy : 0.829\n", |
|
|
2299 |
"[30, 45] loss: 0.125\tAccuracy : 0.937\t\tval-loss: 1.362\tval-Accuracy : 0.820\n", |
|
|
2300 |
"[31, 45] loss: 0.113\tAccuracy : 0.937\t\tval-loss: 1.460\tval-Accuracy : 0.825\n", |
|
|
2301 |
"[32, 45] loss: 0.105\tAccuracy : 0.939\t\tval-loss: 1.542\tval-Accuracy : 0.829\n", |
|
|
2302 |
"[33, 45] loss: 0.100\tAccuracy : 0.939\t\tval-loss: 1.617\tval-Accuracy : 0.839\n", |
|
|
2303 |
"[34, 45] loss: 0.095\tAccuracy : 0.944\t\tval-loss: 1.673\tval-Accuracy : 0.839\n", |
|
|
2304 |
"[35, 45] loss: 0.091\tAccuracy : 0.946\t\tval-loss: 1.712\tval-Accuracy : 0.848\n", |
|
|
2305 |
"[36, 45] loss: 0.087\tAccuracy : 0.947\t\tval-loss: 1.763\tval-Accuracy : 0.848\n", |
|
|
2306 |
"[37, 45] loss: 0.084\tAccuracy : 0.948\t\tval-loss: 1.797\tval-Accuracy : 0.857\n", |
|
|
2307 |
"[38, 45] loss: 0.082\tAccuracy : 0.949\t\tval-loss: 1.817\tval-Accuracy : 0.857\n", |
|
|
2308 |
"[39, 45] loss: 0.079\tAccuracy : 0.952\t\tval-loss: 1.853\tval-Accuracy : 0.857\n", |
|
|
2309 |
"[40, 45] loss: 0.077\tAccuracy : 0.955\t\tval-loss: 1.889\tval-Accuracy : 0.853\n", |
|
|
2310 |
"[41, 45] loss: 0.075\tAccuracy : 0.956\t\tval-loss: 1.911\tval-Accuracy : 0.853\n", |
|
|
2311 |
"[42, 45] loss: 0.074\tAccuracy : 0.957\t\tval-loss: 1.904\tval-Accuracy : 0.857\n", |
|
|
2312 |
"[43, 45] loss: 0.072\tAccuracy : 0.961\t\tval-loss: 2.002\tval-Accuracy : 0.862\n", |
|
|
2313 |
"[44, 45] loss: 0.071\tAccuracy : 0.965\t\tval-loss: 1.996\tval-Accuracy : 0.862\n", |
|
|
2314 |
"[45, 45] loss: 0.070\tAccuracy : 0.962\t\tval-loss: 2.043\tval-Accuracy : 0.871\n", |
|
|
2315 |
"loss: 0.070\tAccuracy : 0.962\t\tval-loss: 2.043\tval-Accuracy : 0.871\n", |
|
|
2316 |
"Begin Training rep 1/1\t of Patient 10\n", |
|
|
2317 |
"[1, 45] loss: 1.402\tAccuracy : 0.285\t\tval-loss: 1.382\tval-Accuracy : 0.271\n", |
|
|
2318 |
"[2, 45] loss: 1.400\tAccuracy : 0.285\t\tval-loss: 1.380\tval-Accuracy : 0.271\n", |
|
|
2319 |
"[3, 45] loss: 1.398\tAccuracy : 0.285\t\tval-loss: 1.379\tval-Accuracy : 0.271\n", |
|
|
2320 |
"[4, 45] loss: 1.396\tAccuracy : 0.285\t\tval-loss: 1.374\tval-Accuracy : 0.271\n", |
|
|
2321 |
"[5, 45] loss: 1.378\tAccuracy : 0.285\t\tval-loss: 1.302\tval-Accuracy : 0.271\n", |
|
|
2322 |
"[6, 45] loss: 1.270\tAccuracy : 0.467\t\tval-loss: 1.120\tval-Accuracy : 0.467\n", |
|
|
2323 |
"[7, 45] loss: 1.172\tAccuracy : 0.559\t\tval-loss: 1.016\tval-Accuracy : 0.562\n", |
|
|
2324 |
"[8, 45] loss: 1.097\tAccuracy : 0.586\t\tval-loss: 0.936\tval-Accuracy : 0.600\n", |
|
|
2325 |
"[9, 45] loss: 1.027\tAccuracy : 0.620\t\tval-loss: 0.890\tval-Accuracy : 0.648\n", |
|
|
2326 |
"[10, 45] loss: 0.963\tAccuracy : 0.657\t\tval-loss: 0.813\tval-Accuracy : 0.671\n", |
|
|
2327 |
"[11, 45] loss: 0.887\tAccuracy : 0.700\t\tval-loss: 0.712\tval-Accuracy : 0.733\n", |
|
|
2328 |
"[12, 45] loss: 0.842\tAccuracy : 0.714\t\tval-loss: 0.636\tval-Accuracy : 0.786\n", |
|
|
2329 |
"[13, 45] loss: 0.764\tAccuracy : 0.754\t\tval-loss: 0.595\tval-Accuracy : 0.814\n", |
|
|
2330 |
"[14, 45] loss: 0.690\tAccuracy : 0.784\t\tval-loss: 0.482\tval-Accuracy : 0.838\n", |
|
|
2331 |
"[15, 45] loss: 0.628\tAccuracy : 0.815\t\tval-loss: 0.438\tval-Accuracy : 0.857\n", |
|
|
2332 |
"[16, 45] loss: 0.604\tAccuracy : 0.826\t\tval-loss: 0.375\tval-Accuracy : 0.895\n", |
|
|
2333 |
"[17, 45] loss: 0.553\tAccuracy : 0.840\t\tval-loss: 0.348\tval-Accuracy : 0.900\n", |
|
|
2334 |
"[18, 45] loss: 0.425\tAccuracy : 0.867\t\tval-loss: 0.279\tval-Accuracy : 0.938\n", |
|
|
2335 |
"[19, 45] loss: 0.396\tAccuracy : 0.889\t\tval-loss: 0.228\tval-Accuracy : 0.957\n", |
|
|
2336 |
"[20, 45] loss: 0.338\tAccuracy : 0.893\t\tval-loss: 0.194\tval-Accuracy : 0.957\n", |
|
|
2337 |
"[21, 45] loss: 0.262\tAccuracy : 0.914\t\tval-loss: 0.182\tval-Accuracy : 0.952\n", |
|
|
2338 |
"[22, 45] loss: 0.219\tAccuracy : 0.925\t\tval-loss: 0.217\tval-Accuracy : 0.943\n", |
|
|
2339 |
"[23, 45] loss: 0.224\tAccuracy : 0.934\t\tval-loss: 0.160\tval-Accuracy : 0.957\n", |
|
|
2340 |
"[24, 45] loss: 0.256\tAccuracy : 0.901\t\tval-loss: 0.193\tval-Accuracy : 0.952\n", |
|
|
2341 |
"[25, 45] loss: 0.266\tAccuracy : 0.921\t\tval-loss: 0.134\tval-Accuracy : 0.952\n", |
|
|
2342 |
"[26, 45] loss: 0.172\tAccuracy : 0.945\t\tval-loss: 0.153\tval-Accuracy : 0.962\n", |
|
|
2343 |
"[27, 45] loss: 0.139\tAccuracy : 0.950\t\tval-loss: 0.152\tval-Accuracy : 0.962\n", |
|
|
2344 |
"[28, 45] loss: 0.121\tAccuracy : 0.953\t\tval-loss: 0.153\tval-Accuracy : 0.957\n", |
|
|
2345 |
"[29, 45] loss: 0.111\tAccuracy : 0.954\t\tval-loss: 0.155\tval-Accuracy : 0.962\n", |
|
|
2346 |
"[30, 45] loss: 0.103\tAccuracy : 0.955\t\tval-loss: 0.158\tval-Accuracy : 0.957\n", |
|
|
2347 |
"[31, 45] loss: 0.098\tAccuracy : 0.955\t\tval-loss: 0.162\tval-Accuracy : 0.957\n", |
|
|
2348 |
"[32, 45] loss: 0.093\tAccuracy : 0.959\t\tval-loss: 0.162\tval-Accuracy : 0.952\n", |
|
|
2349 |
"[33, 45] loss: 0.088\tAccuracy : 0.959\t\tval-loss: 0.169\tval-Accuracy : 0.952\n", |
|
|
2350 |
"[34, 45] loss: 0.085\tAccuracy : 0.959\t\tval-loss: 0.181\tval-Accuracy : 0.943\n", |
|
|
2351 |
"[35, 45] loss: 0.082\tAccuracy : 0.959\t\tval-loss: 0.190\tval-Accuracy : 0.938\n", |
|
|
2352 |
"[36, 45] loss: 0.080\tAccuracy : 0.960\t\tval-loss: 0.200\tval-Accuracy : 0.938\n", |
|
|
2353 |
"[37, 45] loss: 0.077\tAccuracy : 0.960\t\tval-loss: 0.209\tval-Accuracy : 0.938\n", |
|
|
2354 |
"[38, 45] loss: 0.075\tAccuracy : 0.961\t\tval-loss: 0.214\tval-Accuracy : 0.938\n", |
|
|
2355 |
"[39, 45] loss: 0.073\tAccuracy : 0.961\t\tval-loss: 0.218\tval-Accuracy : 0.938\n", |
|
|
2356 |
"[40, 45] loss: 0.071\tAccuracy : 0.961\t\tval-loss: 0.228\tval-Accuracy : 0.938\n", |
|
|
2357 |
"[41, 45] loss: 0.069\tAccuracy : 0.962\t\tval-loss: 0.236\tval-Accuracy : 0.938\n", |
|
|
2358 |
"[42, 45] loss: 0.068\tAccuracy : 0.962\t\tval-loss: 0.248\tval-Accuracy : 0.943\n", |
|
|
2359 |
"[43, 45] loss: 0.067\tAccuracy : 0.963\t\tval-loss: 0.256\tval-Accuracy : 0.943\n", |
|
|
2360 |
"[44, 45] loss: 0.066\tAccuracy : 0.963\t\tval-loss: 0.261\tval-Accuracy : 0.938\n", |
|
|
2361 |
"[45, 45] loss: 0.065\tAccuracy : 0.963\t\tval-loss: 0.271\tval-Accuracy : 0.938\n", |
|
|
2362 |
"loss: 0.065\tAccuracy : 0.963\t\tval-loss: 0.271\tval-Accuracy : 0.938\n", |
|
|
2363 |
"Begin Training rep 1/1\t of Patient 8\n", |
|
|
2364 |
"[1, 45] loss: 1.400\tAccuracy : 0.283\t\tval-loss: 1.373\tval-Accuracy : 0.290\n", |
|
|
2365 |
"[2, 45] loss: 1.399\tAccuracy : 0.283\t\tval-loss: 1.370\tval-Accuracy : 0.290\n", |
|
|
2366 |
"[3, 45] loss: 1.398\tAccuracy : 0.283\t\tval-loss: 1.367\tval-Accuracy : 0.290\n", |
|
|
2367 |
"[4, 45] loss: 1.397\tAccuracy : 0.283\t\tval-loss: 1.364\tval-Accuracy : 0.290\n", |
|
|
2368 |
"[5, 45] loss: 1.395\tAccuracy : 0.283\t\tval-loss: 1.359\tval-Accuracy : 0.290\n", |
|
|
2369 |
"[6, 45] loss: 1.385\tAccuracy : 0.283\t\tval-loss: 1.320\tval-Accuracy : 0.290\n", |
|
|
2370 |
"[7, 45] loss: 1.287\tAccuracy : 0.499\t\tval-loss: 1.221\tval-Accuracy : 0.373\n", |
|
|
2371 |
"[8, 45] loss: 1.172\tAccuracy : 0.560\t\tval-loss: 1.076\tval-Accuracy : 0.534\n", |
|
|
2372 |
"[9, 45] loss: 1.078\tAccuracy : 0.619\t\tval-loss: 0.941\tval-Accuracy : 0.627\n", |
|
|
2373 |
"[10, 45] loss: 0.986\tAccuracy : 0.677\t\tval-loss: 0.871\tval-Accuracy : 0.705\n", |
|
|
2374 |
"[11, 45] loss: 0.886\tAccuracy : 0.705\t\tval-loss: 0.763\tval-Accuracy : 0.705\n", |
|
|
2375 |
"[12, 45] loss: 0.829\tAccuracy : 0.727\t\tval-loss: 0.767\tval-Accuracy : 0.746\n", |
|
|
2376 |
"[13, 45] loss: 0.714\tAccuracy : 0.778\t\tval-loss: 0.555\tval-Accuracy : 0.855\n", |
|
|
2377 |
"[14, 45] loss: 0.628\tAccuracy : 0.802\t\tval-loss: 0.532\tval-Accuracy : 0.834\n", |
|
|
2378 |
"[15, 45] loss: 0.625\tAccuracy : 0.825\t\tval-loss: 0.472\tval-Accuracy : 0.886\n", |
|
|
2379 |
"[16, 45] loss: 0.508\tAccuracy : 0.859\t\tval-loss: 0.493\tval-Accuracy : 0.902\n", |
|
|
2380 |
"[17, 45] loss: 0.379\tAccuracy : 0.887\t\tval-loss: 0.454\tval-Accuracy : 0.902\n", |
|
|
2381 |
"[18, 45] loss: 0.389\tAccuracy : 0.869\t\tval-loss: 0.445\tval-Accuracy : 0.907\n", |
|
|
2382 |
"[19, 45] loss: 0.326\tAccuracy : 0.897\t\tval-loss: 0.337\tval-Accuracy : 0.933\n", |
|
|
2383 |
"[20, 45] loss: 0.279\tAccuracy : 0.907\t\tval-loss: 0.660\tval-Accuracy : 0.948\n", |
|
|
2384 |
"[21, 45] loss: 0.210\tAccuracy : 0.926\t\tval-loss: 0.715\tval-Accuracy : 0.938\n", |
|
|
2385 |
"[22, 45] loss: 0.170\tAccuracy : 0.931\t\tval-loss: 0.685\tval-Accuracy : 0.943\n", |
|
|
2386 |
"[23, 45] loss: 0.146\tAccuracy : 0.931\t\tval-loss: 0.567\tval-Accuracy : 0.943\n", |
|
|
2387 |
"[24, 45] loss: 0.131\tAccuracy : 0.942\t\tval-loss: 0.690\tval-Accuracy : 0.943\n", |
|
|
2388 |
"[25, 45] loss: 0.118\tAccuracy : 0.944\t\tval-loss: 0.750\tval-Accuracy : 0.943\n", |
|
|
2389 |
"[26, 45] loss: 0.110\tAccuracy : 0.947\t\tval-loss: 0.761\tval-Accuracy : 0.938\n", |
|
|
2390 |
"[27, 45] loss: 0.104\tAccuracy : 0.948\t\tval-loss: 0.730\tval-Accuracy : 0.938\n", |
|
|
2391 |
"[28, 45] loss: 0.099\tAccuracy : 0.948\t\tval-loss: 0.796\tval-Accuracy : 0.938\n", |
|
|
2392 |
"[29, 45] loss: 0.094\tAccuracy : 0.950\t\tval-loss: 0.847\tval-Accuracy : 0.938\n", |
|
|
2393 |
"[30, 45] loss: 0.090\tAccuracy : 0.950\t\tval-loss: 0.866\tval-Accuracy : 0.943\n" |
|
|
2394 |
] |
|
|
2395 |
}, |
|
|
2396 |
{ |
|
|
2397 |
"ename": "KeyboardInterrupt", |
|
|
2398 |
"evalue": "", |
|
|
2399 |
"output_type": "error", |
|
|
2400 |
"traceback": [ |
|
|
2401 |
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", |
|
|
2402 |
"\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", |
|
|
2403 |
"\u001b[1;32m<ipython-input-41-e7fae06453d9>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 52\u001b[0m \u001b[0macc\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 53\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0macc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m/\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 54\u001b[1;33m \u001b[0m_\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0midx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mCNN\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 55\u001b[0m \u001b[0macc\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0midx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"cpu\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m==\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m+\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 56\u001b[0m \u001b[0macc\u001b[0m \u001b[1;33m=\u001b[0m 
\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0macc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
2404 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 539\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 540\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 542\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 543\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
2405 |
"\u001b[1;32m<ipython-input-16-e790eb8c12b5>\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m 26\u001b[0m \u001b[0mtmp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m128\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 27\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m7\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 28\u001b[1;33m \u001b[0mtmp\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv7\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv6\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv5\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpool1\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv4\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv3\u001b[0m\u001b[1;33m(\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 29\u001b[0m \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m 
\u001b[0mtmp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m*\u001b[0m\u001b[1;36m128\u001b[0m\u001b[1;33m*\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconv8\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
2406 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 539\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 540\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 541\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 542\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 543\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
2407 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\modules\\pooling.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m 139\u001b[0m return F.max_pool2d(input, self.kernel_size, self.stride,\n\u001b[0;32m 140\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpadding\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdilation\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mceil_mode\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 141\u001b[1;33m self.return_indices)\n\u001b[0m\u001b[0;32m 142\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 143\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
2408 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\_jit_internal.py\u001b[0m in \u001b[0;36mfn\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 136\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mif_true\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 137\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 138\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mif_false\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 139\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 140\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mif_true\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__doc__\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mif_false\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__doc__\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
2409 |
"\u001b[1;32mc:\\users\\victo\\.conda\\envs\\pytorch_eeg\\lib\\site-packages\\torch\\nn\\functional.py\u001b[0m in \u001b[0;36m_max_pool2d\u001b[1;34m(input, kernel_size, stride, padding, dilation, ceil_mode, return_indices)\u001b[0m\n\u001b[0;32m 486\u001b[0m \u001b[0mstride\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjit\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mannotate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mList\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mint\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 487\u001b[0m return torch.max_pool2d(\n\u001b[1;32m--> 488\u001b[1;33m input, kernel_size, stride, padding, dilation, ceil_mode)\n\u001b[0m\u001b[0;32m 489\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 490\u001b[0m max_pool2d = boolean_dispatch(\n", |
|
|
2410 |
"\u001b[1;31mKeyboardInterrupt\u001b[0m: " |
|
|
2411 |
] |
|
|
2412 |
} |
|
|
2413 |
], |
|
|
2414 |
"source": [ |
|
|
2415 |
"p = 0\n", |
|
|
2416 |
"n_rep = 1 \n", |
|
|
2417 |
"n_patient = len(np.unique(Patient))\n", |
|
|
2418 |
"fold_vloss = np.zeros((n_rep,n_patient))\n", |
|
|
2419 |
"fold_loss = np.zeros((n_rep,n_patient))\n", |
|
|
2420 |
"fold_vacc = np.zeros((n_rep,n_patient))\n", |
|
|
2421 |
"fold_acc = np.zeros((n_rep,n_patient))\n", |
|
|
2422 |
"\n", |
|
|
2423 |
"un = np.unique(Patient)\n", |
|
|
2424 |
"np.random.shuffle(un)\n", |
|
|
2425 |
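"# leave-one-subject-out cross-validation: each patient in turn is held out as the test set, all other patients form the training set\n", |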
"for patient in un:\n", |
|
|
2426 |
" id_patient = np.arange(len(tmp))[Patient==patient]\n", |
|
|
2427 |
" id_train = np.arange(len(tmp))[Patient!=patient]\n", |
|
|
2428 |
" \n", |
|
|
2429 |
" for rep in range(n_rep):\n", |
|
|
2430 |
" np.random.shuffle(id_patient)\n", |
|
|
2431 |
" np.random.shuffle(id_train)\n", |
|
|
2432 |
" \n", |
|
|
2433 |
" X_train = tmp[id_train]\n", |
|
|
2434 |
" X_test = tmp[id_patient]\n", |
|
|
2435 |
" y_train = Label[id_train]\n", |
|
|
2436 |
" y_test = Label[id_patient]\n", |
|
|
2437 |
" \n", |
|
|
2438 |
" print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n", |
|
|
2439 |
" (rep+1,n_rep, patient))\n", |
|
|
2440 |
" \n", |
|
|
2441 |
" CNN = TempCNN().cuda()\n", |
|
|
2442 |
" criterion = nn.NLLLoss()\n", |
|
|
2443 |
" optimizer = optim.SGD(CNN.parameters(), lr=0.01)\n", |
|
|
2444 |
" \n", |
|
|
2445 |
" n_epochs = 45\n", |
|
|
2446 |
" for epoch in range(n_epochs):\n", |
|
|
2447 |
" running_loss = 0.0\n", |
|
|
2448 |
" batchsize = 32\n", |
|
|
2449 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
2450 |
" \n", |
|
|
2451 |
" CNN.to(torch.device(\"cuda\"))\n", |
|
|
2452 |
" optimizer.zero_grad()\n", |
|
|
2453 |
" # forward + backward + optimize\n", |
|
|
2454 |
" outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda())\n", |
|
|
2455 |
" loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n", |
|
|
2456 |
" loss.backward()\n", |
|
|
2457 |
" optimizer.step()\n", |
|
|
2458 |
" running_loss += loss.item()\n", |
|
|
2459 |
" \n", |
|
|
2460 |
" if epoch%1==0:\n", |
|
|
2461 |
"\n", |
|
|
2462 |
" check_id = np.arange(2000)\n", |
|
|
2463 |
" np.random.shuffle(check_id)\n", |
|
|
2464 |
" \n", |
|
|
2465 |
" #acc\n", |
|
|
2466 |
" acc = np.zeros(len(y_train))\n", |
|
|
2467 |
" for j in range(int(len(acc)/batchsize)+1):\n", |
|
|
2468 |
" _, idx = torch.max(CNN(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda()).data,1)\n", |
|
|
2469 |
" acc[j*batchsize:(j+1)*batchsize] = (idx.to(torch.device(\"cpu\")) == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n", |
|
|
2470 |
" acc = np.mean(acc)\n", |
|
|
2471 |
" \n", |
|
|
2472 |
" #validation\n", |
|
|
2473 |
" val_acc = np.zeros(len(y_test))\n", |
|
|
2474 |
" val_loss = []\n", |
|
|
2475 |
" for j in range(int(len(val_acc)/batchsize)+1):\n", |
|
|
2476 |
" val_outputs = CNN(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda())\n", |
|
|
2477 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
2478 |
" val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long).cuda()).item())\n", |
|
|
2479 |
" val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n", |
|
|
2480 |
" val_acc = np.mean(val_acc)\n", |
|
|
2481 |
" val_loss = np.mean(val_loss)\n", |
|
|
2482 |
"\n", |
|
|
2483 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
2484 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
2485 |
" \n", |
|
|
2486 |
" if epoch==900:\n", |
|
|
2487 |
"\n", |
|
|
2488 |
" check_id = np.arange(2000)\n", |
|
|
2489 |
" np.random.shuffle(check_id)\n", |
|
|
2490 |
" \n", |
|
|
2491 |
" #acc\n", |
|
|
2492 |
" acc = np.zeros(len(y_train))\n", |
|
|
2493 |
" for j in range(int(len(acc)/batchsize)+1):\n", |
|
|
2494 |
" _, idx = torch.max(cnn_cpu(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32)).data,1)\n", |
|
|
2495 |
" acc[j*batchsize:(j+1)*batchsize] = (idx == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n", |
|
|
2496 |
" acc = np.mean(acc)\n", |
|
|
2497 |
" \n", |
|
|
2498 |
" #validation\n", |
|
|
2499 |
" val_acc = np.zeros(len(y_test))\n", |
|
|
2500 |
" val_loss = []\n", |
|
|
2501 |
" for j in range(int(len(val_acc)/batchsize)+1):\n", |
|
|
2502 |
" val_outputs = cnn_cpu(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32))\n", |
|
|
2503 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
2504 |
" val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long)).item())\n", |
|
|
2505 |
" val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n", |
|
|
2506 |
" val_acc = np.mean(val_acc)\n", |
|
|
2507 |
" val_loss = np.mean(val_loss)\n", |
|
|
2508 |
"\n", |
|
|
2509 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
2510 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
2511 |
"\n", |
|
|
2512 |
" fold_vloss[rep, p ] = val_loss\n", |
|
|
2513 |
" fold_loss[rep, p] = running_loss/i\n", |
|
|
2514 |
" fold_vacc[rep, p] = val_acc\n", |
|
|
2515 |
" fold_acc[rep, p] = acc\n", |
|
|
2516 |
" \n", |
|
|
2517 |
" \n", |
|
|
2518 |
" print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
2519 |
" (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n", |
|
|
2520 |
" \n", |
|
|
2521 |
" p = p + 1" |
|
|
2522 |
] |
|
|
2523 |
}, |
|
|
2524 |
{ |
|
|
2525 |
"cell_type": "code", |
|
|
2526 |
"execution_count": 1, |
|
|
2527 |
"metadata": { |
|
|
2528 |
"collapsed": true, |
|
|
2529 |
"jupyter": { |
|
|
2530 |
"outputs_hidden": true |
|
|
2531 |
} |
|
|
2532 |
}, |
|
|
2533 |
"outputs": [ |
|
|
2534 |
{ |
|
|
2535 |
"ename": "NameError", |
|
|
2536 |
"evalue": "name 'sio' is not defined", |
|
|
2537 |
"output_type": "error", |
|
|
2538 |
"traceback": [ |
|
|
2539 |
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", |
|
|
2540 |
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)", |
|
|
2541 |
"\u001b[1;32m<ipython-input-1-e97a8aad55e1>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0msio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msavemat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Result/Res_TemporalCNN.mat'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m{\u001b[0m\u001b[1;34m\"loss\"\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mfold_loss\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;34m\"acc\"\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mfold_acc\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;34m\"val loss\"\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mfold_vloss\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;34m\"val acc\"\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mfold_vacc\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[0mfig\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfigure\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfigsize\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m12\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgrid\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mboxplot\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfold_vacc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
2542 |
"\u001b[1;31mNameError\u001b[0m: name 'sio' is not defined" |
|
|
2543 |
] |
|
|
2544 |
} |
|
|
2545 |
], |
|
|
2546 |
"source": [ |
|
|
2547 |
"sio.savemat('Result/Res_TemporalCNN.mat',{\"loss\":fold_loss,\"acc\":fold_acc,\"val loss\":fold_vloss,\"val acc\":fold_vacc})\n", |
|
|
2548 |
"\n", |
|
|
2549 |
"fig = plt.figure(figsize=(12,10))\n", |
|
|
2550 |
"plt.grid()\n", |
|
|
2551 |
"plt.boxplot(fold_vacc)\n", |
|
|
2552 |
"plt.suptitle('Cross-Validation Accuracy\\n Temporal CNN')\n", |
|
|
2553 |
"ax = plt.gca()\n", |
|
|
2554 |
"plt.xlabel('Patient id')\n", |
|
|
2555 |
"plt.ylabel('Accuracy')\n", |
|
|
2556 |
"plt.savefig('Result/TemporalCNN.png')\n", |
|
|
2557 |
"plt.show()" |
|
|
2558 |
] |
|
|
2559 |
}, |
|
|
2560 |
{ |
|
|
2561 |
"cell_type": "code", |
|
|
2562 |
"execution_count": null, |
|
|
2563 |
"metadata": {}, |
|
|
2564 |
"outputs": [], |
|
|
2565 |
"source": [] |
|
|
2566 |
}, |
|
|
2567 |
{ |
|
|
2568 |
"cell_type": "code", |
|
|
2569 |
"execution_count": 4, |
|
|
2570 |
"metadata": { |
|
|
2571 |
"jupyter": { |
|
|
2572 |
"outputs_hidden": true |
|
|
2573 |
} |
|
|
2574 |
}, |
|
|
2575 |
"outputs": [ |
|
|
2576 |
{ |
|
|
2577 |
"name": "stdout", |
|
|
2578 |
"output_type": "stream", |
|
|
2579 |
"text": [ |
|
|
2580 |
"Begin Training rep 1/1\t of Patient 12\n" |
|
|
2581 |
] |
|
|
2582 |
}, |
|
|
2583 |
{ |
|
|
2584 |
"ename": "NameError", |
|
|
2585 |
"evalue": "name 'TempCNN' is not defined", |
|
|
2586 |
"output_type": "error", |
|
|
2587 |
"traceback": [ |
|
|
2588 |
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", |
|
|
2589 |
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)", |
|
|
2590 |
"\u001b[1;32m<ipython-input-4-e7fae06453d9>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 25\u001b[0m (rep+1,n_rep, patient))\n\u001b[0;32m 26\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 27\u001b[1;33m \u001b[0mCNN\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mTempCNN\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 28\u001b[0m \u001b[0mcriterion\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mNLLLoss\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[0moptimizer\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0moptim\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mSGD\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mCNN\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mparameters\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlr\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0.01\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", |
|
|
2591 |
"\u001b[1;31mNameError\u001b[0m: name 'TempCNN' is not defined" |
|
|
2592 |
] |
|
|
2593 |
} |
|
|
2594 |
], |
|
|
2595 |
"source": [ |
|
|
2596 |
"p = 0\n", |
|
|
2597 |
"n_rep = 1 \n", |
|
|
2598 |
"n_patient = len(np.unique(Patient))\n", |
|
|
2599 |
"fold_vloss = np.zeros((n_rep,n_patient))\n", |
|
|
2600 |
"fold_loss = np.zeros((n_rep,n_patient))\n", |
|
|
2601 |
"fold_vacc = np.zeros((n_rep,n_patient))\n", |
|
|
2602 |
"fold_acc = np.zeros((n_rep,n_patient))\n", |
|
|
2603 |
"\n", |
|
|
2604 |
"un = np.unique(Patient)\n", |
|
|
2605 |
"np.random.shuffle(un)\n", |
|
|
2606 |
"for patient in un:\n", |
|
|
2607 |
" id_patient = np.arange(len(tmp))[Patient==patient]\n", |
|
|
2608 |
" id_train = np.arange(len(tmp))[Patient!=patient]\n", |
|
|
2609 |
" \n", |
|
|
2610 |
" for rep in range(n_rep):\n", |
|
|
2611 |
" np.random.shuffle(id_patient)\n", |
|
|
2612 |
" np.random.shuffle(id_train)\n", |
|
|
2613 |
" \n", |
|
|
2614 |
" X_train = tmp[id_train]\n", |
|
|
2615 |
" X_test = tmp[id_patient]\n", |
|
|
2616 |
" y_train = Label[id_train]\n", |
|
|
2617 |
" y_test = Label[id_patient]\n", |
|
|
2618 |
" \n", |
|
|
2619 |
" print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n", |
|
|
2620 |
" (rep+1,n_rep, patient))\n", |
|
|
2621 |
" \n", |
|
|
2622 |
" CNN = TempCNN().cuda()\n", |
|
|
2623 |
" criterion = nn.NLLLoss()\n", |
|
|
2624 |
" optimizer = optim.SGD(CNN.parameters(), lr=0.01)\n", |
|
|
2625 |
" \n", |
|
|
2626 |
" n_epochs = 45\n", |
|
|
2627 |
" for epoch in range(n_epochs):\n", |
|
|
2628 |
" running_loss = 0.0\n", |
|
|
2629 |
" batchsize = 32\n", |
|
|
2630 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
2631 |
" \n", |
|
|
2632 |
" CNN.to(torch.device(\"cuda\"))\n", |
|
|
2633 |
" optimizer.zero_grad()\n", |
|
|
2634 |
" # forward + backward + optimize\n", |
|
|
2635 |
" outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda())\n", |
|
|
2636 |
" loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n", |
|
|
2637 |
" loss.backward()\n", |
|
|
2638 |
" optimizer.step()\n", |
|
|
2639 |
" running_loss += loss.item()\n", |
|
|
2640 |
" \n", |
|
|
2641 |
" if epoch%1==0:\n", |
|
|
2642 |
"\n", |
|
|
2643 |
" check_id = np.arange(2000)\n", |
|
|
2644 |
" np.random.shuffle(check_id)\n", |
|
|
2645 |
" \n", |
|
|
2646 |
" #acc\n", |
|
|
2647 |
" acc = np.zeros(len(y_train))\n", |
|
|
2648 |
" for j in range(int(len(acc)/batchsize)+1):\n", |
|
|
2649 |
" _, idx = torch.max(CNN(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda()).data,1)\n", |
|
|
2650 |
" acc[j*batchsize:(j+1)*batchsize] = (idx.to(torch.device(\"cpu\")) == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n", |
|
|
2651 |
" acc = np.mean(acc)\n", |
|
|
2652 |
" \n", |
|
|
2653 |
" #validation\n", |
|
|
2654 |
" val_acc = np.zeros(len(y_test))\n", |
|
|
2655 |
" val_loss = []\n", |
|
|
2656 |
" for j in range(int(len(val_acc)/batchsize)+1):\n", |
|
|
2657 |
" val_outputs = CNN(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda())\n", |
|
|
2658 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
2659 |
" val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long).cuda()).item())\n", |
|
|
2660 |
" val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n", |
|
|
2661 |
" val_acc = np.mean(val_acc)\n", |
|
|
2662 |
" val_loss = np.mean(val_loss)\n", |
|
|
2663 |
"\n", |
|
|
2664 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
2665 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
2666 |
" \n", |
|
|
2667 |
" if epoch==900:\n", |
|
|
2668 |
"\n", |
|
|
2669 |
" check_id = np.arange(2000)\n", |
|
|
2670 |
" np.random.shuffle(check_id)\n", |
|
|
2671 |
" \n", |
|
|
2672 |
" #acc\n", |
|
|
2673 |
" acc = np.zeros(len(y_train))\n", |
|
|
2674 |
" for j in range(int(len(acc)/batchsize)+1):\n", |
|
|
2675 |
" _, idx = torch.max(cnn_cpu(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32)).data,1)\n", |
|
|
2676 |
" acc[j*batchsize:(j+1)*batchsize] = (idx == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n", |
|
|
2677 |
" acc = np.mean(acc)\n", |
|
|
2678 |
" \n", |
|
|
2679 |
" #validation\n", |
|
|
2680 |
" val_acc = np.zeros(len(y_test))\n", |
|
|
2681 |
" val_loss = []\n", |
|
|
2682 |
" for j in range(int(len(val_acc)/batchsize)+1):\n", |
|
|
2683 |
" val_outputs = cnn_cpu(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32))\n", |
|
|
2684 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
2685 |
" val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long)).item())\n", |
|
|
2686 |
" val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n", |
|
|
2687 |
" val_acc = np.mean(val_acc)\n", |
|
|
2688 |
" val_loss = np.mean(val_loss)\n", |
|
|
2689 |
"\n", |
|
|
2690 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
2691 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
2692 |
"\n", |
|
|
2693 |
" fold_vloss[rep, p ] = val_loss\n", |
|
|
2694 |
" fold_loss[rep, p] = running_loss/i\n", |
|
|
2695 |
" fold_vacc[rep, p] = val_acc\n", |
|
|
2696 |
" fold_acc[rep, p] = acc\n", |
|
|
2697 |
" \n", |
|
|
2698 |
" \n", |
|
|
2699 |
" print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
2700 |
" (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n", |
|
|
2701 |
" \n", |
|
|
2702 |
" p = p + 1" |
|
|
2703 |
] |
|
|
2704 |
}, |
|
|
2705 |
{ |
|
|
2706 |
"cell_type": "code", |
|
|
2707 |
"execution_count": null, |
|
|
2708 |
"metadata": {}, |
|
|
2709 |
"outputs": [], |
|
|
2710 |
"source": [] |
|
|
2711 |
}, |
|
|
2712 |
{ |
|
|
2713 |
"cell_type": "code", |
|
|
2714 |
"execution_count": 40, |
|
|
2715 |
"metadata": {}, |
|
|
2716 |
"outputs": [], |
|
|
2717 |
"source": [ |
|
|
2718 |
"class LSTM(nn.Module):\n", |
|
|
2719 |
" def __init__(self):\n", |
|
|
2720 |
" super(LSTM, self).__init__()\n", |
|
|
2721 |
" \n", |
|
|
2722 |
" \n", |
|
|
2723 |
" self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n", |
|
|
2724 |
" self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2725 |
" self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2726 |
" self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2727 |
" self.pool1 = nn.MaxPool2d((2,2))\n", |
|
|
2728 |
" self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
2729 |
" self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
2730 |
" self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n", |
|
|
2731 |
" \n", |
|
|
2732 |
" self.lstm = nn.LSTM(4*4*128,128,7)\n", |
|
|
2733 |
" \n", |
|
|
2734 |
" self.pool = nn.MaxPool2d((7,1))\n", |
|
|
2735 |
" self.drop = nn.Dropout(p=0.5)\n", |
|
|
2736 |
" self.fc = nn.Linear(896,4)\n", |
|
|
2737 |
" self.max = nn.LogSoftmax()\n", |
|
|
2738 |
" \n", |
|
|
2739 |
" self.hidden = self.init_hidden()\n", |
|
|
2740 |
"\n", |
|
|
2741 |
" def init_hidden(self):\n", |
|
|
2742 |
" return (torch.randn(7, 7, 128).cuda(),\n", |
|
|
2743 |
" torch.randn(7, 7, 128).cuda()) \n", |
|
|
2744 |
" \n", |
|
|
2745 |
" def forward(self, x):\n", |
|
|
2746 |
" if x.get_device() == 0:\n", |
|
|
2747 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n", |
|
|
2748 |
" else:\n", |
|
|
2749 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n", |
|
|
2750 |
" for i in range(7):\n", |
|
|
2751 |
" tmp[:,i] = self.pool1(F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n", |
|
|
2752 |
" x = tmp.reshape(x.shape[0], x.shape[1],4*128*4)\n", |
|
|
2753 |
" \n", |
|
|
2754 |
" lstm_out, self.hidden = self.lstm(x,self.hidden)\n", |
|
|
2755 |
" x = lstm_out.view(x.shape[0],-1)\n", |
|
|
2756 |
" \n", |
|
|
2757 |
" x = self.fc(x)\n", |
|
|
2758 |
" x = self.max(x)\n", |
|
|
2759 |
" return x" |
|
|
2760 |
] |
|
|
2761 |
}, |
|
|
2762 |
{ |
|
|
2763 |
"cell_type": "code", |
|
|
2764 |
"execution_count": 5, |
|
|
2765 |
"metadata": {}, |
|
|
2766 |
"outputs": [], |
|
|
2767 |
"source": [ |
|
|
2768 |
"class LSTM(nn.Module):\n", |
|
|
2769 |
" def __init__(self):\n", |
|
|
2770 |
" super(LSTM, self).__init__()\n", |
|
|
2771 |
" \n", |
|
|
2772 |
" \n", |
|
|
2773 |
" self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n", |
|
|
2774 |
" self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2775 |
" self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2776 |
" self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2777 |
" self.pool1 = nn.MaxPool2d((2,2))\n", |
|
|
2778 |
" self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
2779 |
" self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
2780 |
" self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n", |
|
|
2781 |
" \n", |
|
|
2782 |
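" # note: despite the class name, this variant stacks 7 nn.RNN layers (hidden size 128) over the flattened 4x4x128 CNN features\n", |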
" self.lstm = nn.RNN(4*4*128,128,7)\n", |
|
|
2783 |
" \n", |
|
|
2784 |
" self.pool = nn.MaxPool2d((7,1))\n", |
|
|
2785 |
" self.drop = nn.Dropout(p=0.5)\n", |
|
|
2786 |
" self.fc = nn.Linear(896,4)\n", |
|
|
2787 |
" self.max = nn.LogSoftmax()\n", |
|
|
2788 |
" \n", |
|
|
2789 |
" self.lstm_out = torch.zeros(2,7,128)\n", |
|
|
2790 |
" \n", |
|
|
2791 |
" def forward(self, x):\n", |
|
|
2792 |
" if x.get_device() == 0:\n", |
|
|
2793 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n", |
|
|
2794 |
" else:\n", |
|
|
2795 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n", |
|
|
2796 |
" for i in range(7):\n", |
|
|
2797 |
" img = x[:,i]\n", |
|
|
2798 |
" img = F.relu(self.conv1(img))\n", |
|
|
2799 |
" img = F.relu(self.conv2(img))\n", |
|
|
2800 |
" img = F.relu(self.conv3(img))\n", |
|
|
2801 |
" img = F.relu(self.conv4(img))\n", |
|
|
2802 |
" img = self.pool1(img)\n", |
|
|
2803 |
" img = F.relu(self.conv5(img))\n", |
|
|
2804 |
" img = F.relu(self.conv6(img))\n", |
|
|
2805 |
" img = self.pool1(img)\n", |
|
|
2806 |
" img = F.relu(self.conv7(img))\n", |
|
|
2807 |
" #x[:,i,]\n", |
|
|
2808 |
" tmp[:,i] = self.pool1(img)\n", |
|
|
2809 |
" del img\n", |
|
|
2810 |
" #tmp[:,i] = self.pool1(F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n", |
|
|
2811 |
" x = tmp.reshape(x.shape[0], x.shape[1],4*128*4)\n", |
|
|
2812 |
" del tmp\n", |
|
|
2813 |
" #self.lstm_out, self.hidden = self.lstm(x,self.hidden)\n", |
|
|
2814 |
" self.lstm_out, _ = self.lstm(x)\n", |
|
|
2815 |
" \n", |
|
|
2816 |
" x = self.lstm_out.view(x.shape[0],-1)\n", |
|
|
2817 |
" x = self.fc(x)\n", |
|
|
2818 |
" x = self.max(x)\n", |
|
|
2819 |
" return x" |
|
|
2820 |
] |
|
|
2821 |
}, |
|
|
2822 |
{ |
|
|
2823 |
"cell_type": "code", |
|
|
2824 |
"execution_count": 6, |
|
|
2825 |
"metadata": {}, |
|
|
2826 |
"outputs": [ |
|
|
2827 |
{ |
|
|
2828 |
"name": "stderr", |
|
|
2829 |
"output_type": "stream", |
|
|
2830 |
"text": [ |
|
|
2831 |
"/home/vdelv/anaconda3/envs/Pytorch_EEG/lib/python3.7/site-packages/ipykernel_launcher.py:51: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
2832 |
] |
|
|
2833 |
}, |
|
|
2834 |
{ |
|
|
2835 |
"data": { |
|
|
2836 |
"text/plain": [ |
|
|
2837 |
"tensor([[-1.3417, -1.3869, -1.4236, -1.3948],\n", |
|
|
2838 |
" [-1.3531, -1.3826, -1.4254, -1.3854]], device='cuda:0',\n", |
|
|
2839 |
" grad_fn=<LogSoftmaxBackward>)" |
|
|
2840 |
] |
|
|
2841 |
}, |
|
|
2842 |
"execution_count": 6, |
|
|
2843 |
"metadata": {}, |
|
|
2844 |
"output_type": "execute_result" |
|
|
2845 |
} |
|
|
2846 |
], |
|
|
2847 |
"source": [ |
|
|
2848 |
"net = LSTM().cuda()\n", |
|
|
2849 |
"net(torch.from_numpy(tmp[0:2]).to(torch.float32).cuda())" |
|
|
2850 |
] |
|
|
2851 |
}, |
|
|
2852 |
{ |
|
|
2853 |
"cell_type": "code", |
|
|
2854 |
"execution_count": null, |
|
|
2855 |
"metadata": {}, |
|
|
2856 |
"outputs": [ |
|
|
2857 |
{ |
|
|
2858 |
"name": "stdout", |
|
|
2859 |
"output_type": "stream", |
|
|
2860 |
"text": [ |
|
|
2861 |
"Begin Training rep 1/1\t of Patient 6\n" |
|
|
2862 |
] |
|
|
2863 |
}, |
|
|
2864 |
{ |
|
|
2865 |
"name": "stderr", |
|
|
2866 |
"output_type": "stream", |
|
|
2867 |
"text": [ |
|
|
2868 |
"/home/vdelv/anaconda3/envs/Pytorch_EEG/lib/python3.7/site-packages/ipykernel_launcher.py:51: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
2869 |
] |
|
|
2870 |
}, |
|
|
2871 |
{ |
|
|
2872 |
"name": "stdout", |
|
|
2873 |
"output_type": "stream", |
|
|
2874 |
"text": [ |
|
|
2875 |
"[1, 50] loss: 1.401\tAccuracy : 0.284\t\tval-loss: 1.375\tval-Accuracy : 0.291\n", |
|
|
2876 |
"[6, 50] loss: 0.759\tAccuracy : 0.744\t\tval-loss: 0.395\tval-Accuracy : 0.898\n", |
|
|
2877 |
"[11, 50] loss: 0.180\tAccuracy : 0.951\t\tval-loss: 0.045\tval-Accuracy : 0.995\n", |
|
|
2878 |
"[16, 50] loss: 0.116\tAccuracy : 0.970\t\tval-loss: 0.029\tval-Accuracy : 0.995\n", |
|
|
2879 |
"[21, 50] loss: 0.067\tAccuracy : 0.979\t\tval-loss: 0.028\tval-Accuracy : 0.995\n", |
|
|
2880 |
"[26, 50] loss: 0.033\tAccuracy : 0.987\t\tval-loss: 0.017\tval-Accuracy : 0.990\n" |
|
|
2881 |
] |
|
|
2882 |
} |
|
|
2883 |
], |
|
|
2884 |
"source": [ |
|
|
2885 |
"p = 0\n", |
|
|
2886 |
"n_rep = 1 \n", |
|
|
2887 |
"n_patient = len(np.unique(Patient))\n", |
|
|
2888 |
"fold_vloss = np.zeros((n_rep,n_patient))\n", |
|
|
2889 |
"fold_loss = np.zeros((n_rep,n_patient))\n", |
|
|
2890 |
"fold_vacc = np.zeros((n_rep,n_patient))\n", |
|
|
2891 |
"fold_acc = np.zeros((n_rep,n_patient))\n", |
|
|
2892 |
"\n", |
|
|
2893 |
"un = np.unique(Patient)\n", |
|
|
2894 |
"np.random.shuffle(un)\n", |
|
|
2895 |
"for patient in un:\n", |
|
|
2896 |
" id_patient = np.arange(len(tmp))[Patient==patient]\n", |
|
|
2897 |
" id_train = np.arange(len(tmp))[Patient!=patient]\n", |
|
|
2898 |
" \n", |
|
|
2899 |
" for rep in range(n_rep):\n", |
|
|
2900 |
" np.random.shuffle(id_patient)\n", |
|
|
2901 |
" np.random.shuffle(id_train)\n", |
|
|
2902 |
" \n", |
|
|
2903 |
" X_train = tmp[id_train]\n", |
|
|
2904 |
" X_test = tmp[id_patient]\n", |
|
|
2905 |
" y_train = Label[id_train]\n", |
|
|
2906 |
" y_test = Label[id_patient]\n", |
|
|
2907 |
" \n", |
|
|
2908 |
" print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n", |
|
|
2909 |
" (rep+1,n_rep, patient))\n", |
|
|
2910 |
" \n", |
|
|
2911 |
" CNN = LSTM().cuda()\n", |
|
|
2912 |
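" # note: the model already ends in LogSoftmax, and CrossEntropyLoss applies log-softmax internally; nn.NLLLoss would match the model output directly\n", |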
" criterion = nn.CrossEntropyLoss()\n", |
|
|
2913 |
" optimizer = optim.Adam(CNN.parameters(), lr=0.0001)\n", |
|
|
2914 |
" \n", |
|
|
2915 |
" \n", |
|
|
2916 |
" n_epochs = 50\n", |
|
|
2917 |
" for epoch in range(n_epochs):\n", |
|
|
2918 |
" running_loss = 0.0\n", |
|
|
2919 |
" batchsize = 32\n", |
|
|
2920 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
2921 |
" #print(i)\n", |
|
|
2922 |
" optimizer.zero_grad()\n", |
|
|
2923 |
" # forward + backward + optimize\n", |
|
|
2924 |
" outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda().detach())\n", |
|
|
2925 |
" loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n", |
|
|
2926 |
" loss.backward(retain_graph=True)\n", |
|
|
2927 |
" optimizer.step()\n", |
|
|
2928 |
" running_loss += loss.item() \n", |
|
|
2929 |
" \n", |
|
|
2930 |
" \n", |
|
|
2931 |
" if epoch%5==0:\n", |
|
|
2932 |
"\n", |
|
|
2933 |
" check_id = np.arange(2000)\n", |
|
|
2934 |
" np.random.shuffle(check_id)\n", |
|
|
2935 |
" \n", |
|
|
2936 |
" #acc\n", |
|
|
2937 |
" acc = np.zeros(len(y_train))\n", |
|
|
2938 |
" for j in range(int(len(acc)/batchsize)+1):\n", |
|
|
2939 |
" _, idx = torch.max(CNN(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda()).data,1)\n", |
|
|
2940 |
" acc[j*batchsize:(j+1)*batchsize] = (idx.to(torch.device(\"cpu\")) == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n", |
|
|
2941 |
" acc = np.mean(acc)\n", |
|
|
2942 |
" \n", |
|
|
2943 |
" #validation\n", |
|
|
2944 |
" val_acc = np.zeros(len(y_test))\n", |
|
|
2945 |
" val_loss = []\n", |
|
|
2946 |
" for j in range(int(len(val_acc)/batchsize)+1):\n", |
|
|
2947 |
" val_outputs = CNN(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda())\n", |
|
|
2948 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
2949 |
" val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long).cuda()).item())\n", |
|
|
2950 |
" val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n", |
|
|
2951 |
" val_acc = np.mean(val_acc)\n", |
|
|
2952 |
" val_loss = np.mean(val_loss)\n", |
|
|
2953 |
"\n", |
|
|
2954 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
2955 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
2956 |
" \n", |
|
|
2957 |
" fold_vloss[rep, p ] = val_loss\n", |
|
|
2958 |
" fold_loss[rep, p] = running_loss/i\n", |
|
|
2959 |
" fold_vacc[rep, p] = val_acc\n", |
|
|
2960 |
" fold_acc[rep, p] = acc\n", |
|
|
2961 |
" \n", |
|
|
2962 |
" \n", |
|
|
2963 |
" print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
2964 |
" (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n", |
|
|
2965 |
" \n", |
|
|
2966 |
" p = p + 1" |
|
|
2967 |
] |
|
|
2968 |
}, |
|
|
2969 |
{ |
|
|
2970 |
"cell_type": "markdown", |
|
|
2971 |
"metadata": {}, |
|
|
2972 |
"source": [ |
|
|
2973 |
"## Mix Architecture" |
|
|
2974 |
] |
|
|
2975 |
}, |
|
|
2976 |
{ |
|
|
2977 |
"cell_type": "code", |
|
|
2978 |
"execution_count": 37, |
|
|
2979 |
"metadata": {}, |
|
|
2980 |
"outputs": [], |
|
|
2981 |
"source": [ |
|
|
2982 |
"class Mix(nn.Module):\n", |
|
|
2983 |
" def __init__(self):\n", |
|
|
2984 |
" super(Mix, self).__init__()\n", |
|
|
2985 |
" self.conv1 = nn.Conv2d(3,32,(3,3),stride=(1,1), padding=1)\n", |
|
|
2986 |
" self.conv2 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2987 |
" self.conv3 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2988 |
" self.conv4 = nn.Conv2d(32,32,(3,3),stride=1, padding=1)\n", |
|
|
2989 |
" self.pool1 = nn.MaxPool2d((2,2))\n", |
|
|
2990 |
" self.conv5 = nn.Conv2d(32,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
2991 |
" self.conv6 = nn.Conv2d(64,64,(3,3),stride=(1,1),padding=1)\n", |
|
|
2992 |
" self.conv7 = nn.Conv2d(64,128,(3,3),stride=(1,1),padding=1)\n", |
|
|
2993 |
" \n", |
|
|
2994 |
" #\n", |
|
|
2995 |
" self.conv8 = nn.Conv2d(7,64,(4*4*128,3),stride=(1,1),padding=1)\n", |
|
|
2996 |
" self.lstm = nn.RNN(4*4*128,128,7)\n", |
|
|
2997 |
" \n", |
|
|
2998 |
" self.pool = nn.MaxPool2d((7,1))\n", |
|
|
2999 |
" self.drop = nn.Dropout(p=0.5)\n", |
|
|
3000 |
" self.fc1 = nn.Linear(1088,512)\n", |
|
|
3001 |
" self.fc2 = nn.Linear(512,4)\n", |
|
|
3002 |
" self.max = nn.LogSoftmax()\n", |
|
|
3003 |
" \n", |
|
|
3004 |
" self.lstm_out = torch.zeros(2,7,128)\n", |
|
|
3005 |
" \n", |
|
|
3006 |
" def forward(self, x):\n", |
|
|
3007 |
" if x.get_device() == 0:\n", |
|
|
3008 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cuda()\n", |
|
|
3009 |
" else:\n", |
|
|
3010 |
" tmp = torch.zeros(x.shape[0],x.shape[1],128,4,4).cpu()\n", |
|
|
3011 |
" for i in range(7):\n", |
|
|
3012 |
" img = x[:,i]\n", |
|
|
3013 |
" img = F.relu(self.conv1(img))\n", |
|
|
3014 |
" img = F.relu(self.conv2(img))\n", |
|
|
3015 |
" img = F.relu(self.conv3(img))\n", |
|
|
3016 |
" img = F.relu(self.conv4(img))\n", |
|
|
3017 |
" img = self.pool1(img)\n", |
|
|
3018 |
" img = F.relu(self.conv5(img))\n", |
|
|
3019 |
" img = F.relu(self.conv6(img))\n", |
|
|
3020 |
" img = self.pool1(img)\n", |
|
|
3021 |
" img = F.relu(self.conv7(img))\n", |
|
|
3022 |
" #x[:,i,]\n", |
|
|
3023 |
" tmp[:,i] = self.pool1(img)\n", |
|
|
3024 |
" del img\n", |
|
|
3025 |
" #tmp[:,i] = self.pool1(F.relu(self.conv7(self.pool1(F.relu(self.conv6(F.relu(self.conv5(self.pool1( F.relu(self.conv4(F.relu(self.conv3( F.relu(self.conv2(F.relu(self.conv1(x[:,i])))))))))))))))))\n", |
|
|
3026 |
" \n", |
|
|
3027 |
" temp_conv = F.relu(self.conv8(tmp.reshape(x.shape[0], x.shape[1], 4*128*4,1)))\n", |
|
|
3028 |
" temp_conv = temp_conv.reshape(temp_conv.shape[0],-1)\n", |
|
|
3029 |
" \n", |
|
|
3030 |
" self.lstm_out, _ = self.lstm(tmp.reshape(x.shape[0], x.shape[1], 4*128*4))\n", |
|
|
3031 |
" lstm = self.lstm_out.view(x.shape[0],-1)\n", |
|
|
3032 |
" \n", |
|
|
3033 |
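" # fuse the two branches: flattened temporal-convolution features concatenated with the flattened RNN outputs\n", |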
" x = torch.cat((temp_conv, lstm), 1)\n", |
|
|
3034 |
" del tmp\n", |
|
|
3035 |
" #self.lstm_out, self.hidden = self.lstm(x,self.hidden)\n", |
|
|
3036 |
" \n", |
|
|
3037 |
" x = self.fc1(x)\n", |
|
|
3038 |
" x = self.fc2(x)\n", |
|
|
3039 |
" x = self.max(x)\n", |
|
|
3040 |
" return x" |
|
|
3041 |
] |
|
|
3042 |
}, |
|
|
3043 |
{ |
|
|
3044 |
"cell_type": "code", |
|
|
3045 |
"execution_count": 38, |
|
|
3046 |
"metadata": { |
|
|
3047 |
"collapsed": true |
|
|
3048 |
}, |
|
|
3049 |
"outputs": [ |
|
|
3050 |
{ |
|
|
3051 |
"name": "stdout", |
|
|
3052 |
"output_type": "stream", |
|
|
3053 |
"text": [ |
|
|
3054 |
"Begin Training rep 1/1\t of Patient 11\n" |
|
|
3055 |
] |
|
|
3056 |
}, |
|
|
3057 |
{ |
|
|
3058 |
"name": "stderr", |
|
|
3059 |
"output_type": "stream", |
|
|
3060 |
"text": [ |
|
|
3061 |
"/home/vdelv/anaconda3/envs/Pytorch_EEG/lib/python3.7/site-packages/ipykernel_launcher.py:58: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\n" |
|
|
3062 |
] |
|
|
3063 |
}, |
|
|
3064 |
{ |
|
|
3065 |
"name": "stdout", |
|
|
3066 |
"output_type": "stream", |
|
|
3067 |
"text": [ |
|
|
3068 |
"[1, 50] loss: 1.317\tAccuracy : 0.492\t\tval-loss: 0.935\tval-Accuracy : 0.498\n", |
|
|
3069 |
"[6, 50] loss: 0.345\tAccuracy : 0.888\t\tval-loss: 0.296\tval-Accuracy : 0.867\n", |
|
|
3070 |
"[11, 50] loss: 0.123\tAccuracy : 0.948\t\tval-loss: 0.260\tval-Accuracy : 0.924\n", |
|
|
3071 |
"[16, 50] loss: 0.076\tAccuracy : 0.965\t\tval-loss: 0.259\tval-Accuracy : 0.911\n" |
|
|
3072 |
] |
|
|
3073 |
}, |
|
|
3074 |
{ |
|
|
3075 |
"ename": "KeyboardInterrupt", |
|
|
3076 |
"evalue": "", |
|
|
3077 |
"output_type": "error", |
|
|
3078 |
"traceback": [ |
|
|
3079 |
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", |
|
|
3080 |
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", |
|
|
3081 |
"\u001b[0;32m<ipython-input-38-270da0643345>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 53\u001b[0m \u001b[0macc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0macc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 55\u001b[0;31m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0midx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mCNN\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 56\u001b[0m \u001b[0macc\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0midx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"cpu\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0my_train\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mbatchsize\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0macc\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0macc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", |
|
|
3082 |
"\u001b[0;31mKeyboardInterrupt\u001b[0m: " |
|
|
3083 |
] |
|
|
3084 |
} |
|
|
3085 |
], |
|
|
3086 |
"source": [ |
|
|
3087 |
"p = 0\n", |
|
|
3088 |
"n_rep = 1 \n", |
|
|
3089 |
"n_patient = len(np.unique(Patient))\n", |
|
|
3090 |
"fold_vloss = np.zeros((n_rep,n_patient))\n", |
|
|
3091 |
"fold_loss = np.zeros((n_rep,n_patient))\n", |
|
|
3092 |
"fold_vacc = np.zeros((n_rep,n_patient))\n", |
|
|
3093 |
"fold_acc = np.zeros((n_rep,n_patient))\n", |
|
|
3094 |
"\n", |
|
|
3095 |
"un = np.unique(Patient)\n", |
|
|
3096 |
"np.random.shuffle(un)\n", |
|
|
3097 |
"for patient in un:\n", |
|
|
3098 |
" id_patient = np.arange(len(tmp))[Patient==patient]\n", |
|
|
3099 |
" id_train = np.arange(len(tmp))[Patient!=patient]\n", |
|
|
3100 |
" \n", |
|
|
3101 |
" for rep in range(n_rep):\n", |
|
|
3102 |
" np.random.shuffle(id_patient)\n", |
|
|
3103 |
" np.random.shuffle(id_train)\n", |
|
|
3104 |
" \n", |
|
|
3105 |
" X_train = tmp[id_train]\n", |
|
|
3106 |
" X_test = tmp[id_patient]\n", |
|
|
3107 |
" y_train = Label[id_train]\n", |
|
|
3108 |
" y_test = Label[id_patient]\n", |
|
|
3109 |
" \n", |
|
|
3110 |
" print(\"Begin Training rep %d/%d\\t of Patient %d\" % \n", |
|
|
3111 |
" (rep+1,n_rep, patient))\n", |
|
|
3112 |
" \n", |
|
|
3113 |
" CNN = Mix().cuda()\n", |
|
|
3114 |
" criterion = nn.CrossEntropyLoss()\n", |
|
|
3115 |
" optimizer = optim.Adam(CNN.parameters(), lr=0.0001)\n", |
|
|
3116 |
" \n", |
|
|
3117 |
" \n", |
|
|
3118 |
" n_epochs = 50\n", |
|
|
3119 |
" for epoch in range(n_epochs):\n", |
|
|
3120 |
" running_loss = 0.0\n", |
|
|
3121 |
" batchsize = 32\n", |
|
|
3122 |
" for i in range(int(len(y_train)/batchsize)):\n", |
|
|
3123 |
" #print(i)\n", |
|
|
3124 |
" optimizer.zero_grad()\n", |
|
|
3125 |
" # forward + backward + optimize\n", |
|
|
3126 |
" outputs = CNN(torch.from_numpy(X_train[i*batchsize:(i+1)*batchsize]).to(torch.float32).cuda().detach())\n", |
|
|
3127 |
" loss = criterion(outputs, torch.from_numpy(y_train[i*batchsize:(i+1)*batchsize]).to(torch.long).cuda())\n", |
|
|
3128 |
" loss.backward(retain_graph=True)\n", |
|
|
3129 |
" optimizer.step()\n", |
|
|
3130 |
" running_loss += loss.item() \n", |
|
|
3131 |
" \n", |
|
|
3132 |
" \n", |
|
|
3133 |
" if epoch%5==0:\n", |
|
|
3134 |
"\n", |
|
|
3135 |
" check_id = np.arange(2000)\n", |
|
|
3136 |
" np.random.shuffle(check_id)\n", |
|
|
3137 |
" \n", |
|
|
3138 |
" #acc\n", |
|
|
3139 |
" acc = np.zeros(len(y_train))\n", |
|
|
3140 |
" for j in range(int(len(acc)/batchsize)+1):\n", |
|
|
3141 |
" _, idx = torch.max(CNN(torch.from_numpy(X_train[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda()).data,1)\n", |
|
|
3142 |
" acc[j*batchsize:(j+1)*batchsize] = (idx.to(torch.device(\"cpu\")) == torch.from_numpy(y_train[j*batchsize:(j+1)*batchsize])).numpy() + 0 \n", |
|
|
3143 |
" acc = np.mean(acc)\n", |
|
|
3144 |
" \n", |
|
|
3145 |
" #validation\n", |
|
|
3146 |
" val_acc = np.zeros(len(y_test))\n", |
|
|
3147 |
" val_loss = []\n", |
|
|
3148 |
" for j in range(int(len(val_acc)/batchsize)+1):\n", |
|
|
3149 |
" val_outputs = CNN(torch.from_numpy(X_test[j*batchsize:(j+1)*batchsize]).to(torch.float32).cuda())\n", |
|
|
3150 |
" _, idx = torch.max(val_outputs.data,1)\n", |
|
|
3151 |
" val_loss.append(criterion(val_outputs, torch.from_numpy(y_test[j*batchsize:(j+1)*batchsize]).to(torch.long).cuda()).item())\n", |
|
|
3152 |
" val_acc[j*batchsize:(j+1)*batchsize] = (idx.cpu().numpy() == y_test[j*batchsize:(j+1)*batchsize])+0\n", |
|
|
3153 |
" val_acc = np.mean(val_acc)\n", |
|
|
3154 |
" val_loss = np.mean(val_loss)\n", |
|
|
3155 |
"\n", |
|
|
3156 |
" print('[%d, %3d] loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
3157 |
" (epoch+1, n_epochs, running_loss/i, float(acc), val_loss, val_acc))\n", |
|
|
3158 |
" \n", |
|
|
3159 |
" fold_vloss[rep, p ] = val_loss\n", |
|
|
3160 |
" fold_loss[rep, p] = running_loss/i\n", |
|
|
3161 |
" fold_vacc[rep, p] = val_acc\n", |
|
|
3162 |
" fold_acc[rep, p] = acc\n", |
|
|
3163 |
" \n", |
|
|
3164 |
" \n", |
|
|
3165 |
" print('loss: %.3f\\tAccuracy : %.3f\\t\\tval-loss: %.3f\\tval-Accuracy : %.3f' %\n", |
|
|
3166 |
" (np.mean(fold_loss[:,p]), np.mean(fold_acc[:,p]), np.mean(fold_vloss[:,p]),np.mean(fold_vacc[:,p])))\n", |
|
|
3167 |
" \n", |
|
|
3168 |
" p = p + 1" |
|
|
3169 |
] |
|
|
3170 |
}, |
|
|
3171 |
{ |
|
|
3172 |
"cell_type": "markdown", |
|
|
3173 |
"metadata": {}, |
|
|
3174 |
"source": [ |
|
|
3175 |
"# Results " |
|
|
3176 |
] |
|
|
3177 |
}, |
|
|
3178 |
{ |
|
|
3179 |
"cell_type": "code", |
|
|
3180 |
"execution_count": 6, |
|
|
3181 |
"metadata": {}, |
|
|
3182 |
"outputs": [], |
|
|
3183 |
"source": [ |
|
|
3184 |
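"# reload the saved LSTM cross-validation metrics (train/validation loss and accuracy per fold)\n", |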
"fold_vloss = sio.loadmat(\"result_LSTM.mat\")['vloss']\n", |
|
|
3185 |
"fold_loss = sio.loadmat(\"result_LSTM.mat\")['loss']\n", |
|
|
3186 |
"fold_vacc = sio.loadmat(\"result_LSTM.mat\")['vacc']\n", |
|
|
3187 |
"fold_acc = sio.loadmat(\"result_LSTM.mat\")['acc'] " |
|
|
3188 |
] |
|
|
3189 |
}, |
|
|
3190 |
{ |
|
|
3191 |
"cell_type": "code", |
|
|
3192 |
"execution_count": 12, |
|
|
3193 |
"metadata": {}, |
|
|
3194 |
"outputs": [ |
|
|
3195 |
{ |
|
|
3196 |
"data": { |
|
|
3197 |
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAD4CAYAAAD8Zh1EAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAXvklEQVR4nO3de4xc513G8e+TddKLN5fdul21dWiCMGUTC7XdKCl0qXYwRU6pHCgGxQKLwKbmD2ygraCJFiVO0IqruDWhFcqWa71L6A0TGRIIs4WVaIlN02B7SDFpaZKmTZpuG7aVSB1+/DHH7ng99z1nZufd5yONPOfMmfN73/XOs2fOvHNeRQRmZjb4Luh3A8zMLB8OdDOzRDjQzcwS4UA3M0uEA93MLBGb+lV4y5YtccUVV3T0nK9//ets3ry5mAb1uE5KfUmtTkp9Sa1OSn3pts6xY8e+HBEvr/tgRPTlNjExEZ0ql8sdP6cbvaiTUl9Sq5NSX1Krk1Jfuq0DHI0GuepTLmZmiXCgm5klwoFuZpYIB7qZWSIc6GZmiWgZ6JI+IOlpSccbPC5JfyDplKRHJL0h/2aamVkr7Ryh/wmws8nj1wPbsts+4H1rb5aZmXWqZaBHxD8BX2myyQ3An2VDJD8BXCbplXk10MzM2qNo43rokq4A7ouI7XUeuw/49YhYypYfBN4TEUfrbLuP6lE8Y2NjEwsLCx01dmVlheHh4Y6e041e1EmpL6nVSaEvpVKp4WPlcrmQmv6/6U2dUql0LCKuqftgo28c1d6AK4DjDR67D5isWX4QuKbVPv1N0eJr9KLOoUOH4uqrr44LLrggrr766jh06FCh9fx/07nqy7x4/r/pTR2afFM0j2u5PAlcXrO8NVu3ro2OjrK8vNzRc0ZGRvjKV5qdfdpY5ufnmZmZYW5ujhdeeIGhoSGmp6cB2LNnT59btwEdvLTu6rj9koaPcfBrBTbIei2PQD8M7Je0AFwHfC0insphv4VaXl4+847iPIuLi0xNTZ23XtKa687PzzM7O0ulUmF8fJyZmZmBDb/Z2Vnm5uYolUpnf2Zzc3McOHBgYPs0yHTHc3V/p5v9PsfB4ttlvdMy0CXNA1PAFklPALcDFwJExPuBI8BbgVPAN4CfLqqxeWp21DIFsNjgOWuQ2hFtpVJhcnLynHWTk5NUKpU+tciS0eFr81vP29jvOFoGekQ0TZrsnM7P5daiHml0NAPFHdGkdkQ7Pj7O0tLSOR/ALS0tMT4+3sdWWQo6fbcBfscBfbwe+nrQ6SmUkZGRNdVL4oi25sjp+G7g4z8MH//WkVPpzPraI6wNftRk3en16zMFGzbQGx2dQ/aXvo3hnJ1K4oh2VTin9JmArR+NXn9FvTZTsWEDvR9mZmaYnp4+ew69XC4zPT3N7Oxsv5vWtT179rBnz56mb4XXqtmR2iC8uD2iam1W//+vXh6E34FecaD30Jkj1wMHDpw9op2dnfURbQu1L9hBPEJrNKKq1flgq6r92RV54JACBzr1Xzy16/IMkF4c0faST7m01mhE1RQ0HLGx1hFVtjE50Dk/sFMJ26KlNgyzMA0+FB7Edxu2vvl66Na12mGYmzZtolQqMTc3t+bPBEZHR5FU9wbUXT86OppHlwrXrC9ma+VAt64VNQzzzDnnerdyuVx3facfOvZLs76YrZVPuVjXkhiGmZhOjvQ9bjs9DnTrWlHDMPtxWYYUeOy2OdALlvIY5KKGYfbjsgypaTZ22+GeLgd6wVIfg1zUMEx/7XttPHZ7Y3KgF8xjkDvXj8symKXAgV4wXzXOzHrFgZ6gQb/2iZl1x4GeoEG/9omZdcdfLDIzS4SP0HvAIzbMrBd8hF6wTr/CHhEDMQbdzNYfH6HbuucvyZi1x4Fu656/JGMpKXIUmgPdzKyHihyF5nPoZmaJcKCbmSXCgW5mlggHeiIaTdsG9adsG6Rp28ysPQ70RDSatq3ZePdBmbbNzNrjQO+x+fl5tm/fzo4dO9i+fTvz8/P9bpKZJcLDFntofn6emZmZs1O2DQ0NMT09DbDmWX7MbP1qNnNZvXHp3c5a5kDvodnZWebm5iiVSme/IDM3N8eBAwfWHOieSMNs/ep05rJuZy1zoPdQpVJhcnLynHWTk5NUKpU179sTaZhZW+fQJe2U9KikU5JuqfP4ayQ9KOkRSYuStubf1ME3Pj7O0tLSOeuWlpYYHx/vU4vMLCUtj9AlDQF3A28BngAeknQ4Ik7WbPbbwJ9FxJ9K+n7g14C9RTR4kM3MzDA9PX32HHq5XGZ6eprZ2dl+N83MCtTpKdFuT4e2c8rlWuBURDwGIGkBuAGoDfSrgHdl98vAx7pqTeLOnCc/cOAAlUqF8fFxZmdnc/tA1NddN1ufOj0l2u3pULW6MIyk3cDOiLg5W94LXBcR+2u2OQR8MiJ+X9LbgQ8DWyLi2VX72gfsAxgbG5tYWFjoqLErKysMDw939Jxu9KJOSn1JrU5KfUmtzqD2pVQqUS6X267TaPvssWMRcU3dBxt96eTMDdgN3FOzvBe4a9U2rwI+AnwK+H2qp2Yua7bfiYmJ6FS5XO74Od3oRZ2U+pJanZT6klqdQe1LNWrbr9No++yxo9EgV9s55fIkcHnN8tZsXe0fhS8AbweQNAz8aER8tY19m5lZTtoZ5fIQsE3SlZIuAm4EDtduIGmLpDP7uhX4QL7NNDOzVloGekScBvYD9wMV4N6IOCHpTkm7ss2mgEclfQYYAzxsw8ysx9r6YlFEHAGOrFp3W839DwEfyrdpZmbWCV+cy8wsEQ50M7NEONDNzBLhQDczS4Svtmhm1gOdXJqj28tyONDNzAoWDS6xIqnhY93wKRczs0Q40M3MEuFANzNLhAPdzCwRDvREzc/Ps337dnbs2MH27duZn5/vd5MsQZLO3kql0jnL1nse5ZKg+fl5ZmZmzk51NzQ0xPT0NEBusyOZwbmjN/IesWGd8xF6gmZnZ5mbm6NUKrFp0yZKpRJzc3Oeu9QscQ70BFUqFSYnJ89ZNzk5SaVS6VOLzKwXHOgJGh8fZ2lp6Zx1S0tLjI+P96lFZtYLDvQEzczMMD09Tblc5vTp05TLZaanp5mZmel308ysQP5QNEFnPvg8cOAAlUqF8fFxZmdn/YGo2TqwegRQ7fJaP1T2EXqi9uzZw/Hjx3nwwQc5fvy4w9xsnYiIs7dyuXzO8lo50M3MEuFANzNLhAPdzCwRDnQzs0Q40M3MEuFANzNLhAPdzCwRDnQzs0Q40M3MEuFANzNLhAPdzCwRDnQzs0Q40M3MEtFWoEvaKelRSack3VLn8W+TVJb0KUmPSHpr/k01s34bHR09ZyLo1ZNC11s/Ojra51ZvHC0DXdIQcDdwPXAVsEfSVas2+xXg3oh4PXAj8Id5N9TM+m95efmcy702uxTsmdvy8nK/m71htHOEfi1wKiIei4jngQXghlXbBHBJdv9S4Av5NdHMzNqhVhdVl7Qb2BkRN2fLe4HrImJ/zTavBB4ARoDNwA9ExLE6+
9oH7AMYGxubWFhY6KixKysrDA8Pd/ScbvSiTkp9Sa1OSn3Ju06pVKJcLndUp9lzOjWIP7O865RKpWMRcU3dBxu9faqZQWM3cE/N8l7grlXbvAt4d3b/e4CTwAXN9jsxMRGdKpfLHT+nG72ok1JfUquTUl/yrlONjM7qNHtOpwbxZ5Z3HeBoNMjVdk65PAlcXrO8NVtXaxq4N/sD8S/Ai4EtbezbzMxy0k6gPwRsk3SlpIuofuh5eNU2nwd2AEgapxroz+TZUDMza65loEfEaWA/cD9QoTqa5YSkOyXtyjZ7N/AOSZ8G5oGbsrcGZmbWI5va2SgijgBHVq27reb+SeBN+Tat6sz41gbtKqKkmdlAaivQ+6k2tCU5xM3MGvBX/83MEuFANzNLhAPdzCwRDnQzs0Q40M3MEuFANzNLhAPdzCwRDnQzs0Q40M3MEuFANzNLhAPdzCwR6zLQG01EC56E1syskXUZ6I0movUktGZmja3Lqy3G7ZfAwUvPWz8FsNhgezMrXKPXJvj1uR6sy0DXHc/VvUzu4uIiU1NT528vEQeLb5fZRtfotQl+fa4H6/KUi5mZdc6BbmaWCAe6mVkiHOhmZolwoJuZJcKBbmaWCAe6mVkiHOhmZolwoJuZJcKBbmaWCAe6mVki1uW1XICzl8ttx8jISIEtMTMbDOsy0Btd/EdSw8fMzDa6dRnoZrZ+dfLuGfwOupcc6GbWtmbvkP0Ouv/a+lBU0k5Jj0o6JemWOo//rqSHs9tnJH01rwY2m4LOzMy+pWWgSxoC7gauB64C9ki6qnabiHhnRLwuIl4HvBf4SF4NbDYFnZmZfUs7R+jXAqci4rGIeB5YAG5osv0eYD6PxpmZWfvU6khX0m5gZ0TcnC3vBa6LiP11tn0N8Alga0S8UOfxfcA+gLGxsYmFhYWOGruyssLw8HBHz+lGL+qk1JfU6qTUl17WKZVKlMvlQmuk9jPrpk6pVDoWEdfUfbD2FEa9G7AbuKdmeS9wV4Nt3wO8t9U+I4KJiYnoVLlc7vg53ehFnZT6klqdlPrSyzrVOClWaj+zbuoAR6NBrrZzyuVJ4PKa5a3ZunpuxKdbzDYMD1pYX9oJ9IeAbZKulHQR1dA+vHojSd8FjAD/km8TzWy9qj069KCF/msZ6BFxGtgP3A9UgHsj4oSkOyXtqtn0RmAh/D9pZtYXbX2xKCKOAEdWrbtt1fLB/JplZmad8tUWzcwS4UA3M0uEA93MLBEOdDOzRDjQzcwS4UA3M0uEA93MLBEOdDOzRDjQzcwS4UA3M0uEA93MLBEOdDOzRLR1cS6z1LW6frcvImqDwIFuxrmBLckBbgPJp1xswxodHT1nhp1GM+/U3kZHR/vcarPGHOi2YS0vL9edl3H1zDu1t+Xl5X4326whB7qZWSIc6GZmiXCgm5klwqNcbMOK2y+Bg5eet34KYLHJc8zWKQe6bVi647m6wxMXFxeZmpqq/xwJT4du65VPuZiZJcKBbmaWCAe6mVkiHOhmZolwoJuZJcKBbmaWCAe6mVkiHOhmZolwoJuZJcKBbmaWiLYCXdJOSY9KOiXplgbb/Likk5JOSDqUbzPNzKyVltdykTQE3A28BXgCeEjS4Yg4WbPNNuBW4E0RsSzpFUU12MysCM3mlR2UKQnbOUK/FjgVEY9FxPPAAnDDqm3eAdwdEcsAEfF0vs00MytW7cxU9ZYHgVo1VtJuYGdE3Jwt7wWui4j9Ndt8DPgM8CZgCDgYEX9XZ1/7gH0AY2NjEwsLCx01dmVlheHh4Y6e041e1EmpL4Nap1Qqdfyciy++mMOHD+dSfxB/Zv2u06u+lEolyuVy4XW66U+pVDoWEdfUfbDR3Ik1f5l2A/fULO8F7lq1zX3AR4ELgSuBx4HLmu13YmIiOlUulzt+Tjd6USelvqRWp/qyKF5KP7Ne1elVX9bz7wBwNBrkajunXJ4ELq9Z3pqtq/UEcDgivhkRn6V6tL6tjX2bmVlO2gn0h4Btkq6UdBFwI7D6PefHyCZ6kbQF+E7gsRzbaWaWu9HRUSSddwPqrpfE6Ohon1vdWMtAj4jTwH7gfqAC3BsRJyTdKWlXttn9wLOSTgJl4Jci4tmiGm1mlofl5eW6py7K5XLD08XLy8v9bnZDbU1BFxFHgCOr1t1Wcz+Ad2U3MzPrA88pasb5Y5BXL8cADV2zjctf/TeDlm+3zQaBA93MLBEOdDOzRDjQzcwS4UA3M0uEA93MLBEetmhmG1bcfgkcvPS89VMAi02es0450M1sw9Idz9Udlrq4uMjU1FT950jEwWLb1S2fcjEzS4QD3cwsEQ50M7NEONDNzBLhQDczS4QD3cwsEQ50M7NEONDNzBLhLxaZ2Ya2ejKTVkZGRgpqydo50M1sw2o0eYmkgZzYxKdczMwS4UA3M0uEA93MLBEOdDOzRDjQzcwS4UA3M0uEA93MLBEOdDOzRDjQzcwS4UA3M0uEA93MLBEOdDOzRLQV6JJ2SnpU0ilJt9R5/CZJz0h6OLvdnH9TzcysmZZXW5Q0BNwNvAV4AnhI0uGIOLlq07+MiP0FtNHMrHCrL6NbuzwoV15s5wj9WuBURDwWEc8DC8ANxTbLzKy3IuLsrVwun7M8KNSqsZJ2Azsj4uZseS9wXe3RuKSbgF8DngE+A7wzIh6vs699wD6AsbGxiYWFhY4au7KywvDwcEfP6UYv6qTUl9TqpNSX1Oqk1Jdu65RKpWMRcU3dB2v/CtW7AbuBe2qW9wJ3rdrmZcCLsvs/C/xjq/1OTExEp8rlcsfP6UYv6qTUl9TqpNSX1Oqk1Jdu6wBHo0GutnPK5Ung8prlrdm62j8Kz0bE/2aL9wAT7f2tMTOzvLQT6A8B2yRdKeki4EbgcO0Gkl5Zs7gLqOTXRDMza0fLUS4RcVrSfuB+YAj4QESckHQn1UP/w8DPS9oFnAa+AtxUYJvNzKyOtiaJjogjwJFV626ruX8rcGu+TTMzs074m6JmZolwoJuZJaLlOPTCCkvPAP/d4dO2AF8uoDn9qJNSX1Krk1JfUquTUl+6rfOaiHh5vQf6FujdkHQ0Gg2oH7A6KfUltTop9SW1Oin1pYg6PuViZpYIB7qZWSIGLdD/KKE6KfUltTop9SW1Oin1Jfc6A3UO3czMGhu0I3QzM2vAgW5mloiBCHRJH5D0tKTjBda4XFJZ0klJJyT9QkF1XizpXyV9OqtzRxF1auoNSfqUpPsKrPE5Sf+eTT94tKAal0n6kKT/kFSR9D0F1HhtzTSKD0t6TtIv5l0nq/XO7P//uKR5SS8uoMYvZPs/kXc/6r0mJY1K+ntJ/5n9O1JAjR/L+vN/knIZ7tegzm9lv2uPSPqopMsKqvOrWY2HJT0g6VVrKtLourrr6Qa8GXgDcLzAGq8E3pDdv5jqRB1XFVBHwHB2/0Lgk8AbC+zXu4BDwH0F1vgcsKXg34E/BW7O7l8EXFZwvSHgi1S/xJH3vl8NfBZ4SbZ8L3BTzjW2A8eBl1K9ZtM/AN+R4/7Pe00Cvwnckt2/BfiNAmqMA68FFoFrCuzLDwKbsvu/
sda+NKlzSc39nwfev5YaA3GEHhH/RPUqjkXWeCoi/i27/z9ULwH86gLqRESsZIsXZrdCPpmWtBX4IarXqB9Yki6l+mKYA4iI5yPiqwWX3QH8V0R0+m3mdm0CXiJpE9XQ/ULO+x8HPhkR34iI08DHgbfntfMGr8kbqP7hJfv3h/OuERGViHh0Lftts84D2c8N4BNU54Eoos5zNYubWWMWDESg95qkK4DXUz16LmL/Q5IeBp4G/j4iCqkD/B7wy8D/FbT/MwJ4QNKxbJrBvF1JdXrDP85OH90jaXMBdWrdCMwXseOIeBL4beDzwFPA1yLigZzLHAe+T9LLJL0UeCvnTlRThLGIeCq7/0VgrOB6vfIzwN8WtXNJs5IeB34CuK3V9s040FeRNAx8GPjFVX89cxMRL0TE66j+1b9W0va8a0h6G/B0RBzLe991TEbEG4DrgZ+T9Oac97+J6lvV90XE64GvU31LX4hsIpddwF8VtP8RqkezVwKvAjZL+sk8a0REheqpggeAvwMeBl7Is0aL+kFB7zx7SdIM1XkePlhUjYiYiYjLsxr7W23fjAO9hqQLqYb5ByPiI0XXy04blIGdBez+TcAuSZ8DFoDvl/QXBdQ5c8RJRDwNfBS4NucSTwBP1LyT+RDVgC/K9cC/RcSXCtr/DwCfjYhnIuKbwEeA7827SETMRcRERLwZWKb6uVCRvnRm9rLs36cLrlcoSTcBbwN+IvsDVbQPAj+6lh040DOSRPUcbSUifqfAOi8/84m5pJcAbwH+I+86EXFrRGyNiCuonj74x4jI9SgQQNJmSRefuU/1w6RcRyNFxBeBxyW9Nlu1AziZZ41V9lDQ6ZbM54E3Snpp9nu3gwKmbZT0iuzfb6N6/vxQ3jVWOQz8VHb/p4C/LrheYSTtpHq6cldEfKPAOttqFm9grVmQx6fERd+ovrieAr5J9WhtuoAak1TfIj5C9e3pw8BbC6jz3cCnsjrHgdt68POboqBRLsC3A5/ObieAmYLqvA44mv3cPgaMFFRnM/AscGnB/yd3ZC/e48CfAy8qoMY/U/3D92lgR877Pu81CbwMeBD4T6qjakYLqPEj2f3/Bb4E3F9QX04Bj9dkwZpGnzSp8+Hsd+AR4G+AV6+lhr/6b2aWCJ9yMTNLhAPdzCwRDnQzs0Q40M3MEuFANzNLhAPdzCwRDnQzs0T8P6RyKI1RDRIBAAAAAElFTkSuQmCC\n", |
|
|
3198 |
"text/plain": [ |
|
|
3199 |
"<Figure size 432x288 with 1 Axes>" |
|
|
3200 |
] |
|
|
3201 |
}, |
|
|
3202 |
"metadata": { |
|
|
3203 |
"needs_background": "light" |
|
|
3204 |
}, |
|
|
3205 |
"output_type": "display_data" |
|
|
3206 |
} |
|
|
3207 |
], |
|
|
3208 |
"source": [ |
|
|
3209 |
"fig = plt.figure()\n", |
|
|
3210 |
"plt.boxplot(fold_vacc)\n", |
|
|
3211 |
"plt.grid()\n", |
|
|
3212 |
"plt.show()" |
|
|
3213 |
] |
|
|
3214 |
} |
|
|
3215 |
], |
|
|
3216 |
"metadata": { |
|
|
3217 |
"kernelspec": { |
|
|
3218 |
"display_name": "Pytorch", |
|
|
3219 |
"language": "python", |
|
|
3220 |
"name": "pytorch" |
|
|
3221 |
}, |
|
|
3222 |
"language_info": { |
|
|
3223 |
"codemirror_mode": { |
|
|
3224 |
"name": "ipython", |
|
|
3225 |
"version": 3 |
|
|
3226 |
}, |
|
|
3227 |
"file_extension": ".py", |
|
|
3228 |
"mimetype": "text/x-python", |
|
|
3229 |
"name": "python", |
|
|
3230 |
"nbconvert_exporter": "python", |
|
|
3231 |
"pygments_lexer": "ipython3", |
|
|
3232 |
"version": "3.7.5" |
|
|
3233 |
} |
|
|
3234 |
}, |
|
|
3235 |
"nbformat": 4, |
|
|
3236 |
"nbformat_minor": 4 |
|
|
3237 |
} |