models/InceptionTime.py

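# InceptionTime binary classifier for 30-second ECG windows, trained to flag
# bradycardia events ('brady' label). The model classes below follow the tsai
# implementation of InceptionTime (hence the tsai star-imports).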
import pandas as pd
import numpy as np
import sklearn.metrics as skm
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm, trange
from torchsummary import summary
import getopt, sys
from tsai.imports import *
from tsai.models.layers import *
from tsai.models.utils import *

def noop(x):
    # identity passthrough; returning x matters when this replaces the
    # bottleneck conv below (a bare `pass` would forward None)
    return x

def shortcut(c_in, c_out):
    # note: not used below (InceptionBlock builds its own shortcuts)
    return nn.Sequential(*[nn.Conv1d(c_in, c_out, kernel_size=1),
                           nn.BatchNorm1d(c_out)])

def convert_sig(x):
    # hard-threshold a sigmoid output at 0.5
    return 0 if x < 0.5 else 1

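# One Inception module: an optional 1x1 bottleneck feeds three parallel
# convolutions (kernel sizes ks, ~ks/2, ~ks/4) plus a maxpool -> 1x1 conv
# branch; the four nf-channel outputs are concatenated (hence nf * 4),
# batch-normalised, and passed through ReLU.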
class InceptionModule(Module):
    def __init__(self, ni, nf, ks=40, bottleneck=True):
        ks = [ks // (2**i) for i in range(3)]
        ks = [k if k % 2 != 0 else k - 1 for k in ks]  # ensure odd kernel sizes
        bottleneck = bottleneck if ni > 0 else False
        self.bottleneck = Conv1d(ni, nf, 1, bias=False) if bottleneck else noop
        self.convs = nn.ModuleList([Conv1d(nf if bottleneck else ni, nf, k, bias=False) for k in ks])
        self.maxconvpool = nn.Sequential(*[nn.MaxPool1d(3, stride=1, padding=1), Conv1d(ni, nf, 1, bias=False)])
        self.concat = Concat()
        self.bn = BN1d(nf * 4)
        self.act = nn.ReLU()

    def forward(self, x):
        input_tensor = x
        x = self.bottleneck(input_tensor)
        x = self.concat([l(x) for l in self.convs] + [self.maxconvpool(input_tensor)])
        return self.act(self.bn(x))

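# InceptionBlock stacks `depth` InceptionModules, adding a residual shortcut
# after every third module, as in the original InceptionTime architecture.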
@delegates(InceptionModule.__init__)
class InceptionBlock(Module):
    def __init__(self, ni, nf=32, residual=True, depth=6, **kwargs):
        self.residual, self.depth = residual, depth
        self.inception, self.shortcut = nn.ModuleList(), nn.ModuleList()
        for d in range(depth):
            self.inception.append(InceptionModule(ni if d == 0 else nf * 4, nf, **kwargs))
            if self.residual and d % 3 == 2:
                n_in, n_out = ni if d == 2 else nf * 4, nf * 4
                self.shortcut.append(BN1d(n_in) if n_in == n_out else ConvBlock(n_in, n_out, 1, act=None))
        self.add = Add()
        self.act = nn.ReLU()

    def forward(self, x):
        res = x
        for d in range(self.depth):
            x = self.inception[d](x)
            if self.residual and d % 3 == 2:
                res = x = self.act(self.add(x, self.shortcut[d // 3](res)))
        return x

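# Full network: InceptionBlock -> global average pooling -> linear head.
# The trailing sigmoid makes this a single-output binary variant.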
@delegates(InceptionModule.__init__)
class InceptionTime(Module):
    def __init__(self, c_in, c_out, nf=32, nb_filters=None, **kwargs):
        nf = ifnone(nf, nb_filters)  # accept either argument name for compatibility
        self.inceptionblock = InceptionBlock(c_in, nf, **kwargs)
        self.gap = GAP1d(1)
        self.fc = nn.Linear(nf * 4, c_out)
        self.sig = nn.Sigmoid()

    def forward(self, x):
        x = self.inceptionblock(x)
        x = self.gap(x)
        x = self.sig(self.fc(x))
        return x

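# Quick shape check (hypothetical batch size and series length):
#   m = InceptionTime(c_in=1, c_out=1)
#   p = m(torch.randn(8, 1, 100))  # -> torch.Size([8, 1]), values in (0, 1)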
def train_model(train_dl: DataLoader,
                test_dl: DataLoader,
                device: str,
                model: nn.Module,
                epochs: int,
                learning_rate: float,
                Save: bool):  # Save is accepted but currently unused

    # NB: loss_func is read from module scope (defined under __main__ below)
    optimiser = optim.Adam(model.parameters(), lr=learning_rate)
    history = []

    loss_history = []
    acc_history = []

    convert_soft = np.vectorize(convert_sig)  # elementwise 0/1 thresholding

    epoch_bar = trange(epochs)
    for epoch in epoch_bar:

        # --- Training ---
        epoch_loss = 0
        model.train(mode=True)
        for batch, data in enumerate(train_dl):
            x, y = data
            x, y = x.to(device), y.to(device)

            # computation graph (forward prop -> compute loss -> back prop -> update weights)
            optimiser.zero_grad()

            out = model(x)

            # reshape targets to (batch, 1) to match the sigmoid output
            y = y.view(-1, 1)

            loss = loss_func(out, y)
            epoch_loss += loss.item()
            loss.backward()

            optimiser.step()

        loss_history.append(epoch_loss)
        print("Train loss:", epoch_loss / len(train_dl))

        # --- Validation ---
        running_loss = 0
        running_acc = 0
        running_far = 0  # unused
        model.eval()
        with torch.no_grad():
            for batch, data in enumerate(test_dl):
                x, y = data
                x, y = x.to(device), y.to(device)
                out = model(x)
                out1 = torch.Tensor(convert_soft(out.cpu().numpy())).view(-1).to(device)
                test_acc = (out1 == y.view(-1)).cpu().numpy().sum() / len(y)
                y = y.view(-1, 1)
                test_loss = loss_func(out, y).item()
                running_acc += test_acc
                running_loss += test_loss

        test_size = len(test_dl)
        test_acc = running_acc / test_size
        test_loss = running_loss / test_size

        acc_history.append(test_acc)
        history.append((test_loss, test_acc))  # per-epoch validation metrics

        epoch_bar.set_description('acc={0:.2%}\tBCE={1:.4f}'
                                  .format(test_acc, test_loss))

    return model, history

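# Entry point: load the pre-windowed ECG dataframe, split it per infant,
# and train. The CSV path and column layout are specific to this dataset.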
if __name__ == "__main__":

    arch = InceptionTime(1, 1)  # univariate input, single sigmoid output

    df = pd.read_csv('finaldfs/ecgfiltered30sec.csv', index_col=0)

    # Chronological split per infant: the first ~70% of each infant's
    # bradycardia events go to train, the remainder to test.
    train = df.groupby('infant_no').apply(lambda group: group[group['brady_no'] <= group['brady_no'].max() * 0.7]).copy()
    test = df.groupby('infant_no').apply(lambda group: group[group['brady_no'] > group['brady_no'].max() * 0.7]).copy()

    # Signal columns (skip leading metadata and trailing label columns)
    x_train = train[train.columns[4:-2]]
    y_train = train['brady']
    x_test = test[test.columns[4:-2]]
    y_test = test['brady']

    # Add a channel dimension: (n_samples, 1, n_timesteps)
    x_train = np.expand_dims(x_train, axis=1)
    x_test = np.expand_dims(x_test, axis=1)

    x_train = torch.Tensor(x_train)
    x_test = torch.Tensor(x_test)
    y_train = torch.Tensor(y_train.to_numpy())
    y_test = torch.Tensor(y_test.to_numpy())

    train_ds = TensorDataset(x_train, y_train)
    test_ds = TensorDataset(x_test, y_test)

    train_dl = DataLoader(train_ds, batch_size=10, shuffle=True)
    test_dl = DataLoader(test_ds, batch_size=10, shuffle=True)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Hyperparameters and dataset metadata (some recorded but unused below)
    train_size = x_train.shape[0]
    test_size = x_test.shape[0]
    time_steps = x_train.shape[-1]
    num_classes = 1
    learning_rate = 1e-6
    drop = 0.2
    epochs = 100
    loss_func = nn.BCELoss()  # read by train_model via module scope

    model = arch
    model = model.to(device)
    train_model(train_dl,
                test_dl,
                device,
                model,
                epochs,
                learning_rate,
                False)