# eval_base.py (exported from a diff viewer; artifact line numbers removed)
1
#!/usr/bin/env python
2
# coding: utf-8
3
'''Subject-independent model evaluator.
4
'''
5
import argparse
6
import json
7
import logging
8
import sys
9
from os.path import join as pjoin
10
11
import h5py
12
import numpy as np
13
import torch
14
import torch.nn.functional as F
15
from braindecode.models.deep4 import Deep4Net
16
from braindecode.torch_ext.optimizers import AdamW
17
from braindecode.torch_ext.util import set_random_seeds
18
19
# Console logging: timestamp + level, written to stdout.
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                    level=logging.INFO, stream=sys.stdout)

# Command-line interface: three required paths plus an optional GPU index.
parser = argparse.ArgumentParser(
    description='Subject independent model evaluator.')
parser.add_argument('datapath', type=str, help='Path to KU data')
parser.add_argument('modelpath', type=str, help='Path to base model')
parser.add_argument('outpath', type=str, help='Path to output')
parser.add_argument('-gpu', type=int, help='The gpu device to use', default=0)
args = parser.parse_args()

datapath = args.datapath
modelpath = args.modelpath
outpath = args.outpath

# The HDF5 dataset stays open for the whole run; it is closed at script end.
dfile = h5py.File(datapath, 'r')

# Pin all CUDA work to the requested device and fix RNG seeds for repeatability.
torch.cuda.set_device(args.gpu)
set_random_seeds(seed=20200205, cuda=True)

BATCH_SIZE = 16
# Fixed (pre-shuffled) ordering of the 54 subject IDs; the position in this
# list determines the fold index used for each subject below.
subjs = [
    35, 47, 46, 37, 13, 27, 12, 32, 53, 54, 4, 40, 19, 41, 18, 42, 34, 7, 49,
    9, 5, 48, 29, 15, 21, 17, 31, 45, 1, 38, 51, 8, 11, 16, 28, 44, 24, 52,
    3, 26, 39, 50, 6, 23, 2, 14, 25, 20, 10, 33, 22, 43, 36, 30,
]
def get_data(subj):
    """Load one subject's trials and labels from the open HDF5 file.

    Reads datasets ``/s<subj>/X`` and ``/s<subj>/Y`` from the module-level
    ``dfile`` handle and materializes them as in-memory arrays.
    """
    group = '/s' + str(subj)
    trials = dfile[pjoin(group, 'X')][:]
    labels = dfile[pjoin(group, 'Y')][:]
    return trials, labels
# Use the first subject's data only to discover the input dimensions.
X, Y = get_data(subjs[0])
n_classes = 2
in_chans = X.shape[1]

# final_conv_length='auto' makes the network emit a single output along the
# time dimension for the given input_time_length.
model = Deep4Net(in_chans=in_chans, n_classes=n_classes,
                 input_time_length=X.shape[2],
                 final_conv_length='auto').cuda()

# Two-sample zero arrays: used only to drive a zero-epoch fit() that
# initializes braindecode's training machinery before evaluation.
X_train = np.zeros_like(X[:2], dtype=np.float32)
Y_train = np.zeros_like(Y[:2], dtype=np.int64)
def reset_model(checkpoint):
    """Restore base-model weights and recompile the model for evaluation.

    Loads ``checkpoint['model_state_dict']`` into the global ``model`` and
    attaches a fresh AdamW optimizer over the trainable parameters only.
    """
    model.network.load_state_dict(checkpoint['model_state_dict'])

    # Optimize only parameters that require gradients.
    trainable = (p for p in model.network.parameters() if p.requires_grad)
    optimizer = AdamW(trainable, lr=0.01, weight_decay=0.0005)
    model.compile(loss=F.nll_loss, optimizer=optimizer,
                  iterator_seed=20200205)
# Evaluate each subject with the base model trained for its fold.
for fold, subj in enumerate(subjs):
    suffix = '_s{}_f{}'.format(subj, fold)
    ckpt_path = pjoin(modelpath, 'model_f{}.pt'.format(fold))
    checkpoint = torch.load(ckpt_path,
                            map_location='cuda:{}'.format(args.gpu))

    # Restore weights, then run a zero-epoch fit to initialize the trainer.
    reset_model(checkpoint)
    model.fit(X_train, Y_train, 0, BATCH_SIZE)

    # Trials from index 300 onward are this subject's held-out test split.
    X, Y = get_data(subj)
    X_test, Y_test = X[300:], Y[300:]
    test_loss = model.evaluate(X_test, Y_test)

    result_path = pjoin(outpath, 'test_base' + suffix + '.json')
    with open(result_path, 'w') as f:
        json.dump(test_loss, f)

dfile.close()