# eeglearn/eeg_cnn_lib.py
from __future__ import print_function
import time

import numpy as np
np.random.seed(1234)
from functools import reduce
import math as m

import scipy.io
import theano
import theano.tensor as T

from scipy.interpolate import griddata
from sklearn.preprocessing import scale
from utils import augment_EEG, cart2sph, pol2cart, reformatInput

import lasagne
from lasagne.regularization import regularize_layer_params, regularize_network_params, l1, l2
from lasagne.layers import Conv2DLayer, MaxPool2DLayer, InputLayer
from lasagne.layers import DenseLayer, ElemwiseMergeLayer, FlattenLayer
from lasagne.layers import ConcatLayer, ReshapeLayer, get_output_shape
from lasagne.layers import Conv1DLayer, DimshuffleLayer, LSTMLayer, SliceLayer


def azim_proj(pos):
    """
    Computes the Azimuthal Equidistant Projection of an input point in 3D Cartesian coordinates.
    Imagine a plane placed against (tangent to) a globe. If a light source
    inside the globe projects the graticule onto the plane, the result is a
    planar, or azimuthal, map projection.

    :param pos: position in 3D Cartesian coordinates
    :return: projected coordinates using Azimuthal Equidistant Projection
    """
    [r, elev, az] = cart2sph(pos[0], pos[1], pos[2])
    return pol2cart(az, m.pi / 2 - elev)

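
# A minimal usage sketch (illustrative, not part of the pipeline): project one
# made-up 3-D electrode position onto the tangent plane. The coordinates below
# are arbitrary assumptions, not values from the sample data.
def _demo_azim_proj():
    x, y = azim_proj([0.5, 0.3, 0.8])   # hypothetical electrode position
    return x, y

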
def gen_images(locs, features, n_gridpoints, normalize=True,
               augment=False, pca=False, std_mult=0.1, n_components=2, edgeless=False):
    """
    Generates EEG images given electrode locations in 2D space and multiple feature values for each electrode.

    :param locs: An array with shape [n_electrodes, 2] containing X, Y
                        coordinates for each electrode.
    :param features: Feature matrix as [n_samples, n_features]
                                Features are as columns.
                                Features corresponding to each frequency band are concatenated.
                                (alpha1, alpha2, ..., beta1, beta2, ...)
    :param n_gridpoints: Number of pixels along each side of the output images
    :param normalize:   Flag for whether to normalize each band over all samples
    :param augment:     Flag for generating augmented images
    :param pca:         Flag for PCA-based data augmentation
    :param std_mult:    Multiplier for std of added noise
    :param n_components: Number of components in PCA to retain for augmentation
    :param edgeless:    If True, generates edgeless images by adding artificial channels
                        at the four corners of the image with value = 0 (default=False).
    :return:            Tensor of size [samples, colors, W, H] containing generated
                        images.
    """
    feat_array_temp = []
    nElectrodes = locs.shape[0]     # Number of electrodes

    # The feature vector length must be divisible by the number of electrodes
    assert features.shape[1] % nElectrodes == 0
    n_colors = features.shape[1] // nElectrodes     # integer division (Python 3 safe)
    for c in range(n_colors):
        feat_array_temp.append(features[:, c * nElectrodes : nElectrodes * (c + 1)])
    if augment:
        if pca:
            for c in range(n_colors):
                feat_array_temp[c] = augment_EEG(feat_array_temp[c], std_mult, pca=True, n_components=n_components)
        else:
            for c in range(n_colors):
                feat_array_temp[c] = augment_EEG(feat_array_temp[c], std_mult, pca=False, n_components=n_components)
    n_samples = features.shape[0]

    # Set up the interpolation grid
    grid_x, grid_y = np.mgrid[
                     min(locs[:, 0]):max(locs[:, 0]):n_gridpoints*1j,
                     min(locs[:, 1]):max(locs[:, 1]):n_gridpoints*1j
                     ]
    temp_interp = []
    for c in range(n_colors):
        temp_interp.append(np.zeros([n_samples, n_gridpoints, n_gridpoints]))

    # Generate edgeless images
    if edgeless:
        min_x, min_y = np.min(locs, axis=0)
        max_x, max_y = np.max(locs, axis=0)
        locs = np.append(locs, np.array([[min_x, min_y], [min_x, max_y], [max_x, min_y], [max_x, max_y]]), axis=0)
        for c in range(n_colors):
            feat_array_temp[c] = np.append(feat_array_temp[c], np.zeros((n_samples, 4)), axis=1)

    # Interpolating
    for i in range(n_samples):
        for c in range(n_colors):
            temp_interp[c][i, :, :] = griddata(locs, feat_array_temp[c][i, :], (grid_x, grid_y),
                                               method='cubic', fill_value=np.nan)
        print('Interpolating {0}/{1}'.format(i + 1, n_samples), end='\r')

    # Normalizing
    for c in range(n_colors):
        if normalize:
            temp_interp[c][~np.isnan(temp_interp[c])] = \
                scale(temp_interp[c][~np.isnan(temp_interp[c])])
        temp_interp[c] = np.nan_to_num(temp_interp[c])
    return np.swapaxes(np.asarray(temp_interp), 0, 1)     # swap axes to [samples, colors, W, H]

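
# A minimal usage sketch with synthetic data (illustrative only; the electrode
# count, band count, and sample count below are arbitrary assumptions).
def _demo_gen_images():
    locs = np.random.uniform(-1.0, 1.0, (32, 2))   # 32 electrodes in 2-D
    feats = np.random.randn(100, 32 * 3)           # 3 bands x 32 electrodes, 100 samples
    return gen_images(locs, feats, 16)             # -> array of shape (100, 3, 16, 16)

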
def build_cnn(input_var=None, w_init=None, n_layers=(4, 2, 1), n_filters_first=32, imsize=32, n_colors=3):
    """
    Builds a VGG-style CNN; the caller typically appends fully-connected and softmax layers.
    Stacks are separated by a maxpool layer. The number of kernels in each stack is twice
    the number in the previous stack.

    :param input_var: Theano variable as input to the network
    :param w_init: Initial weight values
    :param n_layers: number of layers in each stack. An array of integers with each
                    value corresponding to the number of layers in each stack
                    (e.g. [4, 2, 1] == 3 stacks with 4, 2, and 1 layers in each).
    :param n_filters_first: number of filters in the first layer
    :param imsize: size of the (square) input image
    :param n_colors: number of color channels (depth)
    :return: a pointer to the output of the last layer, and the list of conv-layer weights
    """
    weights = []        # Keeps the weights for all layers
    count = 0
    # If no initial weights are given, initialize with GlorotUniform
    if w_init is None:
        w_init = [lasagne.init.GlorotUniform()] * sum(n_layers)
    # Input layer
    network = InputLayer(shape=(None, n_colors, imsize, imsize),
                                        input_var=input_var)
    for i, s in enumerate(n_layers):
        for l in range(s):
            network = Conv2DLayer(network, num_filters=n_filters_first * (2 ** i), filter_size=(3, 3),
                          W=w_init[count], pad='same')
            count += 1
            weights.append(network.W)
        network = MaxPool2DLayer(network, pool_size=(2, 2))
    return network, weights

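
# A minimal sketch (illustrative): build the default CNN and inspect its output
# shape. With the defaults (three stacks, 2x2 pooling after each), a 32x32
# input comes out as (None, 128, 4, 4).
def _demo_build_cnn():
    input_var = T.tensor4('demo_inputs')
    network, weights = build_cnn(input_var)
    return get_output_shape(network)

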
def build_convpool_max(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with a maxpooling layer in time.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(convnet)
    # convpooling using max pooling over frames
    convpool = ElemwiseMergeLayer(convnets, theano.tensor.maximum)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool

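
# A minimal sketch (illustrative): the network expects a 5-D symbolic input of
# shape (n_timewin, batch, colors, W, H); indexing it with input_vars[i] yields
# one 4-D image tensor per time window. The class count here is an arbitrary
# assumption. The same wiring applies to the other build_convpool_* variants.
def _demo_build_convpool_max():
    input_var = T.TensorType('floatX', ((False,) * 5))()
    network = build_convpool_max(input_var, nb_classes=4)
    return lasagne.layers.count_params(network)

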
def build_convpool_conv1d(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with a 1D-conv layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    convpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    convpool = Conv1DLayer(convpool, 64, 3)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool

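
# Shape walk-through for the 1-D convolution path (illustrative numbers,
# assuming the default CNN, whose output flattens to 128 * 4 * 4 = 2048
# features per window):
#   n_timewin x (batch, 2048) --ConcatLayer-->   (batch, n_timewin * 2048)
#   --ReshapeLayer-->    (batch, n_timewin, 2048)
#   --DimshuffleLayer--> (batch, 2048, n_timewin)
#   --Conv1DLayer(64, 3)--> (batch, 64, n_timewin - 2)

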
def build_convpool_lstm(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with an LSTM layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    # Input to LSTM should have the shape (batch size, SEQ_LENGTH, num_features)
    convpool = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
        nonlinearity=lasagne.nonlinearities.tanh)
    # We only need the final prediction, so we isolate that quantity and feed
    # it to the next layer.
    convpool = SliceLayer(convpool, -1, 1)      # Selecting the last prediction
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=256, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool

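
# Shape walk-through for the LSTM path (illustrative, assuming 2048 CNN
# features per window): the reshaped (batch, n_timewin, 2048) sequence feeds
# LSTMLayer(num_units=128), which returns (batch, n_timewin, 128);
# SliceLayer(..., -1, 1) keeps only the last time step, giving (batch, 128).

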
def build_convpool_mix(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with LSTM and 1D-conv layers combined.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    reformConvpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    conv_out = Conv1DLayer(reformConvpool, 64, 3)
    conv_out = FlattenLayer(conv_out)
    # Input to LSTM should have the shape (batch size, SEQ_LENGTH, num_features)
    lstm = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
        nonlinearity=lasagne.nonlinearities.tanh)
    lstm_out = SliceLayer(lstm, -1, 1)      # Select the last prediction
    # Merge 1D-Conv and LSTM outputs
    dense_input = ConcatLayer([conv_out, lstm_out])
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(dense_input, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer:
    convpool = DenseLayer(convpool,
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool

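
# With the defaults, the two branches merge as follows (illustrative numbers):
# the 1-D conv branch flattens to (batch, 64 * (n_timewin - 2)) = (batch, 320),
# the LSTM branch gives (batch, 128), so ConcatLayer yields (batch, 448).

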
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """
    Iterates over the samples, returning batches of size batchsize.
    :param inputs: input data array. It should be a 4D numpy array for images [n_samples, n_colors, W, H] and a 5D numpy
                    array if working with sequences of images [n_timewindows, n_samples, n_colors, W, H].
    :param targets: vector of target labels.
    :param batchsize: Batch size
    :param shuffle: Flag whether to shuffle the samples before iterating or not.
    :return: images and labels for a batch
    """
    if inputs.ndim == 4:
        input_len = inputs.shape[0]
    elif inputs.ndim == 5:
        input_len = inputs.shape[1]
    assert input_len == len(targets)
    if shuffle:
        indices = np.arange(input_len)
        np.random.shuffle(indices)
    for start_idx in range(0, input_len, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        if inputs.ndim == 4:
            yield inputs[excerpt], targets[excerpt]
        elif inputs.ndim == 5:
            yield inputs[:, excerpt], targets[excerpt]

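
# A minimal sketch with synthetic 4-D data (shapes are arbitrary assumptions):
# 10 samples in batches of 4 yield batch shapes (4, ...), (4, ...), (2, ...).
def _demo_iterate_minibatches():
    X = np.random.randn(10, 3, 8, 8).astype('float32')
    y = np.random.randint(0, 2, 10).astype('int32')
    return [xb.shape for xb, yb in iterate_minibatches(X, y, batchsize=4, shuffle=True)]

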
def train(images, labels, fold, model_type, batch_size=32, num_epochs=5):
    """
    A sample training function which loops over the training set and evaluates the network
    on the validation set after each epoch. Evaluates the network on the test set
    whenever the validation accuracy improves.

    :param images: input images
    :param labels: target labels
    :param fold: tuple of (train, test) index numbers
    :param model_type: model type ('cnn', '1dconv', 'maxpool', 'lstm', 'mix')
    :param batch_size: batch size for training
    :param num_epochs: number of epochs of dataset to go over for training
    :return: average test accuracy at the best-validation epoch
    """
    num_classes = len(np.unique(labels))
    (X_train, y_train), (X_val, y_val), (X_test, y_test) = reformatInput(images, labels, fold)
    X_train = X_train.astype("float32", casting='unsafe')
    X_val = X_val.astype("float32", casting='unsafe')
    X_test = X_test.astype("float32", casting='unsafe')
    # Prepare Theano variables for inputs and targets
    input_var = T.TensorType('floatX', ((False,) * 5))()
    target_var = T.ivector('targets')
    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    # Building the appropriate model
    if model_type == '1dconv':
        network = build_convpool_conv1d(input_var, num_classes)
    elif model_type == 'maxpool':
        network = build_convpool_max(input_var, num_classes)
    elif model_type == 'lstm':
        network = build_convpool_lstm(input_var, num_classes, 100)
    elif model_type == 'mix':
        network = build_convpool_mix(input_var, num_classes, 100)
    elif model_type == 'cnn':
        input_var = T.tensor4('inputs')
        network, _ = build_cnn(input_var)
        network = DenseLayer(lasagne.layers.dropout(network, p=.5),
                             num_units=256,
                             nonlinearity=lasagne.nonlinearities.rectify)
        network = DenseLayer(lasagne.layers.dropout(network, p=.5),
                             num_units=num_classes,
                             nonlinearity=lasagne.nonlinearities.softmax)
    else:
        raise ValueError("Model not supported ['1dconv', 'maxpool', 'lstm', 'mix', 'cnn']")
    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    reg_factor = 1e-4
    l2_penalty = regularize_network_params(network, l2) * reg_factor
    loss += l2_penalty

    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.adam(loss, params, learning_rate=0.001)
    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)
    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
    # Finally, launch the training loop.
    print("Starting training...")
    best_validation_accu = 0
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, batch_size, shuffle=False):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1
        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, batch_size, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1
        av_train_err = train_err / train_batches
        av_val_err = val_err / val_batches
        av_val_acc = val_acc / val_batches
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(av_train_err))
        print("  validation loss:\t\t{:.6f}".format(av_val_err))
        print("  validation accuracy:\t\t{:.2f} %".format(av_val_acc * 100))
        if av_val_acc > best_validation_accu:
            best_validation_accu = av_val_acc
            # Whenever validation accuracy improves, compute and print the test error:
            test_err = 0
            test_acc = 0
            test_batches = 0
            for batch in iterate_minibatches(X_test, y_test, batch_size, shuffle=False):
                inputs, targets = batch
                err, acc = val_fn(inputs, targets)
                test_err += err
                test_acc += acc
                test_batches += 1
            av_test_err = test_err / test_batches
            av_test_acc = test_acc / test_batches
            print("Final results:")
            print("  test loss:\t\t\t{:.6f}".format(av_test_err))
            print("  test accuracy:\t\t{:.2f} %".format(av_test_acc * 100))
            # Dump the network weights to a file:
            np.savez('weights_lasg_{0}'.format(model_type), *lasagne.layers.get_all_param_values(network))
    print('-' * 50)
    print("Best validation accuracy:\t\t{:.2f} %".format(best_validation_accu * 100))
    print("Best test accuracy:\t\t{:.2f} %".format(av_test_acc * 100))
    return av_test_acc


if __name__ == '__main__':
    # Load electrode locations
    print('Loading data...')
    locs = scipy.io.loadmat('../Sample data/Neuroscan_locs_orig.mat')
    locs_3d = locs['A']
    locs_2d = []
    # Convert to 2D
    for e in locs_3d:
        locs_2d.append(azim_proj(e))

    feats = scipy.io.loadmat('../Sample data/FeatureMat_timeWin.mat')['features']
    subj_nums = np.squeeze(scipy.io.loadmat('../Sample data/trials_subNums.mat')['subjectNum'])
    # Leave-Subject-Out cross-validation
    fold_pairs = []
    for i in np.unique(subj_nums):
        ts = subj_nums == i
        tr = np.squeeze(np.nonzero(np.bitwise_not(ts)))
        ts = np.squeeze(np.nonzero(ts))
        np.random.shuffle(tr)  # Shuffle indices
        np.random.shuffle(ts)
        fold_pairs.append((tr, ts))

    # CNN Mode
    print('Generating images...')
    # Average the response over time windows; each window contributes 192 feature columns
    n_windows = feats.shape[1] // 192       # integer division (Python 3 safe)
    av_feats = reduce(lambda x, y: x + y, [feats[:, i * 192:(i + 1) * 192] for i in range(n_windows)])
    av_feats = av_feats / n_windows
    images = gen_images(np.array(locs_2d),
                        av_feats,
                        32, normalize=True)
    print('\n')

    # Class labels should start from 0
    print('Training the CNN Model...')
    test_acc_cnn = []
    for i in range(len(fold_pairs)):
        print('fold {0}/{1}'.format(i + 1, len(fold_pairs)))
        test_acc_cnn.append(train(images, np.squeeze(feats[:, -1]) - 1, fold_pairs[i], 'cnn', num_epochs=10))

    # Conv-LSTM Mode
    print('Generating images for all time windows...')
    images_timewin = np.array([gen_images(np.array(locs_2d),
                                          feats[:, i * 192:(i + 1) * 192], 32, normalize=True)
                               for i in range(n_windows)])
    print('\n')
    print('Training the LSTM-CONV Model...')
    test_acc_mix = []
    for i in range(len(fold_pairs)):
        print('fold {0}/{1}'.format(i + 1, len(fold_pairs)))
        test_acc_mix.append(train(images_timewin, np.squeeze(feats[:, -1]) - 1, fold_pairs[i], 'mix', num_epochs=10))
    print('*' * 40)
    print('Average MIX test accuracy: {0}'.format(np.mean(test_acc_mix) * 100))
    print('Average CNN test accuracy: {0}'.format(np.mean(test_acc_cnn) * 100))
    print('*' * 40)

    print('Done!')