Diff of /eeg_learn_functions.py [000000] .. [45bab5]

from __future__ import print_function
import time

import numpy as np
np.random.seed(1234)
from functools import reduce
import math as m

import scipy.io
import theano
import theano.tensor as T

from scipy.interpolate import griddata
from sklearn.preprocessing import scale
# augment_EEG, cart2sph, pol2cart and reformatInput are used by the functions
# below, so the utils import must stay active.
from utils import augment_EEG, cart2sph, pol2cart, reformatInput

import lasagne
# from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.layers import Conv2DLayer, MaxPool2DLayer, InputLayer
from lasagne.layers import DenseLayer, ElemwiseMergeLayer, FlattenLayer
from lasagne.layers import ConcatLayer, ReshapeLayer, get_output_shape
from lasagne.layers import Conv1DLayer, DimshuffleLayer, LSTMLayer, SliceLayer

def azim_proj(pos):
    """
    Computes the Azimuthal Equidistant Projection of an input point given in
    3D Cartesian coordinates.

    Imagine a plane placed against (tangent to) a globe. If a light source
    inside the globe projects the graticule onto the plane, the result is a
    planar, or azimuthal, map projection.

    :param pos: position in 3D Cartesian coordinates (x, y, z)
    :return: projected coordinates using Azimuthal Equidistant Projection
    """
    [r, elev, az] = cart2sph(pos[0], pos[1], pos[2])
    return pol2cart(az, m.pi / 2 - elev)
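
# Usage sketch (made-up coordinate; assumes utils' pol2cart returns an
# (x, y) pair):
#
#   xyz = np.array([0.05, 0.02, 0.08])   # hypothetical 3D electrode position
#   x2d, y2d = azim_proj(xyz)            # 2D location consumed by gen_images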

def gen_images(locs, features, n_gridpoints, normalize=True,
               augment=False, pca=False, std_mult=0.1, n_components=2, edgeless=False):
    """
    Generates EEG images given electrode locations in 2D space and multiple
    feature values for each electrode.

    :param locs: An array with shape [n_electrodes, 2] containing X, Y
                 coordinates for each electrode.
    :param features: Feature matrix as [n_samples, n_features].
                     Features are as columns.
                     Features corresponding to each frequency band are concatenated.
                     (alpha1, alpha2, ..., beta1, beta2, ...)
    :param n_gridpoints: Number of pixels along each side of the square output images
    :param normalize:   Flag for whether to normalize each band over all samples
    :param augment:     Flag for generating augmented images
    :param pca:         Flag for PCA-based data augmentation
    :param std_mult:    Multiplier for std of added noise
    :param n_components: Number of components in PCA to retain for augmentation
    :param edgeless:    If True, generates edgeless images by adding artificial channels
                        at the four corners of the image with value = 0 (default=False).
    :return:            Tensor of size [samples, colors, W, H] containing generated
                        images.
    """
    feat_array_temp = []
    nElectrodes = locs.shape[0]     # Number of electrodes
    # The feature vector length must be divisible by the number of electrodes
    assert features.shape[1] % nElectrodes == 0
    n_colors = features.shape[1] // nElectrodes
    for c in range(n_colors):
        feat_array_temp.append(features[:, c * nElectrodes : nElectrodes * (c + 1)])
    if augment:
        if pca:
            for c in range(n_colors):
                feat_array_temp[c] = augment_EEG(feat_array_temp[c], std_mult, pca=True, n_components=n_components)
        else:
            for c in range(n_colors):
                feat_array_temp[c] = augment_EEG(feat_array_temp[c], std_mult, pca=False, n_components=n_components)
    nSamples = features.shape[0]
    # Interpolate the values
    grid_x, grid_y = np.mgrid[
                     min(locs[:, 0]):max(locs[:, 0]):n_gridpoints*1j,
                     min(locs[:, 1]):max(locs[:, 1]):n_gridpoints*1j
                     ]
    temp_interp = []
    for c in range(n_colors):
        temp_interp.append(np.zeros([nSamples, n_gridpoints, n_gridpoints]))
    # Generate edgeless images
    if edgeless:
        min_x, min_y = np.min(locs, axis=0)
        max_x, max_y = np.max(locs, axis=0)
        locs = np.append(locs, np.array([[min_x, min_y], [min_x, max_y], [max_x, min_y], [max_x, max_y]]), axis=0)
        for c in range(n_colors):
            feat_array_temp[c] = np.append(feat_array_temp[c], np.zeros((nSamples, 4)), axis=1)
    # Interpolating
    for i in range(nSamples):
        for c in range(n_colors):
            temp_interp[c][i, :, :] = griddata(locs, feat_array_temp[c][i, :], (grid_x, grid_y),
                                               method='cubic', fill_value=np.nan)
        print('Interpolating {0}/{1}'.format(i + 1, nSamples), end='\r')
    # Normalizing
    for c in range(n_colors):
        if normalize:
            temp_interp[c][~np.isnan(temp_interp[c])] = \
                scale(temp_interp[c][~np.isnan(temp_interp[c])])
        temp_interp[c] = np.nan_to_num(temp_interp[c])
    return np.swapaxes(np.asarray(temp_interp), 0, 1)     # swap axes to have [samples, colors, W, H]
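
# Usage sketch (made-up data): 64 electrodes with 3 concatenated frequency
# bands gives a 64 * 3 = 192-column feature matrix; the result is one
# 3-channel 32x32 image per sample.
#
#   demo_locs = np.random.uniform(-1, 1, size=(64, 2))
#   demo_feats = np.random.randn(10, 192)               # 10 samples
#   demo_imgs = gen_images(demo_locs, demo_feats, 32)   # (10, 3, 32, 32)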

def build_cnn(input_var=None, w_init=None, n_layers=(4, 2, 1), n_filters_first=32, imsize=32, n_colors=3):
    """
    Builds a VGG-style CNN consisting of stacks of 3x3 convolutional layers,
    with a maxpool layer after each stack. The number of kernels in each stack
    is twice the number in the previous stack. (The fully-connected and softmax
    layers are added by the calling functions.)

    :param input_var: Theano variable as input to the network
    :param w_init: Initial weight values
    :param n_layers: number of layers in each stack. An array of integers with each
                     value corresponding to the number of layers in each stack.
                     (e.g. [4, 2, 1] == 3 stacks with 4, 2, and 1 layers in each.)
    :param n_filters_first: number of filters in the first layer
    :param imsize: size of the image
    :param n_colors: number of color channels (depth)
    :return: a pointer to the output of the last layer, and the list of conv-layer weights
    """
    weights = []        # Keeps the weights for all layers
    count = 0
    # If no initial weights are given, initialize with GlorotUniform
    if w_init is None:
        w_init = [lasagne.init.GlorotUniform()] * sum(n_layers)
    # Input layer
    network = InputLayer(shape=(None, n_colors, imsize, imsize),
                         input_var=input_var)
    for i, s in enumerate(n_layers):
        for l in range(s):
            network = Conv2DLayer(network, num_filters=n_filters_first * (2 ** i), filter_size=(3, 3),
                                  W=w_init[count], pad='same')
            count += 1
            weights.append(network.W)
        network = MaxPool2DLayer(network, pool_size=(2, 2))
    return network, weights
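
# Usage sketch: build one tower, then reuse its weights for a second tower.
# Passing the returned weight list as w_init hands the same theano shared
# variables to Conv2DLayer's W, so the towers share weights.
#
#   iv = T.tensor4('inputs')                        # (batch, colors, H, W)
#   net, ws = build_cnn(iv, imsize=32, n_colors=3)
#   net2, _ = build_cnn(T.tensor4('inputs2'), w_init=ws)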

def build_convpool_max(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=3):
    """
    Builds the complete network with a maxpooling layer in time.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(convnet)
    # Convpooling using max pooling over frames
    convpool = ElemwiseMergeLayer(convnets, theano.tensor.maximum)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
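
# Usage sketch, mirroring train() below: input_vars may also be a single 5D
# tensor of shape (n_timewin, batch, colors, H, W), since input_vars[i] then
# yields the 4D input for window i.
#
#   iv5 = T.TensorType('floatX', ((False,) * 5))()
#   net = build_convpool_max(iv5, nb_classes=4, n_timewin=3)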

def build_convpool_conv1d(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=3):
    """
    Builds the complete network with a 1D-conv layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # At this point convnets shape is [n_timewin][n_samples, features]
    # We want the shape to be [n_samples, features, n_timewin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    convpool = DimshuffleLayer(convpool, (0, 2, 1))
    # Input to the 1D conv layer should be in (batch_size, num_input_channels, input_length)
    convpool = Conv1DLayer(convpool, 64, 3)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
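
# Shape walk-through (defaults: imsize=32, n_timewin=3): each CNN tower
# flattens to 128 * 4 * 4 = 2048 features, ConcatLayer gives (batch, 3 * 2048),
# ReshapeLayer gives (batch, 3, 2048), and DimshuffleLayer yields the
# (batch, 2048, 3) layout that Conv1DLayer expects as
# (batch_size, num_input_channels, input_length).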

def build_convpool_lstm(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=3):
    """
    Builds the complete network with an LSTM layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                       the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # At this point convnets shape is [n_timewin][n_samples, features]
    # We want the shape to be [n_samples, n_timewin, features]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    # Input to the LSTM should have the shape (batch_size, SEQ_LENGTH, num_features)
    convpool = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
        nonlinearity=lasagne.nonlinearities.tanh)
    # We only need the output at the final time step, so we isolate that
    # quantity and feed it to the next layer.
    convpool = SliceLayer(convpool, -1, 1)      # Selecting the last time step
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=256, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
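
# With the defaults, the LSTM sees length-3 sequences of 2048-feature CNN
# vectors; SliceLayer(convpool, -1, 1) keeps only its 128-unit output after
# the last window, so the dense layers score a single vector per sample.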

def build_convpool_mix(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=3):
    """
    Builds the complete network with LSTM and 1D-conv layers combined.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                       the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # At this point convnets shape is [n_timewin][n_samples, features]
    # We want the shape to be [n_samples, n_timewin, features]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    reformConvpool = DimshuffleLayer(convpool, (0, 2, 1))
    # Input to the 1D conv layer should be in (batch_size, num_input_channels, input_length)
    conv_out = Conv1DLayer(reformConvpool, 64, 3)
    conv_out = FlattenLayer(conv_out)
    # Input to the LSTM should have the shape (batch_size, SEQ_LENGTH, num_features)
    lstm = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
        nonlinearity=lasagne.nonlinearities.tanh)
    lstm_out = SliceLayer(lstm, -1, 1)      # Output at the last time step
    # Merge 1D-conv and LSTM outputs
    dense_input = ConcatLayer([conv_out, lstm_out])
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(dense_input, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer:
    convpool = DenseLayer(convpool,
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
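
# Both temporal paths read the same (batch, n_timewin, features) sequence of
# CNN features: the Conv1D path mixes adjacent windows (flattening to 64
# units for the default n_timewin=3), the LSTM path carries state across all
# windows (128 units at the last step), and ConcatLayer joins the two before
# the dense classifier.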

def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """
    Iterates over the samples returning batches of size batchsize.

    :param inputs: input data array. It should be a 4D numpy array for images
                   [n_samples, n_colors, W, H] and a 5D numpy array if working with
                   sequences of images [n_timewindows, n_samples, n_colors, W, H].
    :param targets: vector of target labels.
    :param batchsize: Batch size
    :param shuffle: Flag whether to shuffle the samples before iterating or not.
    :return: images and labels for a batch
    """
    if inputs.ndim == 4:
        input_len = inputs.shape[0]
    elif inputs.ndim == 5:
        input_len = inputs.shape[1]
    else:
        raise ValueError('inputs must be a 4D or 5D array')
    assert input_len == len(targets)
    if shuffle:
        indices = np.arange(input_len)
        np.random.shuffle(indices)
    for start_idx in range(0, input_len, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        if inputs.ndim == 4:
            yield inputs[excerpt], targets[excerpt]
        elif inputs.ndim == 5:
            yield inputs[:, excerpt], targets[excerpt]
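
# Usage sketch (dummy arrays): with 100 samples and batchsize 32 the last
# batch holds the remaining 4 samples.
#
#   X = np.zeros((100, 3, 32, 32), dtype='float32')
#   y = np.zeros(100, dtype='int32')
#   for xb, yb in iterate_minibatches(X, y, 32, shuffle=True):
#       pass   # xb: (<=32, 3, 32, 32), yb: matching labels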

def train(images, labels, fold, model_type, batch_size=32, num_epochs=5):
    """
    A sample training function which loops over the training set, evaluates the
    network on the validation set after each epoch, and evaluates it on the test
    set whenever the validation accuracy improves.

    :param images: input images
    :param labels: target labels
    :param fold: tuple of (train, test) index numbers
    :param model_type: model type ('cnn', '1dconv', 'maxpool', 'lstm', 'mix')
    :param batch_size: batch size for training
    :param num_epochs: number of epochs of dataset to go over for training
    :return: none
    """
    num_classes = len(np.unique(labels))
    (X_train, y_train), (X_val, y_val), (X_test, y_test) = reformatInput(images, labels, fold)
    X_train = X_train.astype("float32", casting='unsafe')
    X_val = X_val.astype("float32", casting='unsafe')
    X_test = X_test.astype("float32", casting='unsafe')
    # Prepare Theano variables for inputs and targets
    input_var = T.TensorType('floatX', ((False,) * 5))()
    target_var = T.ivector('targets')
    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    # Building the appropriate model
    if model_type == '1dconv':
        network = build_convpool_conv1d(input_var, num_classes)
    elif model_type == 'maxpool':
        network = build_convpool_max(input_var, num_classes)
    elif model_type == 'lstm':
        network = build_convpool_lstm(input_var, num_classes, 100)
    elif model_type == 'mix':
        network = build_convpool_mix(input_var, num_classes, 100)
    elif model_type == 'cnn':
        input_var = T.tensor4('inputs')
        network, _ = build_cnn(input_var)
        network = DenseLayer(lasagne.layers.dropout(network, p=.5),
                             num_units=256,
                             nonlinearity=lasagne.nonlinearities.rectify)
        network = DenseLayer(lasagne.layers.dropout(network, p=.5),
                             num_units=num_classes,
                             nonlinearity=lasagne.nonlinearities.softmax)
    else:
        raise ValueError("Model not supported ['1dconv', 'maxpool', 'lstm', 'mix', 'cnn']")
    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.adam(loss, params, learning_rate=0.001)
    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)
    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
    # Finally, launch the training loop.
    print("Starting training...")
    best_validation_accu = 0
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, batch_size, shuffle=False):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1
        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, batch_size, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1
        av_train_err = train_err / train_batches
        av_val_err = val_err / val_batches
        av_val_acc = val_acc / val_batches
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(av_train_err))
        print("  validation loss:\t\t{:.6f}".format(av_val_err))
        print("  validation accuracy:\t\t{:.2f} %".format(av_val_acc * 100))
        if av_val_acc > best_validation_accu:
            best_validation_accu = av_val_acc
            # Whenever validation accuracy improves, compute and print the test error:
            test_err = 0
            test_acc = 0
            test_batches = 0
            for batch in iterate_minibatches(X_test, y_test, batch_size, shuffle=False):
                inputs, targets = batch
                err, acc = val_fn(inputs, targets)
                test_err += err
                test_acc += acc
                test_batches += 1
            av_test_err = test_err / test_batches
            av_test_acc = test_acc / test_batches
            print("Final results:")
            print("  test loss:\t\t\t{:.6f}".format(av_test_err))
            print("  test accuracy:\t\t{:.2f} %".format(av_test_acc * 100))
            # Dump the network weights to a file:
            np.savez('weights_lasg_{0}'.format(model_type), *lasagne.layers.get_all_param_values(network))
    print('-' * 50)
    print("Best validation accuracy:\t\t{:.2f} %".format(best_validation_accu * 100))
    print("Best test accuracy:\t\t{:.2f} %".format(av_test_acc * 100))

'''
if __name__ == '__main__':
    from utils import reformatInput

    # Load electrode locations
    print('Loading data...')
    locs = scipy.io.loadmat('../Sample data/Neuroscan_locs_orig.mat')
    locs_3d = locs['A']
    locs_2d = []
    # Convert to 2D
    for e in locs_3d:
        locs_2d.append(azim_proj(e))

    feats = scipy.io.loadmat('../Sample data/FeatureMat_timeWin.mat')['features']
    print('Feats Shape:', feats.shape)
    subj_nums = np.squeeze(scipy.io.loadmat('../Sample data/trials_subNums.mat')['subjectNum'])
    # Leave-Subject-Out cross validation
    fold_pairs = []
    for i in np.unique(subj_nums):
        ts = subj_nums == i
        tr = np.squeeze(np.nonzero(np.bitwise_not(ts)))
        ts = np.squeeze(np.nonzero(ts))
        np.random.shuffle(tr)  # Shuffle indices
        np.random.shuffle(ts)
        fold_pairs.append((tr, ts))

    # CNN Mode
    print('Generating images...')
    # Find the average response over time windows
    # (integer division keeps the loop bound an int under Python 3)
    av_feats = reduce(lambda x, y: x + y, [feats[:, i * 192:(i + 1) * 192] for i in range(feats.shape[1] // 192)])
    av_feats = av_feats / (feats.shape[1] // 192)
    images = gen_images(np.array(locs_2d),
                        av_feats,
                        32, normalize=False)
    print('\n')

    # Class labels should start from 0
    print('Training the CNN Model...')
    train(images, np.squeeze(feats[:, -1]) - 1, fold_pairs[2], 'cnn')

    # Conv-LSTM Mode
    print('Generating images for all time windows...')
    images_timewin = np.array([gen_images(np.array(locs_2d),
                                          feats[:, i * 192:(i + 1) * 192], 32, normalize=False)
                               for i in range(feats.shape[1] // 192)])
    print('\n')
    print('Training the LSTM-CONV Model...')
    train(images_timewin, np.squeeze(feats[:, -1]) - 1, fold_pairs[2], 'mix')

    print('Done!')
'''