|
a |
|
b/lstm_kmean/utils.py |
|
|
1 |
import numpy as np |
|
|
2 |
import os |
|
|
3 |
from glob import glob |
|
|
4 |
from natsort import natsorted |
|
|
5 |
import tensorflow as tf |
|
|
6 |
from functools import partial |
|
|
7 |
|
|
|
8 |
# Discover the class directories under the training split, in natural sort
# order so that e.g. "class10" follows "class9" rather than "class1".
data_cls = natsorted(glob('../../data/b2i_data/images/train/*'))

# Map each class-folder name (last path component) to a contiguous integer id.
# NOTE(review): splitting on os.path.sep assumes glob returned native separators
# — verify on non-POSIX platforms.
cls2idx = {path.split(os.path.sep)[-1]: i for i, path in enumerate(data_cls)}

# Inverse lookup: integer id back to the class-folder name.
idx2cls = {i: name for name, i in cls2idx.items()}
|
|
11 |
|
|
|
12 |
# def map_func(path): |
|
|
13 |
# eeg, class_name, subject_name = np.load(path, allow_pickle=True) |
|
|
14 |
# eeg = np.transpose(eeg, [1, 0]) |
|
|
15 |
# class_idx = cls2idx[class_name] |
|
|
16 |
# return tf.cast(eeg, dtype=tf.float32), tf.cast(class_idx, dtype=tf.int32) |
|
|
17 |
|
|
|
18 |
# def load_complete_data(data_path, batch_size=16): |
|
|
19 |
# dataset = tf.data.Dataset.list_files(data_path) |
|
|
20 |
# dataset = dataset.map(lambda X: tf.numpy_function( partial(map_func), [X], [tf.float32, tf.int32,],), num_parallel_calls=tf.data.experimental.AUTOTUNE) |
|
|
21 |
# dataset = dataset.shuffle(buffer_size=2*batch_size).batch(batch_size, drop_remainder=False).prefetch(tf.data.experimental.AUTOTUNE) |
|
|
22 |
# return dataset |
|
|
23 |
|
|
|
24 |
def preprocess_data(X, Y):
    """Normalize one EEG sample and reduce its one-hot label to an index.

    Drops the trailing singleton axis from ``X``, rescales values into
    roughly [-1, 1] around half of the sample's maximum, swaps the first
    two axes, and casts to float32. ``Y`` is collapsed from a one-hot
    vector to its argmax index.

    Args:
        X: input sample tensor with a trailing singleton dimension;
           presumably shaped (channels, time, 1) — TODO confirm against caller.
        Y: one-hot encoded label vector.

    Returns:
        Tuple of (float32 normalized/transposed sample, int64 class index).
    """
    X = tf.squeeze(X, axis=-1)
    # Center and scale by half the per-sample peak: [0, max] -> [-1, 1].
    # NOTE(review): divides by zero if the sample is all zeros — confirm
    # inputs are strictly positive.
    half_peak = tf.reduce_max(X) / 2.0
    X = (X - half_peak) / half_peak
    # Swap the first two axes, then fix the dtype for downstream layers.
    X = tf.cast(tf.transpose(X, [1, 0]), dtype=tf.float32)
    Y = tf.argmax(Y)
    return X, Y
|
|
32 |
|
|
|
33 |
def load_complete_data(X, Y, batch_size=16):
    """Build a shuffled, batched, prefetching tf.data pipeline over (X, Y).

    Each element is passed through ``preprocess_data`` before batching.

    Args:
        X: array/tensor of samples, sliced along the first axis.
        Y: array/tensor of labels, aligned with ``X``.
        batch_size: number of samples per batch (default 16).

    Returns:
        A ``tf.data.Dataset`` yielding preprocessed (sample, label) batches;
        the final batch may be smaller (``drop_remainder=False``).
    """
    pipeline = tf.data.Dataset.from_tensor_slices((X, Y))
    pipeline = pipeline.map(preprocess_data)
    # Shuffle window of two batches, then batch and overlap host/device work.
    pipeline = pipeline.shuffle(buffer_size=2 * batch_size)
    pipeline = pipeline.batch(batch_size, drop_remainder=False)
    pipeline = pipeline.prefetch(tf.data.experimental.AUTOTUNE)
    return pipeline