import tensorflow as tf
import numpy as np
import os
# Load "X" (the neural network's training and testing inputs)
def load_X(path):
    """Load per-channel signal files from *path* into one 3-D array.

    Each text file in the directory holds one signal channel: one line
    per step window, space-separated float samples within the line.
    Files are read in case-insensitive alphabetical order so the
    channel order is deterministic across runs.

    Args:
        path: directory containing one text file per signal channel
            (e.g. train_acc_x.txt ... train_gyr_z.txt).

    Returns:
        np.ndarray of shape (n_windows, n_samples, n_channels), float32.
    """
    X_signals = []
    for name in sorted(os.listdir(path), key=str.lower):
        # 'with' guarantees the handle is closed even if parsing fails.
        with open(os.path.join(path, name), 'r') as f:
            X_signals.append(
                [np.array(row.strip().split(' '), dtype=np.float32)
                 for row in f]
            )
    # X_signals is (n_channels, n_windows, n_samples); move channels last.
    return np.transpose(np.array(X_signals), (1, 2, 0))
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
    """Load 1-based integer class labels from *y_path*, one-hot encoded.

    The file holds one integer label per line (classes numbered from 1).

    Args:
        y_path: path to the label text file.

    Returns:
        np.ndarray of shape (n_samples, n_classes) with float one-hot
        rows, where n_classes = max(label) (after the 0-based shift, +1).
    """
    # 'with' closes the file even on a parse error; the original also
    # carried a no-op replace(' ', ' ') which has been dropped.
    with open(y_path, 'r') as f:
        y_ = np.array(
            [row.strip().split(' ') for row in f],
            dtype=np.int32
        )
    # Subtract 1 from each output class for friendly 0-based indexing.
    y_ = y_ - 1
    # One-hot encode: row i selects the identity row for class y_[i].
    y_ = y_.reshape(len(y_))
    n_values = int(np.max(y_)) + 1
    return np.eye(n_values)[np.array(y_, dtype=np.int32)]  # Returns FLOATS
class Config(object):
    """Hyper-parameters and trainable variables for the LSTM classifier.

    Geometry (series count, steps, features) is read directly off the
    training/testing feature matrices so the graph always matches the
    loaded data.

    Note: it would be more interesting to use a HyperOpt search space:
    https://github.com/hyperopt/hyperopt
    """

    def __init__(self, X_train, X_test):
        # Dataset geometry, derived from the loaded feature matrices.
        self.train_count = len(X_train)       # number of training series
        self.test_data_count = len(X_test)    # number of testing series
        self.n_steps = len(X_train[0])        # time steps per series (128)

        # Training hyper-parameters.
        self.learning_rate = 0.0025
        self.lambda_loss_amount = 0.0015
        self.training_epochs = 200
        self.batch_size = 1500

        # LSTM structure.
        self.n_inputs = len(X_train[0][0])  # features per time step (6 axes in this dataset)
        self.n_hidden = 64                  # hidden units per LSTM layer
        self.n_classes = 118                # final output classes

        # Trainable projections: input->hidden and hidden->output.
        self.W = {
            'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])),
            'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])),
        }
        self.biases = {
            # NOTE(review): mean=1.0 presumably keeps the ReLU hidden
            # units active at initialisation — confirm intent.
            'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)),
            'output': tf.Variable(tf.random_normal([self.n_classes])),
        }
def LSTM_Network(_X, config):
    """Build the recurrent classifier graph over a batch of sequences.

    The input is projected through a ReLU hidden layer, run through an
    LSTM with tf.contrib.rnn.static_rnn, and the final time step's
    output is linearly mapped to class scores ("many to one").

    NOTE(review): the original comments describe "two stacked LSTM
    cells", but MultiRNNCell below receives a single cell, so only one
    recurrent layer is actually built — confirm which depth was
    intended.

    Args:
        _X: feature tensor, shape [batch_size, n_steps, n_inputs].
        config: Config instance providing layer sizes, weights and biases.

    Returns:
        Logits tensor of shape [batch_size, n_classes]; no softmax is
        applied here (the loss applies it).
    """
    # (NOTE: This step could be greatly optimised by shaping the dataset once.)
    # input shape: (batch_size, n_steps, n_input)
    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
    # Reshape to prepare input to the hidden activation.
    _X = tf.reshape(_X, [-1, config.n_inputs])
    # new shape: (n_steps*batch_size, n_input)
    # Linear projection + ReLU before entering the recurrent stack.
    _X = tf.nn.relu(tf.matmul(_X, config.W['hidden']) + config.biases['hidden'])
    # Split data because static_rnn needs a list of per-step inputs.
    _X = tf.split(_X, config.n_steps, 0)
    # new shape: n_steps * (batch_size, n_hidden)
    # Build the recurrent layer(s); see NOTE above about stack depth.
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(config.n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell], state_is_tuple=True)
    # Get LSTM cell output for every time step.
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)
    # "Many to one": keep only the final time step's output feature.
    lstm_last_output = outputs[-1]
    # Linear projection to class logits.
    return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
def one_hot(y_, n_values=None):
    """Encode integer class labels as one-hot float rows.

    E.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]

    Args:
        y_: integer label array, shape (n,) or (n, 1).
        n_values: total number of classes. Defaults to max(y_) + 1 for
            backward compatibility; pass it explicitly when a batch may
            not contain the highest class, otherwise the encoded width
            would silently shrink.

    Returns:
        np.ndarray of shape (n, n_values) with float one-hot rows.
    """
    y_ = y_.reshape(len(y_))
    if n_values is None:
        n_values = int(np.max(y_)) + 1
    return np.eye(n_values)[np.array(y_, dtype=np.int32)]  # Returns FLOATS
if __name__ == "__main__":
    # -----------------------------
    # Step 1: load and prepare data
    # -----------------------------
    # Separate normalised input features and one-hot labels.
    X_train = load_X('../sixAxis/train/record')
    X_test = load_X('../sixAxis/test/record')
    train_label = load_y('../sixAxis/train/label.txt')
    test_label = load_y('../sixAxis/test/label.txt')

    # -----------------------------------
    # Step 2: define parameters for model
    # -----------------------------------
    config = Config(X_train, X_test)
    print("Some useful info to get an insight on dataset's shape and normalisation:")
    print("features shape, labels shape, each features mean, each features standard deviation")
    print(X_test.shape, test_label.shape,
          np.mean(X_test), np.std(X_test))
    print("the dataset is therefore properly normalised, as expected.")

    # ------------------------------------------------------
    # Step 3: Let's get serious and build the neural network
    # ------------------------------------------------------
    X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
    Y = tf.placeholder(tf.float32, [None, config.n_classes])
    pred_Y = LSTM_Network(X, config)

    # Loss, optimizer, evaluation.
    # L2 regularisation over every trainable variable.
    l2 = config.lambda_loss_amount * \
        sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
    # Softmax loss plus the L2 penalty.
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=pred_Y)) + l2
    optimizer = tf.train.AdamOptimizer(
        learning_rate=config.learning_rate).minimize(cost)
    correct_pred = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))

    # --------------------------------------------
    # Step 4: Hooray, now train the neural network
    # --------------------------------------------
    saver = tf.train.Saver(max_to_keep=1)
    # Note that log_device_placement can be turned ON but will cause console spam with RNNs.
    # 'with' ensures the session (and its resources) is closed on exit —
    # the original InteractiveSession was never closed.
    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        sess.run(tf.global_variables_initializer())
        best_accuracy = 0.0
        # Train over mini-batches; evaluate on the full test set each epoch.
        for i in range(config.training_epochs):
            # NOTE(review): samples beyond the last full batch are dropped
            # each epoch (the 'end' range stops at train_count + 1).
            for start, end in zip(range(0, config.train_count, config.batch_size),
                                  range(config.batch_size, config.train_count + 1, config.batch_size)):
                sess.run(optimizer, feed_dict={X: X_train[start:end],
                                               Y: train_label[start:end]})
            # Test completely at every epoch: calculate accuracy.
            pred_out, accuracy_out, loss_out = sess.run(
                [pred_Y, accuracy, cost],
                feed_dict={
                    X: X_test,
                    Y: test_label
                }
            )
            print("traing iter: {},".format(i) +
                  " test accuracy : {},".format(accuracy_out) +
                  " loss : {}".format(loss_out))
            # Checkpoint only when the test accuracy improves.
            if accuracy_out > best_accuracy:
                best_accuracy = accuracy_out
                saver.save(sess, './singleTimeFix6_ckpt/model')
        print("")
        # Append the best accuracy to the results log; 'with' closes the file.
        with open('singleTimeFix6.txt', 'a') as f:
            f.write(str(best_accuracy))
        print("final test accuracy: {}".format(accuracy_out))
        print("best epoch's test accuracy: {}".format(best_accuracy))
        print("")