Models/Network/CNN.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Import useful packages
import tensorflow as tf
from Models.Initialize_Variables.Initialize import *


def CNN(Input, keep_prob):
    '''
    Args:
        Input: The input EEG signals, flattened to shape [batch_size, 4096]
        keep_prob: The keep probability for dropout

    Returns:
        prediction: Softmax class probabilities predicted by the CNN model
    '''
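
    # Architecture overview (shape flow inferred from the layers below; batch
    # dimension omitted):
    #   64x64x1 -> conv1 -> conv2 -> concat(conv2, conv1) -> conv3 -> pool -> 32x32x64
    #   -> conv4 (VALID, 30x30x64) -> conv5 -> concat(conv5, conv4) -> conv6 -> pool -> 15x15x128
    #   -> flatten (28800) -> fc1 (512) -> fc2 (4) -> softmax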

    # Reshape the flattened EEG input to a [batch, 64, 64, 1] image-like tensor
    x_Reshape = tf.reshape(tensor=Input, shape=[-1, 64, 64, 1])

    # First Convolutional Layer: 3x3 conv, 1 -> 32 channels
    W_conv1 = weight_variable([3, 3, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.conv2d(x_Reshape, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1
    h_conv1_Acti = tf.nn.leaky_relu(h_conv1)
    # Spatial dropout: noise_shape [batch, 1, 1, channels] drops entire feature maps
    h_conv1_drop = tf.nn.dropout(h_conv1_Acti, keep_prob, noise_shape=[tf.shape(h_conv1_Acti)[0], 1, 1, tf.shape(h_conv1_Acti)[3]])

    # Second Convolutional Layer: 3x3 conv, 32 -> 32 channels
    W_conv2 = weight_variable([3, 3, 32, 32])
    b_conv2 = bias_variable([32])
    h_conv2 = tf.nn.conv2d(h_conv1_drop, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2
    # Batch normalization is kept in training mode, as throughout this model
    h_conv2_BN = tf.layers.batch_normalization(h_conv2, training=True)
    h_conv2_Acti = tf.nn.leaky_relu(h_conv2_BN)

    # Third Convolutional Layer: concatenate the conv2 and conv1 outputs along the
    # channel axis (a residual-style skip connection, 32 + 32 = 64 channels)
    W_conv3 = weight_variable([3, 3, 64, 64])
    b_conv3 = bias_variable([64])
    h_conv3_res = tf.concat([h_conv2_Acti, h_conv1_drop], axis=3)
    h_conv3 = tf.nn.conv2d(h_conv3_res, W_conv3, strides=[1, 1, 1, 1], padding='SAME') + b_conv3
    h_conv3_Acti = tf.nn.leaky_relu(h_conv3)
    h_conv3_drop = tf.nn.dropout(h_conv3_Acti, keep_prob, noise_shape=[tf.shape(h_conv3_Acti)[0], 1, 1, tf.shape(h_conv3_Acti)[3]])

    # First Max Pooling Layer: 2x2 pooling halves the spatial size, 64x64 -> 32x32
    h_pool3 = tf.nn.max_pool(h_conv3_drop, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Fourth Convolutional Layer: 3x3 conv, 64 -> 64 channels; VALID padding shrinks
    # the maps from 32x32 to 30x30, so the second pooling below yields the 15x15
    # size the flatten layer expects
    W_conv4 = weight_variable([3, 3, 64, 64])
    b_conv4 = bias_variable([64])
    h_conv4 = tf.nn.conv2d(h_pool3, W_conv4, strides=[1, 1, 1, 1], padding='VALID') + b_conv4
    h_conv4_BN = tf.layers.batch_normalization(h_conv4, training=True)
    h_conv4_Acti = tf.nn.leaky_relu(h_conv4_BN)
    h_conv4_drop = tf.nn.dropout(h_conv4_Acti, keep_prob, noise_shape=[tf.shape(h_conv4_Acti)[0], 1, 1, tf.shape(h_conv4_Acti)[3]])

    # Fifth Convolutional Layer
    W_conv5 = weight_variable([3, 3, 64, 64])
    b_conv5 = bias_variable([64])
    h_conv5 = tf.nn.conv2d(h_conv4_drop, W_conv5, strides=[1, 1, 1, 1], padding='SAME') + b_conv5
    h_conv5_BN = tf.layers.batch_normalization(h_conv5, training=True)
    h_conv5_Acti = tf.nn.leaky_relu(h_conv5_BN)

    # Sixth Convolutional Layer: concatenate the conv5 and conv4 outputs
    # (64 + 64 = 128 channels), then 3x3 conv, 128 -> 128 channels
    W_conv6 = weight_variable([3, 3, 128, 128])
    b_conv6 = bias_variable([128])
    h_conv6_res = tf.concat([h_conv5_Acti, h_conv4_drop], axis=3)
    h_conv6 = tf.nn.conv2d(h_conv6_res, W_conv6, strides=[1, 1, 1, 1], padding='SAME') + b_conv6
    h_conv6_Acti = tf.nn.leaky_relu(h_conv6)
    h_conv6_drop = tf.nn.dropout(h_conv6_Acti, keep_prob, noise_shape=[tf.shape(h_conv6_Acti)[0], 1, 1, tf.shape(h_conv6_Acti)[3]])

    # Second Max Pooling Layer
    h_pool6 = tf.nn.max_pool(h_conv6_drop, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Flatten Layer: 15 x 15 x 128 = 28800 features per example
    h_pool6_flat = tf.reshape(h_pool6, [-1, 15 * 15 * 128])

    # First Fully Connected Layer: 28800 -> 512, with batch norm and dropout
    W_fc1 = weight_variable([15 * 15 * 128, 512])
    b_fc1 = bias_variable([512])
    h_fc1 = tf.matmul(h_pool6_flat, W_fc1) + b_fc1
    h_fc1_BN = tf.layers.batch_normalization(h_fc1, training=True)
    h_fc1_Acti = tf.nn.leaky_relu(h_fc1_BN)
    h_fc1_drop = tf.nn.dropout(h_fc1_Acti, keep_prob)

    # Second Fully Connected Layer: 512 -> 4 classes, with softmax output
    W_fc2 = weight_variable([512, 4])
    b_fc2 = bias_variable([4])
    prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    return prediction
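

# A minimal usage sketch, not part of the original file: it assumes a TF 1.x
# graph/session workflow with flattened 64x64 inputs and 4-class one-hot labels.
# The placeholder shapes, learning rate, and dummy batch are illustrative
# assumptions, not values taken from this repository.
if __name__ == '__main__':
    import numpy as np

    x = tf.placeholder(tf.float32, shape=[None, 64 * 64])
    y = tf.placeholder(tf.float32, shape=[None, 4])
    keep_prob = tf.placeholder(tf.float32)

    prediction = CNN(x, keep_prob)

    # Cross-entropy on the softmax output, clipped for numerical stability
    loss = -tf.reduce_mean(tf.reduce_sum(y * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)), axis=1))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Dummy batch just to exercise the graph end to end
        batch_x = np.random.rand(2, 64 * 64).astype(np.float32)
        batch_y = np.eye(4, dtype=np.float32)[[0, 1]]
        sess.run(train_step, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})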