Models/main-DenseGCN.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Import useful packages
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

# Hide TensorFlow's C++ log messages and warnings
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'

import tensorflow as tf
import numpy as np
import pandas as pd
from scipy import sparse
from Models.Network.lib_for_GCN import DenseGCN_Model, graph, coarsening
from Models.DatasetAPI.DataLoader import DatasetLoader

# Model Name
Model = 'Graph_Convolutional_Neural_Network'

# Reset the default graph and let the GPU memory allocation grow only as needed
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

# Your dataset location, for example the EEG Motor Movement/Imagery Dataset
# The CSV files should be named training_set.csv, training_label.csv, test_set.csv, and test_label.csv
DIR = 'DatasetAPI/EEG-Motor-Movement-Imagery-Dataset/'
SAVE = 'Saved_Files/' + Model + '/'
if not os.path.exists(SAVE):  # If the SAVE folder doesn't exist, create it (including parents)
    os.makedirs(SAVE)

# Load the dataset; labels are expected in one-hot representation
train_data, train_labels, test_data, test_labels = DatasetLoader(DIR=DIR)
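
# A quick shape sanity check (a sketch: the exact shapes depend on DatasetLoader,
# but the samples and their one-hot labels should agree along the first axis)
print('Train:', np.shape(train_data), 'labels:', np.shape(train_labels))
print('Test: ', np.shape(test_data),  'labels:', np.shape(test_labels))
assert np.shape(train_data)[0] == np.shape(train_labels)[0]
assert np.shape(test_data)[0]  == np.shape(test_labels)[0]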

# Read the adjacency matrix
Adjacency_Matrix = pd.read_csv(DIR + 'Adjacency_Matrix.csv', header=None)
Adjacency_Matrix = np.array(Adjacency_Matrix).astype('float32')
Adjacency_Matrix = sparse.csr_matrix(Adjacency_Matrix)
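
# Structural checks before coarsening (a sketch, assuming the usual convention:
# a square, symmetric, non-negative weight matrix with one row/column per EEG channel)
assert Adjacency_Matrix.shape[0] == Adjacency_Matrix.shape[1], 'adjacency must be square'
assert abs(Adjacency_Matrix - Adjacency_Matrix.T).nnz == 0, 'adjacency must be symmetric'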

# Coarsen the graph; feel free to change the number of levels and observe the difference
graphs, perm = coarsening.coarsen(Adjacency_Matrix, levels=5, self_connections=False)
X_train = coarsening.perm_data(train_data, perm)
X_test  = coarsening.perm_data(test_data,  perm)
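
# perm_data reorders the channels to match the coarsened graphs and pads them with
# disconnected "fake" nodes so that each pooling step can halve the node count;
# the permuted feature dimension should therefore equal len(perm)
assert X_train.shape[1] == len(perm)
assert X_test.shape[1]  == len(perm)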

# Obtain the graph Laplacian of each coarsened graph
L = [graph.laplacian(A, normalized=True) for A in graphs]
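
# For reference: the 'chebyshev5' filter below expands each graph convolution in
# Chebyshev polynomials of the rescaled Laplacian, y = sum_k theta_k * T_k(L_hat) x
# with L_hat = 2 L / lmax - I. A minimal SciPy sketch of one such filtering step
# (illustrative only -- the model class implements this inside the TensorFlow graph):
from scipy.sparse.linalg import eigsh

def chebyshev_filter(L0, x, theta):
    """Apply a Chebyshev graph filter with coefficients theta (len(theta) >= 2)."""
    lmax = eigsh(L0, k=1, return_eigenvectors=False)[0]  # largest eigenvalue of L0
    L_hat = sparse.csr_matrix(2.0 / lmax * L0 - sparse.identity(L0.shape[0], dtype=L0.dtype))
    t_prev, t_curr = x, L_hat.dot(x)                     # T_0(L_hat) x and T_1(L_hat) x
    y = theta[0] * t_prev + theta[1] * t_curr
    for k in range(2, len(theta)):                       # T_k = 2 L_hat T_{k-1} - T_{k-2}
        t_prev, t_curr = t_curr, 2 * L_hat.dot(t_curr) - t_prev
        y = y + theta[k] * t_curr
    return y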

# Hyper-parameters
params = dict()
params['dir_name']       = Model
params['num_epochs']     = 100
params['batch_size']     = 1024
params['eval_frequency'] = 100

# Building blocks.
params['filter'] = 'chebyshev5'    # Chebyshev polynomial graph filter
params['brelu']  = 'b2relu'        # Bias + ReLU variant
params['pool']   = 'mpool1'        # Max pooling

# Architecture.
params['F'] = [16, 32, 64, 128, 256, 512]         # Number of graph convolutional filters per layer.
params['K'] = [2, 2, 2, 2, 2, 2]                  # Chebyshev polynomial orders.
params['p'] = [1, 1, 1, 1, 1, 1]                  # Pooling sizes (1 = no pooling).
params['M'] = [4]                                 # Output dimensionality of fully connected layers (the last one is the number of classes).

# Optimization.
params['regularization'] = 0.001     # L2 regularization weight
params['dropout']        = 0.50      # Dropout rate
params['learning_rate']  = 0.01      # Initial learning rate
params['decay_rate']     = 1         # Learning-rate decay; 1 means no decay
params['momentum']       = 0         # momentum == 0 selects the Adam optimizer
params['decay_steps']    = np.shape(train_data)[0] / params['batch_size']  # One epoch's worth of batches
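
# Under tf.train.exponential_decay semantics (an assumption; the model class defines
# the actual optimizer), lr(step) = learning_rate * decay_rate ** (step / decay_steps),
# so decay_rate = 1 keeps the learning rate constant at 0.01:
def lr_at(step):
    return params['learning_rate'] * params['decay_rate'] ** (step / params['decay_steps'])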

# Train the model
model = DenseGCN_Model.cgcnn(L, **params)
accuracy, loss, t_step = model.fit(X_train, train_labels, X_test, test_labels)
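
# Persist the evaluation curves next to the checkpoints (a sketch; model.fit is
# assumed to return per-evaluation accuracy and loss lists, as in cnn_graph)
np.savetxt(SAVE + 'accuracy.csv', np.asarray(accuracy), delimiter=',')
np.savetxt(SAVE + 'loss.csv', np.asarray(loss), delimiter=',')
print('Highest test accuracy: {:.2f}%'.format(float(np.max(accuracy))))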