Diff of /train.py [000000] .. [56ff0f]

import matplotlib.pyplot as plt
import numpy as np
import time
import tensorflow as tf
import pickle
import wfdb  # not used in this script; presumably needed when preparing the pickled data
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split  # only used by the commented-out split in get_data()

# Hyper-parameters
sequence_length = 240
epochs = 1000  # int(input('Enter Number of Epochs (or enter default 1000): '))
FS = 100.0  # presumably the sampling frequency in Hz; not used in this script

def z_norm(result):
    # Zero-mean, unit-variance normalization (defined but not called in this script).
    result_mean = np.mean(result)
    result_std = np.std(result)
    result = (result - result_mean) / result_std
    return result


def split_data(X):
    # Split each sample into its two sequence channels (elements 0 and 1)
    # and its two auxiliary features (elements 2 and 3).
    X1 = []
    X2 = []
    for index in range(len(X)):
        X1.append([X[index][0], X[index][1]])
        X2.append([X[index][2], X[index][3]])

    return np.array(X1).astype('float64'), np.array(X2).astype('float64')
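
# A minimal sketch of the sample layout split_data expects (an assumption
# inferred from sequence_length and the Input shapes in build_model; the
# pickled samples themselves are not part of this diff):
#   X = [[np.zeros(sequence_length), np.zeros(sequence_length), 0.7, 62.0]
#        for _ in range(8)]
#   X1, X2 = split_data(X)   # X1.shape == (8, 2, 240), X2.shape == (8, 2)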

def get_data():
    # Load the pre-split train/validation/test sets; the pickles are produced
    # by a separate preprocessing step that is not part of this diff.
    with open('train_input.pickle', 'rb') as f:
        X_train = np.asarray(pickle.load(f))
    with open('train_label.pickle', 'rb') as f:
        y_train = np.asarray(pickle.load(f))
    with open('val_input.pickle', 'rb') as f:
        X_val = np.asarray(pickle.load(f))
    with open('val_label.pickle', 'rb') as f:
        y_val = np.asarray(pickle.load(f))
    with open('test_input.pickle', 'rb') as f:
        X_test = np.asarray(pickle.load(f))
    with open('test_label.pickle', 'rb') as f:
        y_test = np.asarray(pickle.load(f))

    # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    '''
    X_train = X_train[:, 0, :]
    X_test = X_test[:, 0, :]
    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
    '''
    # Separate the sequence channels from the auxiliary features.
    X_train1, X_train2 = split_data(X_train)
    X_val1, X_val2 = split_data(X_val)
    X_test1, X_test2 = split_data(X_test)

    # Reorder the sequence inputs to (samples, timesteps, channels) for the LSTMs.
    X_train1 = np.transpose(X_train1, (0, 2, 1))
    # X_train2 = np.reshape(X_train2, (X_train2.shape[0], X_train2.shape[1], 1))
    X_test1 = np.transpose(X_test1, (0, 2, 1))
    # X_test2 = np.reshape(X_test2, (X_test2.shape[0], X_test2.shape[1], 1))
    X_val1 = np.transpose(X_val1, (0, 2, 1))
    return X_train1, X_train2, y_train, X_val1, X_val2, y_val, X_test1, X_test2, y_test
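
# Assumed data layout after get_data() (inferred from sequence_length and the
# Input layers in build_model; the pickle files themselves are not part of this diff):
#   X_*1: (n_samples, 240, 2)  -- two sequence channels, 240 timesteps each
#   X_*2: (n_samples, 2)       -- two auxiliary scalar features
#   y_*:  (n_samples,)         -- binary labels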

def build_model():
    layers = {'input': 2, 'hidden1': 256, 'hidden2': 256, 'hidden3': 256, 'output': 1}

    # Sequence branch: three stacked LSTMs over the (timesteps, channels) input.
    x1 = tf.keras.layers.Input(shape=(sequence_length, layers['input']))
    m1 = tf.keras.layers.LSTM(layers['hidden1'],
                              recurrent_dropout=0.5,
                              return_sequences=True)(x1)
    m1 = tf.keras.layers.LSTM(layers['hidden2'],
                              recurrent_dropout=0.5,
                              return_sequences=True)(m1)
    m1 = tf.keras.layers.LSTM(layers['hidden3'],
                              recurrent_dropout=0.5,
                              return_sequences=False)(m1)

    # Auxiliary branch: a small dense projection of the two scalar features.
    x2 = tf.keras.layers.Input(shape=(2,))
    m2 = tf.keras.layers.Dense(32)(x2)

    # merged = Merge([model1, model2], mode='concat')  # old Keras 1.x API
    merged = tf.keras.layers.Concatenate(axis=1)([m1, m2])

    # Classification head: a single sigmoid unit for the binary label.
    out = tf.keras.layers.Dense(8)(merged)
    out = tf.keras.layers.Dense(layers['output'], kernel_initializer='normal')(out)
    out = tf.keras.layers.Activation("sigmoid")(out)

    model = tf.keras.models.Model(inputs=[x1, x2], outputs=[out])

    start = time.time()
    model.compile(loss="binary_crossentropy", optimizer="adam",
                  metrics=['accuracy'])
    print("Compilation Time : ", time.time() - start)

    model.summary()
    return model
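
# A minimal smoke test for build_model (illustrative only, not part of the
# original script; the batch size of 4 is arbitrary): random inputs of the
# shapes above should yield one sigmoid probability per sample.
#   model = build_model()
#   probs = model.predict([np.random.rand(4, sequence_length, 2),
#                          np.random.rand(4, 2)])
#   assert probs.shape == (4, 1)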

def run_network(model=None, data=None):
    global_start_time = time.time()

    print('Loading data...')
    X_train1, X_train2, y_train, X_val1, X_val2, y_val, X_test1, X_test2, y_test = get_data()
    print('\nData loaded. Compiling...\n')

    # Weight the loss to compensate for class imbalance in the training labels.
    class_w = class_weight.compute_class_weight(class_weight='balanced',
                                                classes=np.unique(y_train),
                                                y=y_train)
    print(class_w)

    if model is None:
        model = build_model()

    try:
        print("Training")

        # Keras expects class weights as a {class_index: weight} dict.
        class_w = {i: class_w[i] for i in range(2)}
        callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
        history = model.fit([X_train1, X_train2], y_train,
                            validation_data=([X_val1, X_val2], y_val),
                            callbacks=[callback],
                            epochs=epochs, batch_size=256, class_weight=class_w)

        '''
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.show()
        '''
        # Evaluate the model on the held-out test set.
        y_pred = model.predict([X_test1, X_test2])  # per-sample probabilities (not used further here)
        scores = model.evaluate([X_test1, X_test2], y_test)
        print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

    except KeyboardInterrupt:
        print("Training interrupted")
        print('Training duration (s) : ', time.time() - global_start_time)
        return model

    print('Training duration (s) : ', time.time() - global_start_time)
    return model

if __name__ == '__main__':
    run_network()