Code/finalc.py


#IMPORTING NECESSARY LIBRARIES
import matplotlib.pyplot as plt #import matplotlib
from matplotlib.pyplot import figure #import figure from matplotlib.pyplot
import numpy as np #import numpy
import tensorflow as tf #import tensorflow

from tensorflow import keras #import keras
import pandas as pd #import pandas
from scipy.fftpack import irfft, rfft #import the rfft and irfft functions

print("importing datasets")

#IMPORTING THE DATASET pure.xlsx
df = pd.read_excel('pure.xlsx', header=None) #import pure dataset
pure_acc = df.to_numpy() #convert to numpy array

#IMPORTING THE DATASET noisy.xlsx
df = pd.read_excel('noisy.xlsx', header=None) #import noisy dataset
noisy_acc = df.to_numpy() #convert to numpy array

#IMPORTING THE DATASET test_pure.xlsx
df = pd.read_excel('test_pure.xlsx', header=None) #import test_pure dataset
test_pure_acc = df.to_numpy() #convert to numpy array

#IMPORTING THE DATASET test_noisy.xlsx
df = pd.read_excel('test_noisy.xlsx', header=None) #import test_noisy dataset
test_noisy_acc = df.to_numpy() #convert to numpy array
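#Optional sanity check (a hedged addition, not in the original script; assumes the
#pure/noisy files come in equal-shaped pairs, e.g. 701 samples per signal as used below):
assert pure_acc.shape == noisy_acc.shape, "train pure/noisy shapes must match"
assert test_pure_acc.shape == test_noisy_acc.shape, "test pure/noisy shapes must match"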
print("RANDOMIZING ORDER OF DATASET TO AVOID BIAS")
33
#RANDOMIZING ORDER OF DATASET TO AVOID BIAS
34
n = np.linspace(0,noisy_acc.shape[0]-1,noisy_acc.shape[0],dtype=int) #create array from 0 to 700
35
np.random.shuffle(n) #shauffle the ARRAY
36
temp_noisy = noisy_acc #save noisy dataset in temporary array
37
temp_pure = pure_acc #save pure dataset in temporary array
38
39
for i in n:
40
  noisy_acc[i] = temp_noisy[n[i]]#randomizing noisy datasets
41
  pure_acc[i] = temp_pure[n[i]]#randomizing pure dataset
42
43
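#Optional reproducibility note (a hedged addition): seeding NumPy's RNG before the
#shuffle, e.g. np.random.seed(0), makes the permutation repeatable across runs.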
print("DATA PREPROCESSING, CONVERTING TRAINING DATA TO FREQUENCY DOMAIN")
44
45
#DATA PREPROCESSING, CONVERTING TRAINING DATA TO FREQUENCY DOMAIN
46
noisy_acc_freq = np.zeros((noisy_acc.shape[0],noisy_acc.shape[1]),np.float64) #create array of zeros of same shape as noisy_acc
47
for i in range(noisy_acc.shape[0]): #looping in noisy_acc ARRAY
48
  noisy_acc_freq[i] = rfft(noisy_acc[i]) #converting all signals to freq domin
49
50
pure_acc_freq = np.zeros((pure_acc.shape[0],pure_acc.shape[1]),np.float64) #create array of zeros of same shape as pure_acc
51
for i in range(pure_acc.shape[0]): #looping in pure_acc ARRAY
52
  pure_acc_freq[i] = rfft(pure_acc[i])#converting all signals to freq domin
53
54
noisy_acc_freq = noisy_acc_freq.reshape(noisy_acc_freq.shape[0],noisy_acc_freq.shape[1],1)#RESHAPING THE NOISY ACCELEROMETER ARRAY
55
56
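#Note (informational): scipy.fftpack.rfft packs the half-spectrum into a real array of
#the same length as the input ([y0, Re(y1), Im(y1), Re(y2), ...]), so the frequency-domain
#arrays keep the (samples, 701) shape and irfft can invert the model's output directly.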
print("MODELING THE CNN FOR DENOISING")
57
58
#MODELING THE CNN FOR DENOISING
59
# linear activations are used as unlike regression it is a pattern to pattern matching
60
model1 = keras.Sequential([ #CREATING sequential model
61
   keras.layers.ZeroPadding1D(padding=3), #zeropadding layer
62
   keras.layers.Conv1D(16, 7, strides=1, activation='linear'), #convolutional layer filter size=7, no of filters=16
63
   keras.layers.ZeroPadding1D(padding=8), #zeropadding layer
64
   keras.layers.Conv1D(32, 3, strides=1, activation='linear'),#convolutional layer filter size=3, no of filters=32
65
   keras.layers.Conv1D(32, 3, strides=1, activation='linear'),#convolutional layer filter size=3, no of filters=32
66
   keras.layers.Conv1D(32, 3, strides=1, activation='linear'),#convolutional layer filter size=3, no of filters=32
67
   keras.layers.Conv1D(16, 3, strides=1, activation='linear'),#convolutional layer filter size=3, no of filters=16
68
   keras.layers.Conv1D(16, 3, strides=1, activation='linear'),#convolutional layer filter size=3, no of filters=16
69
   keras.layers.Conv1D(16, 3, strides=1, activation='linear'),#convolutional layer filter size=3, no of filters=16
70
   keras.layers.Flatten(), #flatten layer
71
   keras.layers.Dense(16, activation='linear'),#fully connected layer, size = 16
72
   keras.layers.Dense(pure_acc_freq.shape[1], activation=None)#fully connected layer size=size of signal
73
])
74
75
optim = tf.keras.optimizers.Adam(3e-4) #using adam optimizer, learning rate=3 x 10^-4
76
77
model1.compile(optimizer=optim,  loss = 'mse',metrics=[tf.keras.metrics.RootMeanSquaredError('rmse')]) #defining loss function and optimizer functions
78
79
model1.fit(noisy_acc_freq, pure_acc_freq, epochs=100, batch_size=16) #training model1 for 100 epochs on batch size of 16
80
81
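#Optional (an assumption, not part of the original training setup): reserve a validation
#slice to monitor overfitting over the 100 epochs, e.g.
#model1.fit(noisy_acc_freq, pure_acc_freq, epochs=100, batch_size=16, validation_split=0.1)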
print("MODELING THE ANN FOR DENOISING")
82
#MODELING THE ANN FOR DENOISING
83
# linear activations are used as unlike regression it is a pattern to pattern matching
84
model2 = keras.Sequential([#CREATING sequential model
85
   keras.layers.Flatten(),#flatten layer
86
   keras.layers.Dense(4096, activation='linear'), #fully connected layer, size = 4096
87
   keras.layers.Dense(8192, activation='linear'),#fully connected layer, size = 8192
88
   keras.layers.Dense(4096, activation='linear'),#fully connected layer, size = 4096
89
   keras.layers.Dense(2048, activation='linear'),#fully connected layer, size = 2048
90
   keras.layers.Dense(pure_acc_freq.shape[1], activation=None)#fully connected layer, size = size of signal
91
])
92
93
optim = tf.keras.optimizers.SGD(1e-3)#using SGD optimizer, learning rate=1 x 10^-3
94
#the momentum aspect of Adam caused it to spiral out of control
95
96
model2.compile(optimizer=optim, loss = 'mse', metrics=[tf.keras.metrics.RootMeanSquaredError('rmse')])#defining loss function and optimizer functions
97
98
model2.fit(noisy_acc_freq, pure_acc_freq, epochs=100, batch_size=12)#training model2 for 100 epochs on batch size 12
99
100
print("SAVING THE MODEL")
101
#SAVING THE MODEL
102
model1.save('cnn1.h5')#SAVING MODEL1
103
model2.save('ann1.h5')#SAVING MODEL2
104
new_model = tf.keras.models.load_model('cnn1.h5') #loading CNN under the variable new_model
105
106
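#Note (hedged): a '.h5' path selects the HDF5 format, which needs the h5py package;
#tf.keras can also save to a directory path (e.g. model1.save('cnn1')) using the
#SavedModel format if HDF5 is not desired.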
print("COMPARING THE PURE, NOISY AND DENOISED SIGNALS USING MATPLOTLIB")
107
#COMPARING THE PURE, NOISY AND DENOISED SIGNALS USING MATPLOTLIB
108
z = test_noisy_acc[1] #z has signal no. 1038 from the test_noisy dataset.
109
z= rfft(z) # converting z to freq DOMAIN
110
z = z.reshape(1,z.shape[0],1) #reshaping z ARRAY
111
y_denoised = new_model.predict(z) #denoising z using new_model
112
y_denoised = irfft(y_denoised) #converting z back to time DOMAIN
113
figure(num=None, figsize=(10, 6), dpi=80, facecolor='w', edgecolor='k') #defining size and shape of the plot
114
x = np.linspace(start=0,stop=7,num=701) #creating array= {0,0.01,0.02....6.98,6.99,7}
115
y_noisy = test_noisy_acc[1]  #plotting the noisy SIGNAL
116
plt.plot(x,y_noisy)             # plotting NOISY SIGNAL
117
figure(num=None, figsize=(10, 6), dpi=80, facecolor='w', edgecolor='k')#defining size and shape of the plot
118
plt.plot(x,pure_acc[1])      #plotting PURE SIGNAL
119
figure(num=None, figsize=(10, 6), dpi=80, facecolor='w', edgecolor='k')#defining size and shape of the plot
120
plt.plot(x,y_denoised.reshape(701))     #plotting DENOISED SIGNAL
121
122
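#Optional display step (assumes the script runs outside a notebook, where figures are
#not rendered automatically; note plt.show() blocks until the windows are closed):
#plt.show()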
print("CALCULATING IDR BY CALCULATING RELATIVE DISPLACEMENTS BY DOUBLE INTEGRATION:pure")
123
#NOW THAT OUR ML ALGORITHM IS WORKING FINE, WE WILL MOVE ON TO EVALUATING IT USING [IO,LS,CP]
124
#CALCULATING IDR BY CALCULATING RELATIVE DISPLACEMENTS BY DOUBLE INTEGRATION. AND FINALLY CREATING [IO,LS,CP] ARRAY FOR FULL PURE #DATASET
125
time = 1/360 #time taken between 2 readings. Sampling rate = 360Hz
126
127
pure_classification = np.zeros((int(test_pure_acc.shape[0]*0.5),3),np.float64) #creatng array of zeros for classinfiying the pure signals
128
129
v = np.zeros(test_pure_acc.shape,np.float64) #velocity
130
disp = np.zeros(test_pure_acc.shape,np.float64) #displacement
131
floor_height = 2.75 #standard floor height in India = 2.75m
132
133
134
for i in range(0,test_pure_acc.shape[0]): #iterating over the test_pure acceleration signals
135
  for j in range(1,test_pure_acc.shape[1]):   #going over each sample
136
      v[i][j] = v[i][j-1] + (((test_pure_acc[i][j-1]+test_pure_acc[i][j])/2) * (time)) #integrating for velocity
137
138
for i in range(0,test_pure_acc.shape[0]):#iterating over the velocity
139
  for j in range(1,test_pure_acc.shape[1]):  #going over each sample
140
      disp[i][j] = disp[i][j-1] + (((v[i][j-1]+v[i][j])/2) * (time)) #integrating for displacement
141
142
for i in range(0,disp.shape[0],2): #going over adjacent displacement arrays
143
  idr = np.zeros(disp.shape[1],np.float64) #creating array of zeros for IDRs
144
145
  for j in range(disp.shape[1]): #going over adjacent displacement arrays
146
    idr[j] = ( np.abs(disp[i][j]-disp[i+1][j]) )/(floor_height) #calculating IDRs
147
148
  '''
149
 if idr < 0.007       => Immediate Occupancy
150
 if idr 0.007 to 0.05 => Life Safety
151
 if idr >0.05         => Collapse prevention
152
 '''
153
  scores=np.array([0,0,0]) #scores
154
  '''
155
 io_score=0th index
156
 ls_score=1st index
157
 cp_score=2nd index
158
 '''
159
  for k in range (idr.shape[0]):#calculating scores for function/array of IDRs
160
    if idr[k]<0.007:#if IDR<0.007
161
      scores[0]+=1 #increment scores[0] by 1
162
    elif idr[k]>0.05:#if IDR>0.05
163
      scores[2]+=1 #increment scores[2] by 1
164
    else:#if 0.007<IDR<0.05
165
      scores[1]+=1#increment scores[2] by 1
166
167
  #most severe score is considered for labeling the dataset
168
  if scores[2]>0:#if scores[2]>0:
169
    scores = [0,0,1]#scores becomes [0,0,1]
170
  elif scores[1]>0:#if scores[1]>0 and scores[2]=0:
171
    scores = [0,1,0]#scores becomes [0,1,0]
172
  else: #if scores[0]>0 and scores[1]=0 and scores[2]=0
173
    scores = [1,0,0]#scores becomes [1,0,0]
174
175
  pure_classification[int(i/2)]=scores #adding scores to pure_classification array
176
  scores=np.array([0,0,0]) #resetting scores array
177
178
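#The double-integration and IDR-labeling steps above are repeated for the noisy,
#denoised, and training datasets below. A minimal refactor sketch (optional; the names
#integrate_twice and classify_idr are hypothetical, not part of the original script):
def integrate_twice(acc, dt=1/360):
  #cumulative trapezoidal rule applied twice: acceleration -> velocity -> displacement
  v = np.zeros(acc.shape, np.float64)
  v[:, 1:] = np.cumsum((acc[:, :-1] + acc[:, 1:]) / 2 * dt, axis=1)
  d = np.zeros(acc.shape, np.float64)
  d[:, 1:] = np.cumsum((v[:, :-1] + v[:, 1:]) / 2 * dt, axis=1)
  return d

def classify_idr(disp, floor_height=2.75):
  #label each adjacent pair of displacement histories with its most severe IDR class
  labels = np.zeros((disp.shape[0] // 2, 3), np.float64)
  for i in range(0, disp.shape[0], 2):
    idr = np.abs(disp[i] - disp[i+1]) / floor_height #inter-storey drift ratio
    if np.any(idr > 0.05): #any Collapse Prevention sample
      labels[i // 2] = [0, 0, 1]
    elif np.any(idr >= 0.007): #any Life Safety sample (and no CP)
      labels[i // 2] = [0, 1, 0]
    else: #only Immediate Occupancy samples
      labels[i // 2] = [1, 0, 0]
  return labels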
#FOR NOISY CLASSIFICATION

print("CALCULATING IDR BY CALCULATING RELATIVE DISPLACEMENTS BY DOUBLE INTEGRATION: noisy")
time = 1/360 #time between two readings; sampling rate = 360 Hz

noisy_classification = np.zeros((int(test_noisy_acc.shape[0]*0.5),3),np.float64) #array of zeros for classifying the noisy signals

v = np.zeros(test_noisy_acc.shape,np.float64) #velocity
disp = np.zeros(test_noisy_acc.shape,np.float64) #displacement


for i in range(0,test_noisy_acc.shape[0]): #iterating over the test_noisy acceleration signals
  for j in range(1,test_noisy_acc.shape[1]): #going over each sample
      v[i][j] = v[i][j-1] + (((test_noisy_acc[i][j-1]+test_noisy_acc[i][j])/2) * (time)) #trapezoidal integration for velocity

for i in range(0,test_noisy_acc.shape[0]): #iterating over the velocity signals
  for j in range(1,test_noisy_acc.shape[1]): #going over each sample
      disp[i][j] = disp[i][j-1] + (((v[i][j-1]+v[i][j])/2) * (time)) #trapezoidal integration for displacement

for i in range(0,disp.shape[0],2): #going over adjacent displacement arrays (floor pairs)
  idr = np.zeros(disp.shape[1],np.float64) #array of zeros for the IDRs

  for j in range(disp.shape[1]): #going over each sample of the adjacent displacement arrays
    idr[j] = ( np.abs(disp[i][j]-disp[i+1][j]) )/(floor_height) #calculating the IDRs

  #if idr < 0.007          => Immediate Occupancy (IO)
  #if 0.007 <= idr <= 0.05 => Life Safety (LS)
  #if idr > 0.05           => Collapse Prevention (CP)
  scores=np.array([0,0,0]) #scores: [io_score, ls_score, cp_score]
  for k in range(idr.shape[0]): #tallying the scores over the IDR array
    if idr[k]<0.007: #if IDR < 0.007
      scores[0]+=1 #increment scores[0] (IO) by 1
    elif idr[k]>0.05: #if IDR > 0.05
      scores[2]+=1 #increment scores[2] (CP) by 1
    else: #if 0.007 <= IDR <= 0.05
      scores[1]+=1 #increment scores[1] (LS) by 1

  #the most severe class present is used to label the pair
  if scores[2]>0: #any CP sample
    scores = [0,0,1]
  elif scores[1]>0: #any LS sample (and no CP)
    scores = [0,1,0]
  else: #only IO samples
    scores = [1,0,0]

  noisy_classification[int(i/2)]=scores #adding the label to the noisy_classification array
  scores=np.array([0,0,0]) #resetting the scores array
#calculating accuracy without denoising

b=0 #counter for matching classifications
for i in range(pure_classification.shape[0]): #iterating over pure_classification
  if pure_classification[i][0] == noisy_classification[i][0] and pure_classification[i][1] == noisy_classification[i][1] and pure_classification[i][2] == noisy_classification[i][2]: #if the pure and noisy classifications match
    b+=1 #increment b by 1

print("accuracy without denoising = ",end="")
print(np.float64(b)/pure_classification.shape[0]) #accuracy = matches / total number of classifications
print("denoising noisy test set")
244
#DENOISING TEST_NOISY_ACC
245
test_denoised_acc = np.zeros((test_noisy_acc.shape[0],test_noisy_acc.shape[1]),np.float64) # create empty array of zeros to store denoised signals of test_noisy
246
for i in range(test_noisy_acc.shape[0]): #iterate over test_noisy dataset
247
  z = rfft(test_noisy_acc[i]) #convert to freq DOMAIN
248
  z = z.reshape(1,701,1) #reshape the array containing the signal
249
  test_denoised_acc[i] = irfft(new_model.predict(z))#denoise the signal and convert it back to time domain an store it.
250
251
252
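#Optional speed-up (a hedged alternative; assumes memory allows batching every test
#signal in a single predict call):
#Z = rfft(test_noisy_acc, axis=1).reshape(test_noisy_acc.shape[0], test_noisy_acc.shape[1], 1)
#test_denoised_acc = irfft(new_model.predict(Z), axis=1)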
print("CALCULATING IDR BY CALCULATING RELATIVE DISPLACEMENTS BY DOUBLE INTEGRATION:denoised")
253
#GETTING CLASSIFICATION ARRAY FOR DENOISED SIGNALS
254
time = 1/360 #time taken between 2 readings. Sampling rate = 360Hz
255
256
denoised_classification = np.zeros((int(test_denoised_acc.shape[0]*0.5),3),np.float64)#creatng array of zeros for classinfiying the denoised signals
257
258
v = np.zeros(test_denoised_acc.shape,np.float64) #velocity
259
disp = np.zeros(test_denoised_acc.shape,np.float64) #displacement
260
261
262
for i in range(0,test_denoised_acc.shape[0]):#iterating over the test_denoised signals
263
  for j in range(1,test_denoised_acc.shape[1]):   #going over each sample
264
      v[i][j] = v[i][j-1] + (((test_denoised_acc[i][j-1]+test_denoised_acc[i][j])/2) * (time))#integrating for velocity
265
266
for i in range(0,test_denoised_acc.shape[0]):#iterating over the velocity signals
267
  for j in range(1,test_denoised_acc.shape[1]):   #going over each sample
268
      disp[i][j] = disp[i][j-1] + (((v[i][j-1]+v[i][j])/2) * (time))#integrating for displacement
269
270
for i in range(0,disp.shape[0],2):#going over adjacent displacement array
271
  idr = np.zeros(disp.shape[1],np.float64)#creating array of zeros for IDRs
272
273
  for j in range(disp.shape[1]):#going over adjacent displacement arrays
274
    idr[j] = ( np.abs(disp[i][j]-disp[i+1][j]) )/(floor_height)#calculating IDRs
275
276
  '''
277
 if idr < 0.007       => Immediate Occupancy
278
 if idr 0.007 to 0.05 => Life Safety
279
 if idr >0.05         => Collapse prevention
280
 '''
281
  scores=np.array([0,0,0])#scores
282
  '''
283
 io_score=0th index
284
 ls_score=1st index
285
 cp_score=2nd index
286
 '''
287
  for k in range (idr.shape[0]):#calculating scores for function/array of IDRs
288
    if idr[k]<0.007:#if IDR<0.007
289
      scores[0]+=1 #increment scores[0] by 1
290
    elif idr[k]>0.05:#if IDR>0.05
291
      scores[2]+=1 #increment scores[2] by 1
292
    else:#if 0.007<IDR<0.05
293
      scores[1]+=1#increment scores[2] by 1
294
295
#most severe score is considered for labeling the dataset
296
  if scores[2]>0:#if scores[2]>0:
297
    scores = [0,0,1]#scores becomes [0,0,1]
298
  elif scores[1]>0:#if scores[1]>0 and scores[2]=0:
299
    scores = [0,1,0]#scores becomes [0,1,0]
300
  else: #if scores[0]>0 and scores[1]=0 and scores[2]=0
301
    scores = [1,0,0]#scores becomes [1,0,0]
302
303
  denoised_classification[int(i/2)]=scores#adding scores to denoised_classification array
304
  scores=np.array([0,0,0])#resetting scores array
305
306
#calculating accuracy with denoising

b=0 #counter for matching classifications
for i in range(pure_classification.shape[0]): #iterating over pure_classification
  if pure_classification[i][0] == denoised_classification[i][0] and pure_classification[i][1] == denoised_classification[i][1] and pure_classification[i][2] == denoised_classification[i][2]: #if the pure and denoised classifications match
    b+=1 #increment b by 1

print("accuracy with denoising = ",end="")
print(np.float64(b)/pure_classification.shape[0]) #accuracy = matches / total number of classifications

print("DIRECT CLASSIFICATION USING ACCELEROMETER DATA")
318
##DIRECT CLASSIFICATION USING ACCELEROMETER DATA
319
320
#RELATIVE ACCELERATION CALCULATION
321
rel_acc_noisy = np.zeros((int(noisy_acc.shape[0]*0.5),noisy_acc.shape[1]),dtype=np.float64) # Creating array to store relative accelerations between noisy data
322
rel_acc_pure = np.zeros((int(pure_acc.shape[0]*0.5),pure_acc.shape[1]),dtype=np.float64) # Creating array to store relative accelerations between pure data
323
324
a=0 #a is assigned to 0
325
326
for i in range(rel_acc_pure.shape[0]):
327
  rel_acc_noisy[i] = noisy_acc[a+1] - noisy_acc[a] #evaluating the relative accelerations between adjacent noisy_acc
328
  rel_acc_pure[i]  = pure_acc[a+1] - pure_acc[a] #evaluating the relative accelerations between adjacent pure_acc
329
  a+=2 #incrementing a by 2 to get the next 2 accelerations
330
rel_acc_noisy = rel_acc_noisy.reshape(rel_acc_noisy.shape[0],rel_acc_noisy.shape[1],1) #reshaping the array
331
332
print("Getting the pure classifications from pure_acc")
333
### Getting the pure classifications from pure_acc
334
#For relative acceleration calculation we assume 2 simultaneous accelerometer-time signals
335
336
time = 1/360 #time taken between 2 readings. Sampling rate = 360Hz
337
338
pure_classification2 = np.zeros((int(pure_acc.shape[0]*0.5),3),np.float64) #creatng array of zeros for classinfiying the pure training signals
339
340
v = np.zeros(pure_acc.shape,np.float64) #velocity
341
disp = np.zeros(pure_acc.shape,np.float64) #displacement
342
343
344
for i in range(0,pure_acc.shape[0]): #iterating over the training pure signals
345
  for j in range(1,pure_acc.shape[1]):  #going over each sample
346
      v[i][j] = v[i][j-1] + (((pure_acc[i][j-1]+pure_acc[i][j])/2) * (time)) #integrating for velocity
347
348
for i in range(0,pure_acc.shape[0]): #iterating over velocities
349
  for j in range(1,pure_acc.shape[1]):   #double integration.
350
      disp[i][j] = disp[i][j-1] + (((v[i][j-1]+v[i][j])/2) * (time))#integrating for displacement
351
352
for i in range(0,disp.shape[0],2):#going over adjacent displacement array
353
  idr = np.zeros(disp.shape[1],np.float64)#creating array of zeros for IDRs
354
355
  for j in range(disp.shape[1]):#going over adjacent displacement arrays
356
    idr[j] = ( np.abs(disp[i][j]-disp[i+1][j]) )/(floor_height)#calculating IDRs
357
358
  '''
359
 if idr < 0.007       => Immediate Occupancy
360
 if idr 0.007 to 0.05 => Life Safety
361
 if idr >0.05         => Collapse prevention
362
 '''
363
  scores=np.array([0,0,0])#scores
364
  '''
365
 io_score=0th index
366
 ls_score=1st index
367
 cp_score=2nd index
368
 '''
369
  for k in range (idr.shape[0]):#calculating scores for function/array of IDRs
370
    if idr[k]<0.007:#if IDR<0.007
371
      scores[0]+=1 #increment scores[0] by 1
372
    elif idr[k]>0.05:#if IDR>0.05
373
      scores[2]+=1 #increment scores[2] by 1
374
    else:#if 0.007<IDR<0.05
375
      scores[1]+=1#increment scores[2] by 1
376
377
  scores = np.floor(scores/(np.amax(scores))) #putting 1 in position that has maximum score and 0 in other positions
378
  pure_classification2[int(i/2)]=scores #adding scores to denoised_classification array
379
  scores=np.array([0,0,0])#resetting scores array
380
381
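#A hedged caveat: np.floor(scores/np.amax(scores)) places a 1 wherever a count ties the
#maximum, so a tie can yield more than one 1 in a label. A single-class alternative would
#be one-hot encoding the argmax, e.g. np.eye(3)[np.argmax(scores)].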
print("CNN FOR DIRECT CLASSIFICATION")
382
###CNN FOR DIRECT CLASSIFICATION
383
model3 = keras.Sequential([ #sequential model
384
   keras.layers.Conv1D(32, 32, strides=1, activation='relu'), #convolutional layer, kernal size=32, number of filters = 32
385
   keras.layers.Conv1D(32, 32, strides=1, activation='relu'),#convolutional layer, kernal size=32, number of filters = 32
386
   keras.layers.MaxPool1D(pool_size=8),#maxpool layer, pooling filtersize = 8
387
   keras.layers.Flatten(), #flatten layer
388
   keras.layers.Dense(512, activation='relu'),#fully connected layer, size = 512
389
   keras.layers.Dense(512, activation='relu'), #fully connected layer, size = 512
390
   keras.layers.Dense(3, activation='softmax')]) #fully connected layer, size = 3 to get output as one of the 3 classes
391
392
model3.compile(loss=keras.losses.categorical_crossentropy,optimizer=keras.optimizers.SGD(),metrics=['accuracy']) # creating model using SGD optimizer and catagorical cross-entropy loss function.
393
394
model3.fit(rel_acc_noisy, pure_classification2, epochs=5, batch_size=16) # training the model. 5 epochs , batch size of 16
395
396
print("ANN FOR DIRECT CLASSIFICATION")
397
#ANN FOR DIRECT CLASSIFICATION
398
model4 = keras.Sequential([ #sequential model
399
   keras.layers.Flatten(),#flatten layer
400
   keras.layers.Dense(512, activation='relu'),#fully connected layer, size = 512
401
   keras.layers.Dense(512, activation='relu'),#fully connected layer, size = 512
402
   keras.layers.Dense(512, activation='relu'),#fully connected layer, size = 512
403
   keras.layers.Dense(3, activation='softmax')])#fully connected layer, size = 3 to get output as one of the 3 classes
404
405
model4.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.SGD(),metrics=['accuracy']) # creating model using SGD optimizer and catagorical cross-entropy loss function.
406
407
model4.fit(rel_acc_noisy, pure_classification2, epochs=5, batch_size=4)# training the model. 5 epochs , batch size of 4
408
409
print("final accuracy testing on unseen data!")
410
e = np.zeros((int(test_noisy_acc.shape[0]*0.5),test_noisy_acc.shape[1]),dtype=np.float64) #creating empty array  for storing test_noisy dataset's relative accelerations.
411
#e is the relative acceleration for the test_noisy_acc
412
a=0 # a   =   0
413
414
for i in range(e.shape[0]): # iterating over test_noisy dataset
415
  e[i] = test_noisy_acc[a+1] - test_noisy_acc[a] # evaluating relative acceletation and storing it in e
416
  a+=2 # increment a by 2 to get the next 2 accelerations.
417
e = e.reshape(e.shape[0],e.shape[1],1) # reshape the array e.
418
419
#EVALUATING MODELS
420
model3.evaluate(e,pure_classification) # evaluating the performance of the CNN on unseen data
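
#The ANN can be scored the same way (an optional addition, not in the original listing):
#model4.evaluate(e, pure_classification) #evaluating model4 on the same unseen data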