# LSTM-VAE.py
# Sequence variational autoencoder in Keras:
# LSTM encoder -> (z_mean, z_log_sigma) -> sampled z -> LSTM decoder.
1
import numpy as np
2
import pandas as pd
3
import random
4
5
from keras.models import Sequential
6
from keras.layers.core import Dense, Activation
7
8
from keras.layers.recurrent import LSTM
9
from keras.layers import Input, LSTM, RepeatVector, Masking, TimeDistributed, Lambda
10
from keras.losses import mse, binary_crossentropy, mean_squared_error
11
from keras.models import Model
12
from keras import backend as K
13
14
15
# --- model / training hyperparameters -------------------------------------
latent_dim = 3          # width of the latent code z
Intermediate_dim = 6    # hidden units in the encoder/decoder LSTMs
nb_epoch = 1000         # training epochs
batch_size = 100        # samples per gradient step
optimizer = 'adadelta'  # optimizer name passed to compile()

# X is the data matrix; it is defined elsewhere (expected shape: (samples, 7, 1))
22
23
# --- encoder: read the (7, 1) series and emit the latent Gaussian's params ---
inputs = Input(shape=(7, 1), name='InputTimeSeries')  # (timesteps, input_dim)
h_enc = LSTM(Intermediate_dim, name='EncoderLSTM')(inputs)  # sequence summary vector

# Project the summary vector onto the two parameter vectors of q(z|x).
z_mean = Dense(latent_dim, name='MeanVector')(h_enc)
z_log_sigma = Dense(latent_dim, name='SigmaVector')(h_enc)
30
31
# --- latent vector sampling -------------------------------------------------
def sampling(args):
    """Reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, I).

    `args` is a [mean, log-variance] pair of tensors (as wired up by the
    Lambda layer); returns a stochastic sample that keeps gradients flowing
    through both parameter tensors.
    """
    mu, log_var = args
    n_rows = K.shape(mu)[0]      # dynamic batch size
    n_cols = K.int_shape(mu)[1]  # static latent dimension
    eps = K.random_normal(shape=(n_rows, n_cols))
    # exp(0.5 * log_var) converts log-variance to standard deviation
    return mu + eps * K.exp(0.5 * log_var)
38
39
# Draw z from q(z|x) by routing [z_mean, z_log_sigma] through `sampling`.
z = Lambda(sampling, name='LatentVector', output_shape=(latent_dim,))([z_mean, z_log_sigma])
40
41
#VAE Loss: reconstruction term plus KL divergence to the unit Gaussian prior
def vae_loss(inputs, decoded):
    """Return the batch-mean VAE loss.

    Follows the Keras loss signature: `inputs` is y_true (this parameter
    shadows the module-level `inputs` placeholder of the same name) and
    `decoded` is y_pred. Closes over the module-level tensors `z_mean`
    and `z_log_sigma`.

    NOTE(review): K.binary_crossentropy assumes targets/predictions in
    [0, 1]; the decoder's final LSTM (default tanh) emits (-1, 1) and the
    scaling of X is not visible here -- confirm the data range, or
    consider an MSE reconstruction term instead.
    NOTE(review): despite its name, `z_log_sigma` is treated as a
    log-VARIANCE -- the `K.exp(z_log_sigma)` term below is the
    log-variance form of the closed-form KL.
    """
    # reconstruction: per-sample cross-entropy summed over the time axis
    xent_loss = K.sum(K.binary_crossentropy(inputs, decoded), axis=1)
    # KL(q(z|x) || N(0, I)) in closed form, summed over latent dimensions
    kl_loss = - 0.5 * K.sum(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
    return K.mean(xent_loss + kl_loss)
47
48
49
# --- decoder: expand the latent code back into a 7-step sequence -------------
h_dec = RepeatVector(7, name='EmbeddingtoTimeSeries')(z)  # timesteps copies of z
h_dec = LSTM(Intermediate_dim, name='DecoderLSTM1', return_sequences=True)(h_dec)
decoded = LSTM(1, name='DecoderLSTM2', return_sequences=True)(h_dec)  # back to input_dim

# End-to-end VAE, plus a standalone encoder mapping inputs to the latent mean.
v_autoencoder = Model(inputs, decoded)
encoder = Model(inputs, z_mean)

v_autoencoder.compile(optimizer=optimizer, loss=vae_loss)
# NOTE(review): X must be supplied externally with shape (samples, 7, 1).
# `nb_epoch` is the Keras-1 keyword (renamed `epochs` in Keras 2) -- confirm
# the installed Keras version before running.
v_autoencoder.fit(X, X, nb_epoch=nb_epoch, batch_size=batch_size)