DAE-DCAP.py
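"""Denoising autoencoder (DAE) feature extraction for DCAP.

Reads expression profiles from simulation.csv, trains a three-layer
autoencoder down to a 2-D bottleneck, writes the learned embedding to
fea.csv, and clusters it with k-means (k = 2), reporting the silhouette
score of the resulting partition.
"""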
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
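# NOTE: this script targets the TensorFlow 1.x API (tf.placeholder, tf.Session).
# On TensorFlow 2.x it should run via the compat layer:
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()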

# Load the expression matrix; pd.read_csv takes the path directly,
# so the explicit open() is unnecessary.
data = pd.read_csv("simulation.csv")
# print(data.shape)

# Samples are stored as columns in the CSV, so transpose to
# (n_samples, n_features) for training.
tcga_input = np.transpose(data.values)
print(tcga_input.shape[1])
length1 = tcga_input.shape[1]   # number of input features per sample

# Training hyperparameters
learning_rate = 0.0001
training_epochs = 100
batch_size = 125
display_step = 2                # print the loss every display_step epochs
examples_to_show = 10           # only used by the commented-out MNIST demo below
n_input = tcga_input.shape[1]

# Input placeholder: one row per sample, n_input features.
X = tf.placeholder("float", [None, n_input])

# Layer sizes: the encoder compresses n_input -> 200 -> 50 -> 2;
# the 2-D bottleneck is the embedding that gets clustered below.
n_hidden_1 = 200
n_hidden_2 = 50
n_hidden_3 = 2

# Mirrored encoder/decoder parameters, initialized from a standard normal.
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'encoder_h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_2])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h3': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b2': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b3': tf.Variable(tf.random_normal([n_input])),
}
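
# Encoder and decoder are symmetric stacks of three tanh layers; tanh keeps
# activations in [-1, 1], which also bounds the reconstruction range.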
def encoder(x):
    layer_1 = tf.nn.tanh(tf.add(tf.matmul(x, weights['encoder_h1']),
                                biases['encoder_b1']))
    layer_2 = tf.nn.tanh(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
                                biases['encoder_b2']))
    layer_3 = tf.nn.tanh(tf.add(tf.matmul(layer_2, weights['encoder_h3']),
                                biases['encoder_b3']))
    return layer_3

def decoder(x):
    layer_1 = tf.nn.tanh(tf.add(tf.matmul(x, weights['decoder_h1']),
                                biases['decoder_b1']))
    layer_2 = tf.nn.tanh(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
                                biases['decoder_b2']))
    layer_3 = tf.nn.tanh(tf.add(tf.matmul(layer_2, weights['decoder_h3']),
                                biases['decoder_b3']))
    return layer_3

# Wire the graph: reconstruct the input from the 2-D code.
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)

y_pred = decoder_op   # reconstruction
y_true = X            # target (the batch that was fed in)

# Mean squared reconstruction error, minimized with Adam.
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
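# Training: iterate over mini-batches of noisy inputs, then embed the full
# (clean) data set with the trained encoder and cluster the result.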
with tf.Session() as sess:
    # tf.initialize_all_variables() was deprecated in favor of
    # tf.global_variables_initializer() as of TF 0.12.
    if int(tf.__version__.split('.')[0]) < 1 and int(tf.__version__.split('.')[1]) < 12:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
    total_batch = int(len(tcga_input) / batch_size)
    for epoch in range(training_epochs):
        for i in range(total_batch):
            # Corrupt each batch with uniform noise in [0, 0.3). Note that the
            # reconstruction target (y_true = X) is this same noisy batch, so
            # the noise acts as input jitter; a strict denoising setup would
            # feed the clean batch as the target through a second placeholder.
            batch_xs = tcga_input[i * batch_size:(i + 1) * batch_size] + 0.3 * np.random.rand(length1)  # added noise
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c))
        if epoch == training_epochs - 1:
            # After the final epoch, embed the full (clean) data set and save it.
            fea_output = sess.run([encoder_op], feed_dict={X: tcga_input})
            # print(fea_output)
            print(np.array(fea_output).shape)
            np.savetxt('fea.csv', np.array(fea_output[0]), delimiter=',')
            dd = np.array(fea_output[0])
    print("Optimization Finished!")
    # Cluster the 2-D embeddings with k-means (k = 2) and score the partition.
    print(dd.shape)
    clf = KMeans(n_clusters=2)
    clf.fit(dd)
    centers = clf.cluster_centers_
    labels = clf.labels_
    silhouetteScore = silhouette_score(dd, labels, metric='euclidean')
    print(centers)
    print(silhouetteScore)

    # Leftover MNIST visualization from the autoencoder tutorial code this
    # script reuses; `mnist` is undefined here, so it stays commented out.
    # encode_decode = sess.run(
    #     y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
    # f, a = plt.subplots(2, 10, figsize=(10, 2))  # returns fig, axes
    # for i in range(examples_to_show):
    #     a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
    #     a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
    # plt.show()