#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/8/7 14:43
# @Author : Li Xiao
# @File : AE_run.py
import pandas as pd
import numpy as np
import argparse
import os
from tqdm import tqdm
import autoencoder_model
import torch
import torch.utils.data as Data
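
# Example usage (the CSV names below are placeholders for your own omics files;
# each file holds one omics matrix with the sample ID in the first column and
# one feature per remaining column):
#   python AE_run.py -p1 omics1.csv -p2 omics2.csv -p3 omics3.csv -m 0 -s 0 -e 100 -a 0.6 -b 0.1 -c 0.3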

def setup_seed(seed):
    torch.manual_seed(seed)
    np.random.seed(seed)
    # also seed CUDA so that GPU runs are reproducible
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

def work(data, in_feas, lr=0.001, bs=32, epochs=100, device=torch.device('cpu'), a=0.4, b=0.3, c=0.3, mode=0, topn=100, latent_dim=100):
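    """Train the multi-modal autoencoder and/or integrate the three omics blocks.

    `data` holds a 'Sample' column followed by the features of the three omics
    types; `in_feas` lists the feature count of each block. Mode 0 trains and
    integrates, mode 1 only trains, mode 2 only integrates with a previously
    saved model. a, b and c are the per-omics weights (they must sum to 1) and
    latent_dim is the size of the shared latent layer.
    """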
    # sample names
    sample_name = data['Sample'].tolist()

    # convert the data to tensors (Y is a dummy label vector, only needed by TensorDataset)
    X, Y = data.iloc[:, 1:].values, np.zeros(data.shape[0])
    TX, TY = torch.tensor(X, dtype=torch.float, device=device), torch.tensor(Y, dtype=torch.float, device=device)

    # train an AE model
    if mode == 0 or mode == 1:
        print('Training model...')
        Tensor_data = Data.TensorDataset(TX, TY)
        train_loader = Data.DataLoader(Tensor_data, batch_size=bs, shuffle=True)

        # initialize a model
        mmae = autoencoder_model.MMAE(in_feas, latent_dim=latent_dim, a=a, b=b, c=c)
        mmae.to(device)
        mmae.train()
        mmae.train_MMAE(train_loader, learning_rate=lr, device=device, epochs=epochs)
        mmae.eval()  # switch to eval mode before saving and testing
        torch.save(mmae, 'model/AE/MMAE_model.pkl')
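        # NOTE: extract_features below assumes that train_MMAE (implemented in
        # autoencoder_model) also saves a checkpoint to model/AE/model_{epoch}.pkl
        # every 10 epochs.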

    # load the saved model and use it to reduce the dimensions
    if mode == 0 or mode == 2:
        print('Get the latent layer output...')
        mmae = torch.load('model/AE/MMAE_model.pkl')
        omics_1 = TX[:, :in_feas[0]]
        omics_2 = TX[:, in_feas[0]:in_feas[0]+in_feas[1]]
        omics_3 = TX[:, in_feas[0]+in_feas[1]:in_feas[0]+in_feas[1]+in_feas[2]]
        latent_data, decoded_omics_1, decoded_omics_2, decoded_omics_3 = mmae.forward(omics_1, omics_2, omics_3)
        latent_df = pd.DataFrame(latent_data.detach().cpu().numpy())
        latent_df.insert(0, 'Sample', sample_name)
        # save the integrated data (dim = latent_dim)
        latent_df.to_csv('result/latent_data.csv', header=True, index=False)

    print('Extract features...')
    extract_features(data, in_feas, epochs, topn)
    return

def extract_features(data, in_feas, epochs, topn=100):
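    """Rank the features of each omics block and save the top N per checkpoint.

    For every checkpoint (one per 10 training epochs) the importance of a feature
    is the sum of the absolute weights of its first encoder layer multiplied by
    the feature's standard deviation; the top N feature names are written to
    result/topn_omics_*.csv, one column per checkpoint.
    """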
    # split the merged matrix back into the three omics blocks
    data_omics_1 = data.iloc[:, 1: 1+in_feas[0]]
    data_omics_2 = data.iloc[:, 1+in_feas[0]: 1+in_feas[0]+in_feas[1]]
    data_omics_3 = data.iloc[:, 1+in_feas[0]+in_feas[1]: 1+in_feas[0]+in_feas[1]+in_feas[2]]

    # all feature names of each omics block
    feas_omics_1 = data_omics_1.columns.tolist()
    feas_omics_2 = data_omics_2.columns.tolist()
    feas_omics_3 = data_omics_3.columns.tolist()

    # standard deviation of each feature
    std_omics_1 = data_omics_1.std(axis=0)
    std_omics_2 = data_omics_2.std(axis=0)
    std_omics_3 = data_omics_3.std(axis=0)

    # record the top N features every 10 epochs
    topn_omics_1 = pd.DataFrame()
    topn_omics_2 = pd.DataFrame()
    topn_omics_3 = pd.DataFrame()

    # checkpoints to inspect: epoch_ls = [10, 20, ...]; if epochs % 10 != 0, also add the last epoch
    epoch_ls = list(range(10, epochs+1, 10))
    if epochs % 10 != 0:
        epoch_ls.append(epochs)
    for epoch in tqdm(epoch_ls):
        # load the checkpoint saved at this epoch
        mmae = torch.load('model/AE/model_{}.pkl'.format(epoch))
        # get the model parameters
        model_dict = mmae.state_dict()
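        # NOTE: the keys 'encoder_omics_*.0.weight' assume that each encoder in MMAE
        # is a Sequential whose first module is the Linear layer that maps the input
        # features to the latent space; adjust them if autoencoder_model changes.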

        # absolute value of the weights; each matrix is transposed to shape (n_features, latent_dim)
        weight_omics1 = np.abs(model_dict['encoder_omics_1.0.weight'].detach().cpu().numpy().T)
        weight_omics2 = np.abs(model_dict['encoder_omics_2.0.weight'].detach().cpu().numpy().T)
        weight_omics3 = np.abs(model_dict['encoder_omics_3.0.weight'].detach().cpu().numpy().T)

        weight_omics1_df = pd.DataFrame(weight_omics1, index=feas_omics_1)
        weight_omics2_df = pd.DataFrame(weight_omics2, index=feas_omics_2)
        weight_omics3_df = pd.DataFrame(weight_omics3, index=feas_omics_3)

        # weight sum of each feature --> sum of each row
        weight_omics1_df['Weight_sum'] = weight_omics1_df.sum(axis=1)
        weight_omics2_df['Weight_sum'] = weight_omics2_df.sum(axis=1)
        weight_omics3_df['Weight_sum'] = weight_omics3_df.sum(axis=1)
        weight_omics1_df['Std'] = std_omics_1
        weight_omics2_df['Std'] = std_omics_2
        weight_omics3_df['Std'] = std_omics_3

        # importance = weight sum * standard deviation
        weight_omics1_df['Importance'] = weight_omics1_df['Weight_sum']*weight_omics1_df['Std']
        weight_omics2_df['Importance'] = weight_omics2_df['Weight_sum']*weight_omics2_df['Std']
        weight_omics3_df['Importance'] = weight_omics3_df['Weight_sum']*weight_omics3_df['Std']

        # select the top N features by importance
        fea_omics_1_top = weight_omics1_df.nlargest(topn, 'Importance').index.tolist()
        fea_omics_2_top = weight_omics2_df.nlargest(topn, 'Importance').index.tolist()
        fea_omics_3_top = weight_omics3_df.nlargest(topn, 'Importance').index.tolist()

        # store this checkpoint's top N features as one column
        col_name = 'epoch_' + str(epoch)
        topn_omics_1[col_name] = fea_omics_1_top
        topn_omics_2[col_name] = fea_omics_2_top
        topn_omics_3[col_name] = fea_omics_3_top

    # save the top N features of all checkpoints
    topn_omics_1.to_csv('result/topn_omics_1.csv', header=True, index=False)
    topn_omics_2.to_csv('result/topn_omics_2.csv', header=True, index=False)
    topn_omics_3.to_csv('result/topn_omics_3.csv', header=True, index=False)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', '-m', type=int, choices=[0,1,2], default=0,
                        help='Mode 0: train & integrate, Mode 1: train only, Mode 2: integrate only, default: 0.')
    parser.add_argument('--seed', '-s', type=int, default=0, help='Random seed, default: 0.')
    parser.add_argument('--path1', '-p1', type=str, required=True, help='The first omics file name.')
    parser.add_argument('--path2', '-p2', type=str, required=True, help='The second omics file name.')
    parser.add_argument('--path3', '-p3', type=str, required=True, help='The third omics file name.')
    parser.add_argument('--batchsize', '-bs', type=int, default=32, help='Training batch size, default: 32.')
    parser.add_argument('--learningrate', '-lr', type=float, default=0.001, help='Learning rate, default: 0.001.')
    parser.add_argument('--epoch', '-e', type=int, default=100, help='Training epochs, default: 100.')
    parser.add_argument('--latent', '-l', type=int, default=100, help='The latent layer dim, default: 100.')
    parser.add_argument('--device', '-d', type=str, choices=['cpu', 'gpu'], default='cpu', help='Train on cpu or gpu, default: cpu.')
    parser.add_argument('--a', '-a', type=float, default=0.6, help='[0,1], float, weight for the first omics data, default: 0.6.')
    parser.add_argument('--b', '-b', type=float, default=0.1, help='[0,1], float, weight for the second omics data, default: 0.1.')
    parser.add_argument('--c', '-c', type=float, default=0.3, help='[0,1], float, weight for the third omics data, default: 0.3.')
    parser.add_argument('--topn', '-n', type=int, default=100, help='Extract top N features every 10 epochs, default: 100.')
    args = parser.parse_args()

    # read data
    omics_data1 = pd.read_csv(args.path1, header=0, index_col=None)
    omics_data2 = pd.read_csv(args.path2, header=0, index_col=None)
    omics_data3 = pd.read_csv(args.path3, header=0, index_col=None)

    # check whether a GPU is available
    device = torch.device('cpu')
    if args.device == 'gpu':
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # set the random seed
    setup_seed(args.seed)

    # the three weights must sum to 1; compare with a tolerance to avoid
    # float rounding issues (e.g. 0.6 + 0.1 + 0.3 != 1.0 in IEEE 754)
    if abs(args.a + args.b + args.c - 1.0) > 1e-6:
        print('The sum of weights must be 1.')
        exit(1)

    # dims of each omics data
    in_feas = [omics_data1.shape[1] - 1, omics_data2.shape[1] - 1, omics_data3.shape[1] - 1]
    # name the first column 'Sample' in all three files
    omics_data1.rename(columns={omics_data1.columns.tolist()[0]: 'Sample'}, inplace=True)
    omics_data2.rename(columns={omics_data2.columns.tolist()[0]: 'Sample'}, inplace=True)
    omics_data3.rename(columns={omics_data3.columns.tolist()[0]: 'Sample'}, inplace=True)

    omics_data1.sort_values(by='Sample', ascending=True, inplace=True)
    omics_data2.sort_values(by='Sample', ascending=True, inplace=True)
    omics_data3.sort_values(by='Sample', ascending=True, inplace=True)

    # merge the multi-omics data; only the common samples are kept
    Merge_data = pd.merge(omics_data1, omics_data2, on='Sample', how='inner')
    Merge_data = pd.merge(Merge_data, omics_data3, on='Sample', how='inner')
    Merge_data.sort_values(by='Sample', ascending=True, inplace=True)

    # make sure the output directories exist
    os.makedirs('model/AE', exist_ok=True)
    os.makedirs('result', exist_ok=True)

    # train the model, reduce the dimensions and extract features
    work(Merge_data, in_feas, lr=args.learningrate, bs=args.batchsize, epochs=args.epoch, device=device, a=args.a, b=args.b, c=args.c, mode=args.mode, topn=args.topn, latent_dim=args.latent)
    print('Success! Results are saved in the result/ directory.')