HINT/module.py

import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
from torch.autograd import Variable
from torch.utils import data
from torch.utils.data import SequentialSampler
import matplotlib.pyplot as plt
import numpy as np
sigmoid = torch.nn.Sigmoid()
torch.manual_seed(0)

from HINT.gnn_layers import GraphConvolution, GraphAttention
torch.manual_seed(4)
np.random.seed(1)


class Highway(nn.Module):
    def __init__(self, size, num_layers):
        super(Highway, self).__init__()
        self.num_layers = num_layers
        self.nonlinear = nn.ModuleList([nn.Linear(size, size) for _ in range(num_layers)])
        self.linear = nn.ModuleList([nn.Linear(size, size) for _ in range(num_layers)])
        self.gate = nn.ModuleList([nn.Linear(size, size) for _ in range(num_layers)])
        self.f = F.relu

    def forward(self, x):
        """
        :param x: tensor with shape of [batch_size, size]
        :return: tensor with shape of [batch_size, size]

        Applies σ(x) ⨀ f(G(x)) + (1 - σ(x)) ⨀ Q(x) at each layer, where G and Q are affine
        transformations, f is a non-linear transformation, σ(x) is an affine transformation
        followed by a sigmoid non-linearity, and ⨀ is element-wise multiplication.
        """
        for layer in range(self.num_layers):
            gate = torch.sigmoid(self.gate[layer](x))
            nonlinear = self.f(self.nonlinear[layer](x))
            linear = self.linear[layer](x)
            x = gate * nonlinear + (1 - gate) * linear
        return x


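# A minimal usage sketch for Highway (not part of the original module): the width
# of 32 and the batch of 8 below are illustrative assumptions, chosen only to show
# the [batch_size, size] -> [batch_size, size] mapping the docstring describes.
def _demo_highway():
    highway = Highway(size=32, num_layers=2)
    x = torch.randn(8, 32)
    out = highway(x)
    assert out.shape == (8, 32)
    return out

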
class GCN(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout, init):
        super(GCN, self).__init__()

        self.gc1 = GraphConvolution(nfeat, nhid, init=init)
        self.gc2 = GraphConvolution(nhid, nclass, init=init)
        self.dropout = dropout

    def bottleneck(self, path1, path2, path3, adj, in_x):
        return F.relu(path3(F.relu(path2(F.relu(path1(in_x, adj)), adj)), adj))

    def forward(self, x, adj):
        x = F.dropout(F.relu(self.gc1(x, adj)), self.dropout, training=self.training)
        x = self.gc2(x, adj)
        return x
        # return F.log_softmax(x, dim=1)


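# A hedged usage sketch for GCN (not part of the original module): the feature
# sizes, the node count, and the identity adjacency are assumptions; whether
# GraphConvolution expects a dense or a normalized sparse adjacency depends on
# its implementation in HINT.gnn_layers.
def _demo_gcn():
    model = GCN(nfeat=16, nhid=32, nclass=2, dropout=0.5, init='uniform')
    model.eval()                      # disable dropout for a deterministic pass
    x = torch.randn(5, 16)            # 5 nodes, 16 features each
    adj = torch.eye(5)                # self-loop-only adjacency, assumed dense
    return model(x, adj)              # expected shape: [5, 2]

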
class GCN_drop_in(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout, init):
        super(GCN_drop_in, self).__init__()

        self.gc1 = GraphConvolution(nfeat, nhid, init=init)
        self.gc2 = GraphConvolution(nhid, nclass, init=init)
        self.dropout = dropout

    def bottleneck(self, path1, path2, path3, adj, in_x):
        return F.relu(path3(F.relu(path2(F.relu(path1(in_x, adj)), adj)), adj))

    def forward(self, x, adj):
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.dropout(F.relu(self.gc1(x, adj)), self.dropout, training=self.training)
        x = self.gc2(x, adj)

        return F.log_softmax(x, dim=1)


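# GCN_drop_in differs from GCN only by applying dropout to the raw input and by
# returning log-probabilities; a minimal sketch under the same assumptions as the
# GCN demo above:
def _demo_gcn_drop_in():
    model = GCN_drop_in(nfeat=16, nhid=32, nclass=3, dropout=0.5, init='uniform')
    model.eval()
    x = torch.randn(5, 16)
    adj = torch.eye(5)
    log_probs = model(x, adj)          # [5, 3] log-probabilities
    return log_probs.exp().sum(dim=1)  # each row should sum to roughly 1

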
class GAT(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        super(GAT, self).__init__()
        self.dropout = dropout

        self.attentions = [GraphAttention(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttention(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)

    def forward(self, x, adj):
        x = F.dropout(x, self.dropout, training=self.training)
        x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.elu(self.out_att(x, adj))
        return F.log_softmax(x, dim=1)


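# A hedged usage sketch for GAT (not part of the original module): the sizes,
# alpha, and head count are illustrative assumptions; a dense 0/1 adjacency is
# assumed here, with the same caveat as above about what GraphAttention expects.
def _demo_gat():
    model = GAT(nfeat=16, nhid=8, nclass=2, dropout=0.5, alpha=0.2, nheads=4)
    model.eval()
    x = torch.randn(5, 16)
    adj = torch.ones(5, 5)            # fully connected toy graph
    return model(x, adj)              # expected shape: [5, 2] log-probabilities

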
if __name__ == "__main__":
    gnn = GCN(
        nfeat=20,
        nhid=30,
        nclass=1,
        dropout=0.6,
        init='uniform')
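    # Hedged extension of the demo above (not in the original file): the node
    # count and the dense identity adjacency are assumptions; the expected
    # output shape is [num_nodes, nclass] = [5, 1].
    num_nodes = 5
    x = torch.randn(num_nodes, 20)
    adj = torch.eye(num_nodes)
    out = gnn(x, adj)
    print(out.shape)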