# model.py — forked from hengruizhang98/CCA-SSG
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn import GraphConv

class LogReg(nn.Module):
    """Logistic-regression head used for linear evaluation of frozen node embeddings."""
    def __init__(self, hid_dim, out_dim):
        super(LogReg, self).__init__()
        self.fc = nn.Linear(hid_dim, out_dim)

    def forward(self, x):
        ret = self.fc(x)
        return ret
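
# Hypothetical usage sketch (not part of the original file): LogReg is typically
# trained on frozen embeddings for linear evaluation. `emb`, `labels`, `train_idx`,
# `test_idx` and `num_classes` are illustrative placeholders, not names from this repo.
def _demo_logreg_eval(emb, labels, train_idx, test_idx, num_classes, epochs=100):
    import torch

    clf = LogReg(emb.shape[1], num_classes)
    opt = torch.optim.Adam(clf.parameters(), lr=1e-2, weight_decay=1e-4)
    for _ in range(epochs):
        opt.zero_grad()
        loss = F.cross_entropy(clf(emb[train_idx]), labels[train_idx])
        loss.backward()
        opt.step()
    preds = clf(emb[test_idx]).argmax(dim=1)
    return (preds == labels[test_idx]).float().mean().item()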

class MLP(nn.Module):
    """Two-layer MLP encoder. The unused first argument of forward() keeps the
    call signature compatible with GCN.forward(graph, x)."""
    def __init__(self, nfeat, nhid, nclass, use_bn=True):
        super(MLP, self).__init__()
        self.layer1 = nn.Linear(nfeat, nhid, bias=True)
        self.layer2 = nn.Linear(nhid, nclass, bias=True)
        self.bn = nn.BatchNorm1d(nhid)
        self.use_bn = use_bn
        self.act_fn = nn.ReLU()

    def forward(self, _, x):
        x = self.layer1(x)
        if self.use_bn:
            x = self.bn(x)
        x = self.act_fn(x)
        x = self.layer2(x)
        return x

class GCN(nn.Module):
    """Multi-layer GCN encoder built from DGL GraphConv layers with symmetric
    ('both') normalization; ReLU after every layer except the last. Note that
    with n_layers == 1 the single layer maps in_dim -> hid_dim, as in the
    upstream implementation."""
    def __init__(self, in_dim, hid_dim, out_dim, n_layers):
        super().__init__()
        self.n_layers = n_layers
        self.convs = nn.ModuleList()
        self.convs.append(GraphConv(in_dim, hid_dim, norm='both'))
        if n_layers > 1:
            for _ in range(n_layers - 2):
                self.convs.append(GraphConv(hid_dim, hid_dim, norm='both'))
            self.convs.append(GraphConv(hid_dim, out_dim, norm='both'))

    def forward(self, graph, x):
        for i in range(self.n_layers - 1):
            x = F.relu(self.convs[i](graph, x))
        x = self.convs[-1](graph, x)
        return x
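
# Hypothetical usage sketch (not part of the original file): run the GCN encoder
# on a random DGL graph. Graph size, feature width and layer count are illustrative.
def _demo_gcn():
    import torch
    import dgl

    g = dgl.rand_graph(100, 500)   # 100 nodes, 500 random edges
    g = dgl.add_self_loop(g)       # GraphConv(norm='both') rejects 0-in-degree nodes by default
    feat = torch.randn(100, 32)    # node features: (num_nodes, in_dim)

    encoder = GCN(in_dim=32, hid_dim=64, out_dim=64, n_layers=2)
    out = encoder(g, feat)         # -> (100, 64)
    return out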

class CCA_SSG(nn.Module):
    """CCA-SSG model: a shared encoder (GCN or MLP) applied to two augmented
    views of the graph, followed by per-dimension standardization of the
    resulting embeddings."""
    def __init__(self, in_dim, hid_dim, out_dim, n_layers, use_mlp=False):
        super().__init__()
        if not use_mlp:
            self.backbone = GCN(in_dim, hid_dim, out_dim, n_layers)
        else:
            self.backbone = MLP(in_dim, hid_dim, out_dim)

    def get_embedding(self, graph, feat):
        out = self.backbone(graph, feat)
        return out.detach()

    def forward(self, graph1, feat1, graph2, feat2):
        h1 = self.backbone(graph1, feat1)
        h2 = self.backbone(graph2, feat2)

        # Standardize each embedding dimension across nodes (zero mean, unit std).
        z1 = (h1 - h1.mean(0)) / h1.std(0)
        z2 = (h2 - h2.mean(0)) / h2.std(0)

        return z1, z2
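
# Hypothetical training-step sketch (not part of the original file): the loss
# follows the CCA-SSG objective from the paper — an invariance term plus a
# lambda-weighted decorrelation term. The random edge/feature dropping used to
# build the two views lives elsewhere in the repo and is only assumed here;
# `optimizer` and `lambd` are illustrative parameters.
def _demo_cca_ssg_step(model, graph1, feat1, graph2, feat2, optimizer, lambd=1e-3):
    import torch

    z1, z2 = model(graph1, feat1, graph2, feat2)
    n, d = z1.shape

    c = torch.mm(z1.T, z2) / n     # cross-correlation between the two views
    c1 = torch.mm(z1.T, z1) / n    # covariance of view 1
    c2 = torch.mm(z2.T, z2) / n    # covariance of view 2

    iden = torch.eye(d, device=z1.device)
    loss_inv = -torch.diagonal(c).sum()                              # align matched dimensions
    loss_dec = (iden - c1).pow(2).sum() + (iden - c2).pow(2).sum()   # decorrelate dimensions
    loss = loss_inv + lambd * loss_dec

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()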