实验部分：

图数据的构建：

节点嵌入：

class Embedding(nn.Module):
    """CNN feature extractor that maps a batch of single-channel 48x48
    images to fixed-size node embeddings for the graph construction step.

    Expected fields on ``args``:
        embedding_size: dimensionality of the output embedding vector.
        nf_cnn: base number of convolutional filters (``ndf``).

    Input:  tensor of shape (B, 1, 48, 48)
    Output: tensor of shape (B, embedding_size)
    """

    def __init__(self, args):
        super(Embedding, self).__init__()
        self.emb_size = args.embedding_size
        self.ndf = args.nf_cnn
        self.args = args

        # Block 1: (B, 1, 48, 48) -> conv k=3, pad=1 keeps 48x48; pool halves to 24x24.
        self.conv1 = nn.Conv2d(1, self.ndf, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.ndf)

        # Block 2: (B, ndf, 24, 24) -> conv k=3, NO padding gives 22x22; pool -> 11x11.
        self.conv2 = nn.Conv2d(self.ndf, int(self.ndf * 1.5), kernel_size=3, bias=False)
        self.bn2 = nn.BatchNorm2d(int(self.ndf * 1.5))

        # Block 3: (B, 1.5*ndf, 11, 11) -> conv k=3, pad=1 keeps 11x11; pool (floor) -> 5x5.
        self.conv3 = nn.Conv2d(int(self.ndf * 1.5), self.ndf * 2, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.ndf * 2)
        self.drop_3 = nn.Dropout2d(0.4)

        # Block 4: (B, 2*ndf, 5, 5) -> conv k=3, pad=1 keeps 5x5; pool (floor) -> 2x2.
        self.conv4 = nn.Conv2d(self.ndf * 2, self.ndf * 4, kernel_size=3, padding=1, bias=False)
        self.bn4 = nn.BatchNorm2d(self.ndf * 4)
        self.drop_4 = nn.Dropout2d(0.5)

        # Head: flatten (B, 4*ndf, 2, 2) -> 4*ndf*2*2 features, project to emb_size,
        # then batch-normalize the embedding.
        self.fc1 = nn.Linear(self.ndf * 4 * 2 * 2, self.emb_size, bias=True)
        self.bn_fc = nn.BatchNorm1d(self.emb_size)

    def forward(self, input):
        """Return (B, emb_size) embeddings for ``input`` of shape (B, 1, 48, 48)."""
        e1 = F.max_pool2d(self.bn1(self.conv1(input)), 2)   # (B, 1, 48, 48)    -> (B, ndf, 24, 24)
        x = F.leaky_relu(e1, 0.2, inplace=True)
        e2 = F.max_pool2d(self.bn2(self.conv2(x)), 2)       # (B, ndf, 24, 24)  -> (B, 1.5ndf, 11, 11)
        x = F.leaky_relu(e2, 0.2, inplace=True)
        e3 = F.max_pool2d(self.bn3(self.conv3(x)), 2)       # (B, 1.5ndf, 11, 11) -> (B, 2ndf, 5, 5)
        x = F.leaky_relu(e3, 0.2, inplace=True)
        x = self.drop_3(x)
        e4 = F.max_pool2d(self.bn4(self.conv4(x)), 2)       # (B, 2ndf, 5, 5)   -> (B, 4ndf, 2, 2)
        x = F.leaky_relu(e4, 0.2, inplace=True)
        x = self.drop_4(x)
        # Flatten spatial maps to a feature vector per sample.
        x_size = x.size()
        x = x.contiguous()
        x = x.view(x_size[0], -1)
        output = self.bn_fc(self.fc1(x))                    # (B, emb_size)

        return output

THE END