# MatchNet论文复现过程记录

## I. Network architecture

### C. MatchNet in training

```python
import torch
import torch.nn as nn

class FeatureNet(nn.Module):
    """Feature extraction tower of MatchNet (Han et al., CVPR 2015).

    Maps a single-channel 64x64 patch to a (64, 7, 7) feature map,
    i.e. 64 * 7 * 7 = 3136 values per patch once flattened, which is what
    MetricNet's first Linear layer (in_features = 2 * 3136 = 6272) expects.

    NOTE(review): the original snippet had only the five ReLU layers here —
    the Conv2d/MaxPool2d layers were evidently lost in transcription. They
    are restored below following the paper's architecture (conv0..conv4 with
    3x3/stride-2 max pools, no pool padding: 64 -> 31 -> 15 -> 7).
    """

    def __init__(self):
        super(FeatureNet, self).__init__()
        self.features = nn.Sequential(
            # conv0: 24 filters, 7x7, "same" padding -> (24, 64, 64)
            nn.Conv2d(1, 24, kernel_size=7, stride=1, padding=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),  # -> (24, 31, 31)
            # conv1: 64 filters, 5x5 -> (64, 31, 31)
            nn.Conv2d(24, 64, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),  # -> (64, 15, 15)
            # conv2..conv4: 3x3 stacks at constant spatial size
            nn.Conv2d(64, 96, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(96, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),  # -> (64, 7, 7)
        )

    def forward(self, x):
        """Return the (N, 64, 7, 7) feature map for a (N, 1, 64, 64) batch."""
        return self.features(x)

class MetricNet(nn.Module):
    """Metric network: maps a concatenated feature pair to 2-way match logits.

    Input is the concatenation of the two flattened patch features
    (2 * 3136 = 6272 values); output is a pair of unnormalized class scores
    (match / non-match).

    BUG FIX: the original code placed a triple-quoted string inside the
    nn.Sequential(...) argument list; a str is not a Module, so constructing
    the network raised a TypeError. The note is now a real comment.
    """

    def __init__(self):
        super(MetricNet, self).__init__()
        self.features = nn.Sequential(
            nn.Linear(in_features=6272, out_features=1024),
            nn.ReLU(),
            nn.Linear(in_features=1024, out_features=1024),
            nn.ReLU(),
            nn.Linear(in_features=1024, out_features=2),
            # No Softmax here on purpose: training uses cross-entropy loss,
            # and PyTorch's torch.nn.CrossEntropyLoss applies log-softmax
            # internally, so the network must emit raw logits. See
            # https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html
        )

    def forward(self, x):
        """Return (N, 2) logits for a (N, 6272) batch of paired features."""
        return self.features(x)

class MatchNet(nn.Module):
    """Full MatchNet: a shared two-tower FeatureNet feeding a MetricNet head."""

    def __init__(self):
        super(MatchNet, self).__init__()

        # One FeatureNet instance serves both patches, so the two towers
        # share all parameters ("two-tower, tied weights").
        self.input_ = FeatureNet()
        self.input_.apply(weights_init)

        self.matric_network = MetricNet()
        self.matric_network.apply(weights_init)

    def forward(self, x):
        """Score a batch of patch pairs.

        x holds the two patch batches: x[0] and x[1], each of shape
        (B, C, H, W). Returns the (B, 2) match logits from MetricNet.
        """
        left, right = x[0], x[1]

        # Embed each patch with the shared tower, flattening per sample
        # (e.g. (B, 64, 7, 7) -> (B, 3136)).
        emb_left = self.input_(left).reshape((left.shape[0], -1))
        emb_right = self.input_(right).reshape((right.shape[0], -1))

        # Concatenate the pair along the feature axis: (B, 6272).
        paired = torch.cat((emb_left, emb_right), 1)
        return self.matric_network(paired)

def weights_init(m):
    """Custom weight initialization, applied via Module.apply.

    Conv2d layers get orthogonal weights (gain 0.6) and a constant 0.01
    bias; every other module type is left untouched.

    FIX: the original wrapped the bias init in a bare
    ``try/except Exception: pass`` — that silently swallowed any error.
    The only expected failure is a missing bias (Conv2d(bias=False)),
    so test for that explicitly instead.
    """
    if isinstance(m, nn.Conv2d):
        nn.init.orthogonal_(m.weight.data, gain=0.6)
        if m.bias is not None:  # Conv2d(..., bias=False) has no bias tensor
            nn.init.constant_(m.bias.data, 0.01)
    return
```

## 参考文献

1. Han等, 《Matchnet: Unifying feature and metric learning for patch-based matching》. ↩︎

2. A. Krizhevsky, I. Sutskever, and G. E. Hinton. ImageNet classification with deep convolutional neural networks. In NIPS, 2012. ↩︎

THE END
