Sure! Here is a small example concatenating the outputs of two linear layers:
class MyModel(nn.Module):
    """Two-branch CNN whose branch embeddings are concatenated before the head.

    Each branch runs its own conv stack and linear layer to produce a 64-dim
    embedding; the two embeddings are concatenated (dim 1) and mapped to 10
    output logits.

    forward(x1, x2):
        x1: branch-1 input, 3 channels, 24x24 spatial (N, 3, 24, 24)
        x2: branch-2 input, 1 channel, 24x24 spatial (N, 1, 24, 24)
        returns: (N, 10) logits
    """

    def __init__(self):
        super().__init__()
        # Branch 1: 3-channel input. Two conv+pool stages halve the spatial
        # size twice: 24 -> 12 -> 6.
        self.features1 = nn.Sequential(
            nn.Conv2d(3, 6, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(6, 12, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Branch 2: same topology, but expects a single-channel input.
        self.features2 = nn.Sequential(
            nn.Conv2d(1, 6, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(6, 12, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # 12 channels * 6 * 6 spatial = 432 features per branch
        # (assumes 24x24 inputs, as in the example below).
        self.fc1 = nn.Linear(12 * 6 * 6, 64)
        self.fc2 = nn.Linear(12 * 6 * 6, 64)
        # 64 + 64 concatenated features -> 10 classes.
        self.fc_out = nn.Linear(128, 10)

    def forward(self, x1, x2):
        x1 = self.features1(x1)
        x1 = x1.view(x1.size(0), -1)  # flatten to (N, 432)
        x1 = F.relu(self.fc1(x1))
        x2 = self.features2(x2)
        x2 = x2.view(x2.size(0), -1)  # flatten to (N, 432)
        x2 = F.relu(self.fc2(x2))
        # Concatenate in dim 1 (the feature dimension): (N, 64)+(N, 64) -> (N, 128)
        x = torch.cat((x1, x2), 1)
        x = self.fc_out(x)
        return x
# Quick smoke test: push one batch of 2 samples through both branches.
model = MyModel()
rgb_batch = torch.randn(2, 3, 24, 24)   # branch-1 input: 3 channels, 24x24
gray_batch = torch.randn(2, 1, 24, 24)  # branch-2 input: 1 channel, 24x24
output = model(rgb_batch, gray_batch)   # -> shape (2, 10)
Note that instead of joining the fully-connected outputs, you could also concatenate, sum, or average the outputs of the `features` modules directly.
I'm not sure which combination would work best in your use case, so you might want to experiment a bit.