The sizes of y1, y2 and y3 differ, so `torch.cat` cannot be applied directly.
input_shape = [5, 256, 4, 64, 64]
y1_shape = [5, 128, 4, 64, 64]
y2_shape = [5, 128, 2, 62, 62]
y3_shape = [5, 128, 1, 62, 62]
The code is shown below.
class _test(nn.Sequential):
def __init__(self, num_input_features):
super(_test, self).__init__()
self.b1 = nn.Sequential(OrderedDict([
('norm', nn.BatchNorm3d(num_input_features)),
('relu', nn.ReLU(inplace=True)),
('conv', nn.Conv3d(num_input_features, 128,
kernel_size=(1, 1, 1), stride=1, bias=False))
]))
self.b2 = nn.Sequential(OrderedDict([
('norm', nn.BatchNorm3d(num_input_features)),
('relu', nn.ReLU(inplace=True)),
('conv', nn.Conv3d(num_input_features, 128,
kernel_size=(3, 3, 3), stride=1, bias=False))
]))
self.b3 = nn.Sequential(OrderedDict([
('norm', nn.BatchNorm3d(num_input_features)),
('relu', nn.ReLU(inplace=True)),
('conv', nn.Conv3d(num_input_features, 128,
kernel_size=(4, 3, 3), stride=1, bias=False))
]))
def forward(self, x):
y1 = self.b1(x)
y2 = self.b2(x)
y3 = self.b3(x)
return torch.cat([y1, y2, y3], 1)
Is there a way to make the branch outputs the same size so they can be concatenated?