Hi,
I’m trying to change the number of channels used at layer4 of ResNet-18. I wrote something like the following piece of code, but when I run it, it complains about a size mismatch at the fc layer. I don’t know exactly how to work out which size I should use.
class Inception(nn.Module):
    """Parallel-branch head intended to replace layer4 of a ResNet-18.

    Branch ``paral_0`` takes the 256-channel feature map produced by
    layer3, reduces it to 64 channels, global-average-pools it to a
    64-dim vector, and classifies it into 83 classes via ``fc0``.

    NOTE(review): the original snippet continues with more parallel
    branches (elided with "...." by the author); only branch 0 is shown.
    """

    def __init__(self, in_channels=2048):
        super(Inception, self).__init__()
        self.paral_0 = nn.Sequential(
            nn.Conv2d(256, 64, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.AdaptiveAvgPool2d(output_size=(1, 1)),
        )
        # BUG FIX: the last conv in paral_0 emits 64 channels and the
        # adaptive average pool collapses the spatial dims to 1x1, so
        # view(N, -1) yields a (N, 64) tensor — not (N, 256).  in_features
        # must therefore be 64; with 256 addmm fails exactly as in the
        # reported traceback: "size mismatch, m1: [2 x 64], m2: [256 x 83]".
        self.fc0 = nn.Linear(in_features=64, out_features=83, bias=True)
        # ... remaining branches / fc layers from the original code ...

    def forward(self, x):
        # x: feature map from layer3 — assumed (N, 256, H, W); TODO confirm.
        y0 = self.paral_0(x)           # -> (N, 64, 1, 1)
        y0 = y0.view(y0.size(0), -1)   # flatten -> (N, 64)
        y0 = self.fc0(y0)              # -> (N, 83) class scores
        # ... remaining branches from the original code ...
        return y0
and this is the error:
Traceback (most recent call last):
File " generated_train/0.py", line 471, in
summary(model, (3, 224, 224))
File " lib/python3.6/site-packages/torchsummary/torchsummary.py", line 72, in summary
model(*x)
File " lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in call
result = self.forward(*input, **kwargs)
File " generated_train/model/net.py", line 533, in forward
x = self.layer4(x)
File " lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in call
result = self.forward(*input, **kwargs)
File " generated_train/model/net.py", line 303, in forward
y0 = self.fc0(y0)
File " lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in call
result = self.forward(*input, **kwargs)
File " lib/python3.6/site-packages/torch/nn/modules/linear.py", line 67, in forward
return F.linear(input, self.weight, self.bias)
File " lib/python3.6/site-packages/torch/nn/functional.py", line 1352, in linear
ret = torch.addmm(torch.jit._unwrap_optional(bias), input, weight.t())
RuntimeError: size mismatch, m1: [2 x 64], m2: [256 x 83] at /pytorch/aten/src/THC/generic/THCTensorMathBlas.cu:266