Hi,
I am working on a cat vs. dog image classifier, and I have defined my model like this:
import torch
import torch.nn as nn
import torch.nn.functional as F

class CatAndDogConvNet(nn.Module):

    def __init__(self):
        super().__init__()
        # convolutional layers (3 -> 16 -> 32 -> 64 channels)
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(5, 5), stride=2, padding=1)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(5, 5), stride=2, padding=1)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding=1)
        self.maxpool = nn.MaxPool2d(2)  # one 2x2 pool module, reused after each conv
        # fully connected layers
        self.fc1 = nn.Linear(in_features=64 * 6 * 6, out_features=500)
        self.fc2 = nn.Linear(in_features=500, out_features=50)
        self.fc3 = nn.Linear(in_features=50, out_features=2)

    def forward(self, X):
        X = self.maxpool(F.relu(self.conv1(X)))
        # print(X.shape)
        X = self.maxpool(F.relu(self.conv2(X)))
        # print(X.shape)
        X = self.maxpool(F.relu(self.conv3(X)))
        # print(X.shape)
        X = X.view(X.shape[0], -1)  # flatten to (batch, 64 * 6 * 6)
        # print(X.shape)
        X = F.relu(self.fc1(X))
        X = F.relu(self.fc2(X))
        X = self.fc3(X)
        return X
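For reference, the training shapes below come from uncommenting the print(X.shape) lines in forward(). A dummy batch reproduces them; the 224x224 input size is an assumption on my part, chosen because it matches the printed sizes:

```python
# Sketch: reproduce the training-time shapes with a dummy batch.
# The 224x224 input size is assumed; it is consistent with the prints below.
import torch

model = CatAndDogConvNet()
dummy = torch.randn(100, 3, 224, 224)  # batch of 100, as in the training logs
logits = model(dummy)                  # with the print(X.shape) lines uncommented,
                                       # the shapes below appear here
print(logits.shape)                    # torch.Size([100, 2])
```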
During training (batch size 100), the shapes printed after each maxpool, and then after flattening, are:
torch.Size([100, 16, 55, 55])
torch.Size([100, 32, 13, 13])
torch.Size([100, 64, 6, 6])
torch.Size([100, 2304])
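These sizes all follow the standard output-size rule for Conv2d and MaxPool2d, out = floor((in + 2*padding - kernel) / stride) + 1. Tracing it through the network (again assuming 224x224 inputs) reproduces every number, including fc1's 2304 input features:

```python
def conv_out(n, kernel, stride=1, padding=0):
    # Output size for Conv2d / MaxPool2d: floor((n + 2p - k) / s) + 1
    return (n + 2 * padding - kernel) // stride + 1

n = 224                    # assumed input size
n = conv_out(n, 5, 2, 1)   # conv1   -> 111
n = conv_out(n, 2, 2)      # maxpool -> 55
n = conv_out(n, 5, 2, 1)   # conv2   -> 27
n = conv_out(n, 2, 2)      # maxpool -> 13
n = conv_out(n, 3, 1, 1)   # conv3   -> 13
n = conv_out(n, 2, 2)      # maxpool -> 6
print(64 * n * n)          # 2304, i.e. fc1's in_features
```

Note that fc1's in_features = 64 * 6 * 6 = 2304 only works out for that input size.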
However, during inference (batch size 1), the printed shapes are:
[1, 16, 111, 111]
[1, 16, 55, 55]
[1, 32, 27, 27]
[1, 64, 27, 27]
I don’t understand what’s going on here. Why do the intermediate shapes differ between training and inference?
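For completeness, this is the single-image check I would expect to behave the same way as training (a minimal sketch; eval mode and the 224x224 size are assumptions):

```python
# Sketch: the same shape check on a single image (batch size 1).
import torch

model = CatAndDogConvNet()              # in practice, the trained model
model.eval()                            # assumed inference setup
with torch.no_grad():
    img = torch.randn(1, 3, 224, 224)   # stand-in for one preprocessed image
    logits = model(img)                 # I expected the training shapes here,
                                        # just with batch size 1, not the list above
print(logits.shape)                     # torch.Size([1, 2])
```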