I keep getting this error, but I'm not sure why. I've checked the shape of my input and it's a tensor of shape [1, 256, 20, 24], so I don't know why it gives this error. Below is the code. Thanks.
class MLP_model_lvl1(torch.nn.Module):
    """Head module that runs a pretrained level-1 backbone, max-pools its
    feature map, and flattens the result to a (batch, features) vector.

    NOTE(review): `relu`, `fc2`, and `fc3` are constructed here but never
    used in `forward` — confirm whether an MLP was meant to be applied
    after flattening, or remove them.
    """

    def __init__(self, pretrained_model_lvl1):
        """Store the backbone and build the (currently unused) MLP layers.

        Args:
            pretrained_model_lvl1: callable taking (x, y, reg_code,
                pretrained) and returning a 4-D feature tensor
                (e.g. [1, 256, 20, 24] per the author's comment).
        """
        super().__init__()
        self.pretrained_model_lvl1 = pretrained_model_lvl1
        self.hidden = 64
        output_dims = 1
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(self.hidden, self.hidden)
        self.fc3 = torch.nn.Linear(self.hidden, output_dims)
        # BUG FIX: original said `nn.MaxPool2d`, but `nn` is never imported
        # in this snippet (everything else uses the `torch.nn` prefix) —
        # that inconsistency raises NameError. Use torch.nn here too.
        self.max_pool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x, y, reg_code, pretrained):
        # Backbone output; assumed to be a single 4-D Tensor such as
        # [1, 256, 20, 24]. If the backbone returns a tuple/list instead,
        # MaxPool2d will fail on the next line — verify its return type.
        e0 = self.pretrained_model_lvl1(x, y, reg_code, pretrained)
        e0 = self.max_pool(e0)
        # Flatten every dimension except batch -> (N, C*H_out*W_out).
        return e0.flatten(1, -1)