Mismatch error with PyTorch CNN and Linear layer model

I have built a network and it seems that my first fully connected layer has not been given the correct input size. My problem is that I don't know how to set it properly. I keep getting the error:

in inner(_it, _timer)

/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
   1368     if input.dim() == 2 and bias is not None:
   1369         # fused op is marginally faster
-> 1370         ret = torch.addmm(bias, input, weight.t())
   1371     else:
   1372         output = input.matmul(weight.t())

RuntimeError: size mismatch, m1: [5 x 196608], m2: [26912 x 512] at /pytorch/aten/src/TH/generic/THTensorMath.cpp:136

This is the code for my network. I would appreciate any help I can get.

# Convolutional neural network
class ConvNet(nn.Module):

    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()

        # Add network layers here
        # Layer 1
        self.conv1 = nn.Conv2d(3, 16, (3, 3))
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.3)

        # Layer 2
        self.conv2 = nn.Conv2d(16, 24, (4, 4))
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.3)

        conv = self.conv2
        print(conv)

        # Layer 3
        self.conv3 = nn.Conv2d(24, 32, (4, 4))
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.3)

        # Layer 4 - Fully connected
        self.fc1 = nn.Linear(32 * 29 * 29, 512)

        self.fc2 = nn.Linear(512, num_classes)
        self.final = nn.Softmax(dim=1)

    def forward(self, x):
        out = x.reshape(x.size(0), -1)  # TODO what does this do? Why do we need it?
        out = self.fc1(out)

        # Complete the graph

        return out
       

net = ConvNet()

loss_function = nn.CrossEntropyLoss()

optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)

print(len(test_loader))

def train_model_epochs(num_epochs):
    for epoch in range(num_epochs):

        running_loss = 0.0

        for i, data in enumerate(test_loader, 0):
            images, labels = data

            optimizer.zero_grad()

            outputs = net(images)

            loss = loss_function(outputs, labels)

            loss.backward()

            optimizer.step()

            running_loss += loss.item()
            if i % 1000 == 999:
                print('Epoch / Batch [%d / %d] - Loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 1000))
                running_loss = 0.0

    print("Training finished")

cpu_train_time = timeit.timeit(
    "train_model_epochs(num_epochs)",
    setup="num_epochs=10",
    number=1,
    globals=globals(),
)

print("Training time: " + str(cpu_train_time))

In your forward method you are directly flattening your input x and passing it to the linear layer, skipping all conv layers.
I assume you would like to pass the input first to self.conv1, self.pool etc.
You are also redefining the modules inside the __init__ method (self.pool, self.relu, and self.dropout are each assigned several times, so only the last assignment is kept under each name), so use different names for the layers.
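
Something like this should work as a starting point (a minimal sketch, not a drop-in for your assignment; I'm assuming 256x256 RGB inputs, since the 196608 in your error equals 3 * 256 * 256, which also matches the 32 * 29 * 29 input size of your fc1):

# Sketch: distinct layer names, and conv stages applied before flattening
class ConvNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, (3, 3))
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 24, (4, 4))
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(24, 32, (4, 4))
        self.pool3 = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()             # stateless, so a single instance can be reused
        self.dropout = nn.Dropout(p=0.3)  # same here
        self.fc1 = nn.Linear(32 * 29 * 29, 512)  # 29x29 feature map after three conv/pool stages
        self.fc2 = nn.Linear(512, num_classes)
        # no Softmax here: nn.CrossEntropyLoss expects raw logits

    def forward(self, x):
        out = self.dropout(self.pool1(self.relu(self.conv1(x))))
        out = self.dropout(self.pool2(self.relu(self.conv2(out))))
        out = self.dropout(self.pool3(self.relu(self.conv3(out))))
        out = out.reshape(out.size(0), -1)  # flatten to [batch_size, 32*29*29]
        out = self.fc1(out)
        out = self.fc2(out)
        return out

The 29 comes from 256 -> 254 (3x3 conv) -> 127 (pool) -> 124 (4x4 conv) -> 62 (pool) -> 59 (4x4 conv) -> 29 (pool).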

This tutorial might be helpful.

Ahh yes that makes sense!

I assume you would like to pass the input first to self.conv1, self.pool etc.

For the above, I am unsure whether my attempt below is syntactically correct:

def forward(self, x):
    out = x.reshape(x.size(0), -1)
    out = self.conv1(out)
    out = self.pool1(out)
    out = self.relu1(out)

    out = self.conv2(out)
    out = self.pool2(out)
    out = self.relu2(out)

    out = self.conv3(out)
    out = self.pool3(out)
    out = self.relu3(out)

    out = self.fc1(out)

With this new forward method I get the error:

Expected 4-dimensional input for 4-dimensional weight 16 3 3 3, but got 2-dimensional input of size [5, 196608] instead

I then tried moving the reshape after the conv layers instead:

def forward(self, x):

    out = self.conv1(out)
    out = self.conv2(out)
    out = self.conv3(out)

    out = x.reshape(x.size(0), -1)

    out = self.fc1(out)

I changed the model to use sequential layers, however I am getting the error:

local variable 'out' referenced before assignment

Sorry to bother you, I am at a loss.

Your forward method uses x as the variable name, while you are passing out to the first layer.
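
In other words, assuming self.conv1 through self.conv3 are now your sequential blocks (conv + relu + pool), something like this (a minimal sketch):

def forward(self, x):
    out = self.conv1(x)   # start from the input x, so out is defined
    out = self.conv2(out)
    out = self.conv3(out)

    out = out.reshape(out.size(0), -1)  # flatten the conv output, not the original input

    out = self.fc1(out)
    return out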
