RuntimeError: size mismatch, m1: [3200 x 10], m2: [3200 x 500]

I am trying to feed the network, but I am facing a runtime error about a size mismatch.

How do I calculate the input size of the linear layer of the network? And how do I set the image size and the batch size?
class mandal(nn.Module):
    def __init__(self):
        super(mandal, self).__init__()

        # shape comments assume 135x135 inputs
        # (Resize(136) + CenterCrop(135), see the dataloader below)

        # layer1: input (135,135,3) output (135,135,16)
        self.layer1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)

        # layer2: input (135,135,16) output (135,135,32)
        self.layer2 = nn.Conv2d(16, 32, 3, stride=1, padding=1)

        # layer3: input (135,135,32) output (135,135,64)
        self.layer3 = nn.Conv2d(32, 64, 3, stride=1, padding=1)

        # layer4: input (135,135,64) output (67,67,64)
        self.layer4 = nn.MaxPool2d(2, 2)

        # layer5: input (67,67,64) output (35,35,128)
        self.layer5 = nn.Conv2d(64, 128, 3, stride=2, padding=2)

        # layer6: input (35,35,128) output (17,17,128)
        self.layer6 = nn.MaxPool2d(2, 2)

        # layer7: input (17,17,128) output (10,10,192)
        self.layer7 = nn.Conv2d(128, 192, 3, stride=2, padding=2)

        # layer8: input (10,10,192) output (10,10,120)
        self.layer8 = nn.Conv2d(192, 120, 1, stride=1, padding=0)

        # layer9: input (10,10,120) output (10,10,64)
        self.layer9 = nn.Conv2d(120, 64, 1, stride=1, padding=0)

        # layer10: input (10,10,64) output (10,10,32)
        self.layer10 = nn.Conv2d(64, 32, 1, stride=1, padding=0)

        # layer11: input (10,10,32) output (10,10,10) -- defined but never called in forward()
        self.layer11 = nn.Conv2d(32, 10, 1, stride=1, padding=0)

        # layer12: input 10*10*32 = 3200 (flattened) output 500
        self.layer12 = nn.Linear(10*10*32, 500)

        # layer13: input 500 output 80
        self.layer13 = nn.Linear(500, 80)

        # layer14: input 80 output 10
        self.layer14 = nn.Linear(80, 10)

        # layer15: input 10 output 3
        self.layer15 = nn.Linear(10, 3)

        # dropout p=0.20
        self.dropout = nn.Dropout(0.20)
    
    
    def forward(self, x):
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        x = F.relu(self.layer3(x))
        x = self.layer4(x)
        x = F.relu(self.layer5(x))
        x = self.layer6(x)
        x = F.relu(self.layer7(x))
        x = F.relu(self.layer8(x))
        x = F.relu(self.layer9(x))
        x = F.relu(self.layer10(x))
        x = self.dropout(x)
        # x is still 4-D here ([batch, 32, 10, 10]); it is never flattened
        # before the first linear layer (and layer11 is never applied)
        print(x.size())
        x = F.relu(self.layer12(x))
        x = self.dropout(x)
        x = F.relu(self.layer13(x))
        x = self.dropout(x)
        x = F.relu(self.layer14(x))
        x = self.dropout(x)
        x = F.relu(self.layer15(x))

        return x

model=mandal()
print(model)
if train_on_gpu:
    model.cuda()

import torch.optim as optim

# specify loss function (categorical cross-entropy)

criterion = nn.CrossEntropyLoss()

# specify optimizer

optimizer = optim.SGD(model.parameters(), lr=0.01)

# number of epochs to train the model

n_epochs = 30

valid_loss_min = np.Inf # track change in validation loss

for epoch in range(1, n_epochs+1):

    # keep track of training and validation loss
    train_loss = 0.0
    valid_loss = 0.0

    ###################
    # train the model #
    ###################
    model.train()
    for data, target in dataloader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        # backward pass: compute gradient of the loss with respect to model parameters
        loss.backward()
        # perform a single optimization step (parameter update)
        optimizer.step()
        # update training loss
        train_loss += loss.item()*data.size(0)

    ######################
    # validate the model #
    ######################
    model.eval()
    for data, target in valid_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        # update average validation loss
        valid_loss += loss.item()*data.size(0)

    # calculate average losses
    train_loss = train_loss/len(train_loader.sampler)
    valid_loss = valid_loss/len(valid_loader.sampler)

    # print training/validation statistics
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch, train_loss, valid_loss))

    # save model if validation loss has decreased
    if valid_loss <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
            valid_loss_min,
            valid_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = valid_loss

And the dataloader is:

data_dir = 'corn'
transform = transforms.Compose([transforms.Resize(136),
                                transforms.CenterCrop(135),
                                transforms.ToTensor()])
dataset = datasets.ImageFolder(data_dir, transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=True)
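
Note that Resize(136) followed by CenterCrop(135) produces 135x135 images (hence the 135-based shapes in the layer comments above). A quick way to confirm the image size and batch size the network actually receives is to pull one batch from the dataloader above:

images, labels = next(iter(dataloader))
print(images.shape)   # should print torch.Size([10, 3, 135, 135]) -> [batch, channels, height, width]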

Add an nn.Flatten layer before your first nn.Linear layer, or call x = x.view(x.shape[0], -1) just before passing x to the first linear layer. Your conv layers produce a 4-D tensor of shape [batch, channels, height, width], while nn.Linear expects a 2-D tensor of shape [batch, features], so the spatial dimensions have to be flattened first.
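
For example, here is a minimal sketch of the view fix, assuming (as in the code above) that layer11 stays unused, so the activations entering layer12 have shape [batch, 32, 10, 10] for 135x135 inputs. That flattens to exactly the 10*10*32 = 3200 features layer12 already expects (m2: [3200 x 500] in your error is layer12's weight matrix):

import torch
import torch.nn as nn

# dummy activations standing in for the output of layer10/dropout
x = torch.randn(10, 32, 10, 10)      # [batch, channels, height, width]
x = x.view(x.shape[0], -1)           # flatten everything except the batch dim -> [10, 3200]
fc = nn.Linear(10 * 10 * 32, 500)    # same shape as your layer12
print(fc(x).shape)                   # torch.Size([10, 500])

More generally, to get in_features for the first linear layer, apply out = floor((in + 2*padding - kernel)/stride) + 1 to each Conv2d/MaxPool2d in turn, or just read the shape off the print(x.size()) you already have in forward: in_features must equal the product of every dimension after the batch dimension.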

How do I use nn.Flatten? Kindly show me; I have tried my best.
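
nn.Flatten is a module (available since PyTorch 1.2), so you register it once in __init__ and call it in forward exactly where the view would go. Here is a minimal sketch against your model, with the conv layers elided and a dummy tensor standing in for the conv-stack output:

import torch
import torch.nn as nn
import torch.nn.functional as F

class mandal(nn.Module):
    def __init__(self):
        super(mandal, self).__init__()
        # ... layers 1-11 and dropout exactly as before ...
        self.flatten = nn.Flatten()                  # flattens dims 1..-1, keeps the batch dim
        self.layer12 = nn.Linear(10 * 10 * 32, 500)

    def forward(self, x):
        # ... conv/pool/dropout calls exactly as before ...
        x = self.flatten(x)                          # [batch, 32, 10, 10] -> [batch, 3200]
        x = F.relu(self.layer12(x))
        return x

model = mandal()
dummy = torch.randn(10, 32, 10, 10)                  # stand-in for the conv-stack output
print(model(dummy).shape)                            # torch.Size([10, 500])

Either option is equivalent; nn.Flatten is just the module form of the same reshape, which also lets the model be written with nn.Sequential.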