shape '[-1, 12544]' is invalid for input of size 115200

import torch
import torch.nn as nn
import torch.nn.functional as F

class MyModel(nn.Module):
    def __init__(self, num_classes: int = 50, dropout: float = 0.7) -> None:
        super().__init__()

        self.conv1 = nn.Conv2d(3, 16, 3, 1, 1)
        self.conv2 = nn.Conv2d(16, 32, 3, 1, 1)
        self.conv3 = nn.Conv2d(32, 64, 3, 1, 1)
        self.conv4 = nn.Conv2d(64, 128, 3, 1, 1)
        self.conv5 = nn.Conv2d(128, 256, 3, 1, 1)

        self.pool = nn.MaxPool2d(2, 2)

        self.fc1 = nn.Linear(256*7*7, 512)
        self.fc2 = nn.Linear(512, 50)

        self.dropout = nn.Dropout(0.4)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        x = self.pool(F.relu(self.conv5(x)))

        # Flatten image into vector, pass to FC layers
        # print(x.shape)# [32, 64, 28, 28]
        x = x.view(-1, 256*7*7)

        x = self.dropout(x)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        return x

Your flattening operation is wrong:

    # Flatten image into vector, pass to FC layers
    # print(x.shape)# [32, 64, 28, 28]
    x = x.view(-1, 256*7*7)

since an input with 115200 elements cannot be flattened to [-1, 256*7*7] (115200 is not divisible by 256*7*7 = 12544).
But even if the input had the printed shape of [32, 64, 28, 28], the view would still most likely be wrong, since you would be increasing the batch size:

import torch

x = torch.randn(32, 64, 28, 28)
print(x.view(-1, 256*7*7).shape)
# torch.Size([128, 12544])
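
For comparison, keeping the batch dimension and flattening only the remaining dimensions preserves the 32 samples (64*28*28 = 50176 features each):

print(x.view(x.size(0), -1).shape)
# torch.Size([32, 50176])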

Use x = x.view(x.size(0), -1) to keep the batch size unchanged and flatten the remaining dimensions before passing the activation to the linear layer.
If you then run into a shape mismatch, make sure the in_features of self.fc1 matches the flattened activation feature size.
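
As a minimal sketch, the forward pass with this fix could look like the following. It assumes 224x224 inputs, for which the activation before flattening is [batch_size, 256, 7, 7], so fc1's in_features of 256*7*7 already matches; for other input sizes, print the flattened shape once and set in_features accordingly:

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        x = self.pool(F.relu(self.conv5(x)))

        # keep the batch dimension, flatten channels and spatial dims
        x = x.view(x.size(0), -1)
        # print(x.shape)  # the second value must equal self.fc1.in_features

        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x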