I have built a network, and it seems that my first fully connected layer has not been given the correct input size; my problem is that I don't know how to set it properly. I keep getting the error:
in inner(_it, _timer)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
1368 if input.dim() == 2 and bias is not None:
1369 # fused op is marginally faster
→ 1370 ret = torch.addmm(bias, input, weight.t())
1371 else:
1372 output = input.matmul(weight.t())RuntimeError: size mismatch, m1: [5 x 196608], m2: [26912 x 512] at /pytorch/aten/src/TH/generic/THTensorMath.cpp:136
This is the code for my network; I would appreciate any help I can get.
# Convolutional neural network
class ConvNet(nn.Module):
    """Three conv blocks (conv -> ReLU -> max-pool -> dropout) followed by a
    two-layer fully connected head.

    For 3x256x256 inputs the spatial size evolves as:
        256 -> conv 3x3 -> 254 -> pool/2 -> 127
            -> conv 4x4 -> 124 -> pool/2 -> 62
            -> conv 4x4 -> 59  -> pool/2 -> 29
    so the flattened feature size is 32 * 29 * 29 = 26912, which must match
    fc1's in_features.  The original forward() flattened the *raw* image
    (3 * 256 * 256 = 196608 features) without ever running the conv layers,
    which is exactly the reported "m1: [5 x 196608], m2: [26912 x 512]"
    size-mismatch error.
    """

    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        # Convolution layers.
        self.conv1 = nn.Conv2d(3, 16, (3, 3))
        self.conv2 = nn.Conv2d(16, 24, (4, 4))
        self.conv3 = nn.Conv2d(24, 32, (4, 4))
        # Pooling / activation / regularisation are stateless, so one shared
        # instance each suffices.  (The original assigned self.pool, self.relu
        # and self.dropout three times; the later assignments simply replaced
        # the earlier identical modules.)
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.3)
        # Fully connected head: 26912 flattened conv features -> 512 -> classes.
        self.fc1 = nn.Linear(32 * 29 * 29, 512)
        self.fc2 = nn.Linear(512, num_classes)
        # Kept for callers that want probabilities, but deliberately NOT
        # applied in forward(): nn.CrossEntropyLoss expects raw logits
        # (it applies log-softmax internally).
        self.final = nn.Softmax(dim=1)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes) for input x.

        Assumes x is (batch, 3, 256, 256) — TODO confirm against the loader;
        any other spatial size changes the flattened width and breaks fc1.
        """
        out = self.dropout(self.pool(self.relu(self.conv1(x))))
        out = self.dropout(self.pool(self.relu(self.conv2(out))))
        out = self.dropout(self.pool(self.relu(self.conv3(out))))
        # Flatten every sample's feature maps into one vector: (batch, 26912).
        out = out.reshape(out.size(0), -1)
        out = self.fc1(out)
        out = self.fc2(out)
        return out
# Instantiate the model, loss and optimiser used by the training loop below.
net = ConvNet()
# CrossEntropyLoss expects raw logits; do not apply Softmax before it.
loss_function = nn.CrossEntropyLoss()
# Plain SGD with momentum; lr and momentum are hyperparameters to tune.
optimizer = optim.SGD(net.parameters(), lr=0.0001,momentum=0.9)
# NOTE(review): this prints the number of *batches* in test_loader, not the
# number of samples.  Also, the loop below trains on test_loader — presumably
# a train_loader was intended; verify against the data-loading code.
print(len(test_loader))
def train_model_epochs(num_epochs):
    """Train `net` for `num_epochs` epochs, printing a running loss.

    Uses the module-level net, optimizer, loss_function and test_loader.
    NOTE(review): iterating test_loader here means training on the test set —
    almost certainly a train_loader was intended; confirm and swap it in.
    """
    # BUG FIX: the original iterated range(10), silently ignoring num_epochs.
    for epoch in range(num_epochs):
        running_loss = 0.0
        for i, data in enumerate(test_loader, 0):
            images, labels = data
            optimizer.zero_grad()                 # clear gradients from the previous step
            outputs = net(images)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            # BUG FIX: the original wrote `running_loss =+ loss.item()`, which
            # ASSIGNS +loss.item() each step instead of accumulating with +=.
            running_loss += loss.item()
            if i % 1000 == 999:
                print('Epoch / Batch [%d / %d] - Loss: %.3f'
                      % (epoch + 1, i + 1, running_loss / 1000))
                running_loss = 0.0
    # BUG FIX: the original used typographic quotes (“…”), a SyntaxError.
    print("Training finished")
# Time one complete training run (10 epochs) on the CPU.
# BUG FIX: the original used typographic quotes (“…”) around the statement and
# setup strings, which is a SyntaxError in Python source.
cpu_train_time = timeit.timeit(
    "train_model_epochs(num_epochs)",
    setup="num_epochs=10",
    number=1,
    globals=globals(),
)
# BUG FIX: timeit.timeit returns a float; "str" + float raises TypeError,
# so convert explicitly before concatenating.
print("Training time: " + str(cpu_train_time))