My code is below:
# Training loop for torchvision Inception v3 (fine-tuning with the auxiliary head).
#
# NOTE on the RuntimeError ("Kernel size can't be greater than actual input size"):
# Inception v3 requires inputs of at least 299x299 pixels; smaller images shrink
# to 3x3 by the time they reach a 5x5 conv kernel. Fix it in the dataset
# transform, e.g.:
#     transforms.Compose([transforms.Resize((299, 299)), transforms.ToTensor(), ...])
loss_epoch_arr = []
max_epochs = 5
min_loss = float('inf')  # any real first-batch loss replaces this (1000 could be exceeded)
n_iters = int(np.ceil(50000 / training_batchsize))  # %d below expects an int, np.ceil returns float

for epoch in range(max_epochs):
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)

        # Guard against the padded-input-size RuntimeError before it happens
        # deep inside the network: Inception v3 needs >= 299x299 spatial dims.
        if inputs.shape[-1] < 299 or inputs.shape[-2] < 299:
            raise ValueError(
                'Inception v3 requires inputs of at least 299x299; got '
                '%dx%d. Add transforms.Resize((299, 299)) to your dataset '
                'transform.' % (inputs.shape[-2], inputs.shape[-1])
            )

        opt.zero_grad()
        # In train mode Inception v3 returns (main_logits, aux_logits);
        # the auxiliary classifier loss is weighted by 0.3 as in the paper.
        outputs, aux_outputs = inception(inputs)
        loss = loss_fn(outputs, labels) + 0.3 * loss_fn(aux_outputs, labels)
        loss.backward()
        opt.step()

        # Track the best weights seen so far (deepcopy so later steps
        # don't mutate the saved state_dict in place).
        if min_loss > loss.item():
            min_loss = loss.item()
            best_model = copy.deepcopy(inception.state_dict())
            print('Min loss %0.2f' % min_loss)

        if i % 100 == 0:
            print('Iteration: %d/%d, Loss: %0.2f' % (i, n_iters, loss.item()))

        # Drop every GPU tensor (including aux_outputs, which the original
        # code leaked) before asking CUDA to release cached memory.
        del inputs, labels, outputs, aux_outputs
        torch.cuda.empty_cache()

    loss_epoch_arr.append(loss.item())  # loss of the final batch of this epoch

    print('Epoch: %d/%d, Test acc: %0.2f, Train acc: %0.2f' % (
        epoch, max_epochs,
        evaluation_inception(test_loader, inception),
        evaluation_inception(train_loader, inception)))

plt.plot(loss_epoch_arr)
plt.show()
The following error occurred:
RuntimeError: Calculated padded input size per channel: (3 x 3). Kernel size: (5 x 5). Kernel size can't be greater than actual input size
How can I resolve it?