Currently I update my per epoch loss as such:
for i in range(epoch):
print('epoch %d:' % i)
epoch_loss = 0
model.train()
for graph1, graph2, target in train_loader:
pred = torch.squeeze(model(graph1, graph2))
pred = pred.detach().cpu().numpy()
target = target.detach().cpu().numpy()
pred = torch.tensor(pred, requires_grad=True)
target = torch.LongTensor(target)
loss = loss_fn(pred, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_loss += loss.detach().item()
epoch_loss /= (iter + 1)
print('Epoch {}, loss {:.4f}'.format(epoch, epoch_loss))
epoch_losses.append(epoch_loss)
However the line
epoch_loss /= (iter + 1)
gives me this error
TypeError: unsupported operand type(s) for +: 'builtin_function_or_method' and 'int'
How do I update the loss in a different way? Why is this giving an error?