Hi there, I’m trying to accumulate the MSE loss over each episode manually and call backward() once per episode, like this:
# -- training network ----------------------------------------
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

num_epochs = 5
loss_function = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

model.train()
for epoch in range(num_epochs):
    _epoch_loss = []
    for i in range(len(training_data['X'][0:20])):  # all cartpole simulations (each sim. is an episode)
        optimizer.zero_grad()
        accumulated_MSE = 0.0
        # reset the Hebbian traces at the start of each episode
        model[0].hebb = torch.zeros((30, 4), requires_grad=False)
        model[2].hebb = torch.zeros((30, 30), requires_grad=False)
        model[4].hebb = torch.zeros((1, 30), requires_grad=False)
        for j in range(len(training_data['X'][i])):  # all time steps of simulation i
            _input = torch.tensor(np.array([training_data['X'][i][j]]), dtype=torch.float32)
            _target = torch.tensor(np.array([training_data['Y'][i][j]]), dtype=torch.float32)
            prediction = model(_input)
            # drop the batch dimension so prediction matches _target
            prediction = torch.tensor(np.array(prediction.tolist()[0]), dtype=torch.float32)
            accumulated_MSE += loss_function(prediction, _target).item()
        accumulated_MSE_tensor = torch.tensor(accumulated_MSE, dtype=torch.float32, requires_grad=True)
        accumulated_MSE_tensor.backward()
        optimizer.step()
        model[0].hebb.detach_()
        model[2].hebb.detach_()
        model[4].hebb.detach_()
        # episode ends.
        _epoch_loss.append(accumulated_MSE)
    print(f'epoch {epoch} loss: {np.mean(_epoch_loss)}')
The problem is that the printed epoch loss stays exactly the same from epoch to epoch, so it looks like the parameters are never updated. What is the correct way to accumulate the loss over an episode and backpropagate it manually?
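My suspicion is that calling .item() on each step's loss (and rebuilding prediction via .tolist() and torch.tensor) detaches everything from the autograd graph, so the backward() call on the freshly created accumulated_MSE_tensor has no path back to model.parameters(). Below is a minimal sketch of the graph-preserving version I'm considering; it reuses model, training_data, optimizer, loss_function and num_epochs from above, and accumulated_loss is just my name for the running loss tensor. Is this the right direction?

for epoch in range(num_epochs):
    _epoch_loss = []
    for i in range(len(training_data['X'][0:20])):  # one episode per simulation
        optimizer.zero_grad()
        # reset the Hebbian traces exactly as before
        model[0].hebb = torch.zeros((30, 4), requires_grad=False)
        model[2].hebb = torch.zeros((30, 30), requires_grad=False)
        model[4].hebb = torch.zeros((1, 30), requires_grad=False)
        accumulated_loss = 0.0  # becomes a tensor after the first addition below
        for j in range(len(training_data['X'][i])):  # all time steps of episode i
            _input = torch.tensor(np.array([training_data['X'][i][j]]), dtype=torch.float32)
            _target = torch.tensor(np.array([training_data['Y'][i][j]]), dtype=torch.float32)
            prediction = model(_input)
            # indexing keeps the graph intact, unlike .tolist() / torch.tensor(...)
            accumulated_loss = accumulated_loss + loss_function(prediction[0], _target)
        accumulated_loss.backward()  # one backward pass per episode, through all time steps
        optimizer.step()
        model[0].hebb.detach_()
        model[2].hebb.detach_()
        model[4].hebb.detach_()
        # episode ends.
        _epoch_loss.append(accumulated_loss.item())  # .item() only for logging
    print(f'epoch {epoch} loss: {np.mean(_epoch_loss)}')

In particular: is indexing with prediction[0] an acceptable replacement for .tolist()[0], and is it still correct to reset and detach_() the Hebbian traces per episode as in my original version?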