@ptrblck I found another problem at line 273, and I changed the class following your suggestions:
@@ -66,6 +66,7 @@ class NeuralNetwork(nn.Module):
"""
X = torch.tensor(X, requires_grad=False)
+ X = X.unsqueeze(0)
X = self.linears[symbol](X)
intercept_name = 'intercept_' + symbol
@@ -164,6 +165,7 @@ class NeuralNetwork(nn.Module):
for key in self.state_dict():
old_state_dict[key] = self.state_dict()[key].clone()
+ targets = [[target] for target in targets]
targets = torch.tensor(targets, requires_grad=False)
# Define optimizer
@@ -196,7 +198,7 @@ class NeuralNetwork(nn.Module):
outputs.append(image_energy)
- outputs = torch.cat(outputs)
+ outputs = torch.stack(outputs)
loss, rmse = self.get_loss(outputs, targets, data.atoms_per_image)
_loss.append(loss)
_rmse.append(rmse)
@@ -272,9 +274,10 @@ class NeuralNetwork(nn.Module):
"""
self.optimizer.zero_grad() # clear previous gradients
-
+ atoms_per_image = [[number] for number in atoms_per_image]
atoms_per_image = torch.tensor(atoms_per_image, requires_grad=False,
dtype=torch.float)
+
outputs_atom = torch.div(outputs, atoms_per_image)
targets_atom = torch.div(targets, atoms_per_image)
With those changes, there is now a difference only at the 6th decimal place:
outputs
tensor([[[-14.5754384995]],
[[-14.5754394531]]], grad_fn=<StackBackward>)
targets
tensor([[-14.5868730545],
[-14.5640010834]])
Applying sklearn's MinMaxScaler to the features gave the same result.