I am developing a graph neural network to predict the energy and the forces as the gradient of the energies when I run my code I have this error "One of the differentiated tensors appears not to have been used in the graph "
class ForceModule(MessagePassing):
def __init__(self, num_node_features, num_edge_features, hidden_channels, num_targets=1, device='cpu'):
super(ForceModule, self).__init__(aggr='add')
self.num_node_features = num_node_features
self.num_edge_features = num_edge_features
self.hidden_channels = hidden_channels
self.num_targets=num_targets
self.device = device
# Define the parameters for the message passing function
# Define the node and edge embeddings
self.node_embedding = nn.Linear(self.num_node_features, self.hidden_channels)
self.edge_embedding = nn.Linear(self.num_edge_features, self.hidden_channels)
# Define the final output layer
self.output_layer = nn.Linear(self.hidden_channels, self.num_targets)
def _forward(self, data):
x = self.node_embedding(data.x)
edge_attr = self.edge_embedding(data.edge_attr) # messages
# Incorporate the angles into the node representations
edge_angle = torch.unsqueeze(data.angle, 1)
edge_angle = edge_angle.repeat(1, self.hidden_channels)
# Concatenate the angle with the edge features
edge_attr = torch.cat([edge_attr, edge_angle], dim=1)
# Move the tensors to the specified device
#x, edge_attr = x.to(self.device), edge_attr.to(self.device)
# Pass messages between nodes
# hidden = self.propagate(x=x, edge_index=edge_index, edge_attr=edge_attr, norm=data.distances, size=size)
hidden = self.propagate(x=x, edge_index=data.edge_index, edge_attr=edge_attr)
hidden = F.relu(hidden)
# Perform graph readout operation.
x = global_mean_pool(hidden, data.batch) # perform global mean pooling
#x = global_max_pool(hidden, batch) # perform global max pooling
# Apply the output layer
x = self.output_layer(x)
return x
def forward(self, data):
# Move the tensors to the specified device
energy = self._forward(data)
data.pos.requires_grad_(True)
# Compute gradients using the copy of data.pos
forces = -1 * torch.autograd.grad(energy, data.pos, grad_outputs=torch.ones_like(energy), create_graph=True, retain_graph=True)[0]
return energy.view(-1), forces
“DataBatch(pos=[216, 3], cell=[2, 3, 3], atomic_numbers=[216], natoms=[2], tags=[216], edge_index=[2, 2506], cell_offsets=[2506, 3], y=[2], force=[216, 3], distances=[2506], fixed=[216], sid=[2], fid=[2], id=[2], x=[216, 92], edge_attr=[2506, 3], angle=[2506], batch=[216], ptr=[3])”
force_module = ForceModule(num_node_features=92, num_edge_features=3, hidden_channels=256, num_targets=1)
energy, forces = force_module(databatch)
Runtimme error: One of the differentiated Tensors appears to not have been used in the graph