Hi, I’m training a GNN model (encoder) in Pytorch geometric with a custom loss function and I’m facing an issue while doing the training iteration for my model. The code is as below:
def train(dataset, epochs, criterion, writer):
train_loader = DataLoader(dataset, batch_size=1, collate_fn=dataset.collate_fn, shuffle=False)
hidden_features = 16
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# build the model
model = GNN(dataset.num_features, hidden_features=hidden_features)
model = model.to(device)
opt = torch.optim.Adam(model.parameters(), lr=0.0001)
# train
for epoch in range(1, epochs + 1):
total_loss = 0
model.train()
for batch in train_loader:
for data in batch:
data.cuda()
# Zero your gradients for every batch!
opt.zero_grad()
output = model(x, edge_index)
loss = criterion.forward(output, y)
loss.sum().backward()
opt.step()
total_loss += loss
writer.add_scalar("loss", total_loss, epoch)
print("Epoch {}. Loss: {:.4f}".format(
epoch, loss))
return model
can you please tell me how to fix this and if the training process is correct?