Hello, I think I have already put all the inputs and the model on the same device, yet I'm still getting the error. Can someone help me?
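For context, the module-level setup is roughly the following; `criterion` is an element-wise loss (reduction="none"), so `loss_mat` in the train loop keeps one value per sample and task. The exact loss class shown here is just a sketch of my setup:

import torch
import torch.nn as nn
from tqdm import tqdm

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# element-wise loss, so the per-task masking in train() works on a full matrix;
# in my script it is something along these lines
criterion = nn.BCEWithLogitsLoss(reduction="none")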
def train(args, model, device, loader, optimizer):
    model.train()
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        # batch = batch.to(device)
        print(f'device:{device}')
        batch.x = batch.x.to(device)
        batch.p = batch.p.to(device)
        batch.edge_index = batch.edge_index.to(device)
        batch.edge_attr = batch.edge_attr.to(device)
        batch.batch = batch.batch.to(device)
        model.to(device)
        print(f'batch.x:{batch.x}')
        print(f'batch.p:{batch.p}')
        print(f'batch.edge_index:{batch.edge_index}')
        print(f'batch.edge_attr:{batch.edge_attr}')
        print(f'batch:{batch.batch}')
        batch = batch.to(device)
        print(f'model device cuda: {next(model.parameters()).is_cuda}')
        pred, h = model(batch.x, batch.p, batch.edge_index,
                        batch.edge_attr, batch.batch)
        # labels are in {-1, +1}; 0 marks a missing target
        y = batch.y.view(pred.shape).to(torch.float64)
        # whether y is non-null or not
        is_valid = y ** 2 > 0
        # loss matrix (`criterion` is the element-wise loss defined at module level)
        loss_mat = criterion(pred.double(), (y + 1) / 2)
        # loss matrix after removing null targets
        loss_mat = torch.where(is_valid, loss_mat,
                               torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))
        optimizer.zero_grad()
        loss = torch.sum(loss_mat) / torch.sum(is_valid)
        loss.backward()
        optimizer.step()
The output of the above code:
device:cuda:0
batch.x:tensor([[ 8.0000, 15.9990, 1.5500, 2.0000, 6.0000],
[ 6.0000, 12.0110, 1.7000, 4.0000, 4.0000],
[ 6.0000, 12.0110, 1.7000, 4.0000, 4.0000],
...,
[ 1.0000, 1.0080, 1.2000, 1.0000, 1.0000],
[ 1.0000, 1.0080, 1.2000, 1.0000, 1.0000],
[ 1.0000, 1.0080, 1.2000, 1.0000, 1.0000]], device='cuda:0')
batch.p:tensor([[-3.4916, -0.1727],
[-2.6944, -0.1722],
[-2.2801, -1.6138],
...,
[ 4.4742, -5.0393],
[ 3.3981, -2.2389],
[-2.5284, -1.3024]], device='cuda:0')
batch.edge_index:tensor([[ 0, 1, 1, ..., 1416, 1395, 1417],
[ 1, 0, 2, ..., 1394, 1417, 1395]], device='cuda:0')
batch.edge_attr:tensor([[2],
[2],
[1],
...,
[1],
[1],
[1]], device='cuda:0')
batch:tensor([ 0, 0, 0, ..., 31, 31, 31], device='cuda:0')
model device cuda: True
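In case it helps, this is the kind of helper I use to double-check which tensors are actually on the GPU; it walks the batch attributes that go into the forward pass as well as the model's parameters and buffers, and reports anything still on the CPU (the name report_devices is my own, not a library function):

import torch

def report_devices(model, batch):
    # the tensors that are passed into the model's forward
    for name in ("x", "p", "edge_index", "edge_attr", "batch", "y"):
        t = getattr(batch, name, None)
        if torch.is_tensor(t):
            print(f"batch.{name}: {t.device}")
    # any parameter or buffer left on the CPU would trigger a device mismatch
    for name, p in model.named_parameters():
        if not p.is_cuda:
            print(f"parameter on CPU: {name}")
    for name, b in model.named_buffers():
        if not b.is_cuda:
            print(f"buffer on CPU: {name}")

Calling report_devices(model, batch) right before the forward pass shows exactly which tensor, if any, is still on the CPU.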