RuntimeError: Input, output and indices must be on the current device

Hello, I think I have already put all the inputs and the model on the same device, yet I'm still getting the error. Can someone help me?

import torch
from tqdm import tqdm

def train(args, model, device, loader, optimizer):
    model.train()

    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        # batch = batch.to(device)
        print(f'device:{device}')
        batch.x = batch.x.to(device)
        batch.p = batch.p.to(device)
        batch.edge_index = batch.edge_index.to(device)
        batch.edge_attr = batch.edge_attr.to(device)
        batch.batch = batch.batch.to(device)
        model.to(device)
        print(f'batch.x:{batch.x}')
        print(f'batch.p:{batch.p}')
        print(f'batch.edge_index:{batch.edge_index}')
        print(f'batch.edge_attr:{batch.edge_attr}')
        print(f'batch:{batch.batch}')
        batch = batch.to(device)
        print(f'model device cuda: {next(model.parameters()).is_cuda}')
        pred, h = model(batch.x, batch.p, batch.edge_index,
                        batch.edge_attr, batch.batch)
        y = batch.y.view(pred.shape).to(torch.float64)

        # Whether y is non-null or not.
        is_valid = y**2 > 0
        # Loss matrix (criterion is the element-wise loss function, defined elsewhere in the script)
        loss_mat = criterion(pred.double(), (y + 1) / 2)
        # loss matrix after removing null target
        loss_mat = torch.where(is_valid, loss_mat, torch.zeros(
            loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))

        optimizer.zero_grad()
        loss = torch.sum(loss_mat) / torch.sum(is_valid)
        loss.backward()

        optimizer.step()

The output of the above code:

device:cuda:0
batch.x:tensor([[ 8.0000, 15.9990, 1.5500, 2.0000, 6.0000],
        [ 6.0000, 12.0110, 1.7000, 4.0000, 4.0000],
        [ 6.0000, 12.0110, 1.7000, 4.0000, 4.0000],
        ...,
        [ 1.0000, 1.0080, 1.2000, 1.0000, 1.0000],
        [ 1.0000, 1.0080, 1.2000, 1.0000, 1.0000],
        [ 1.0000, 1.0080, 1.2000, 1.0000, 1.0000]], device='cuda:0')
batch.p:tensor([[-3.4916, -0.1727],
        [-2.6944, -0.1722],
        [-2.2801, -1.6138],
        ...,
        [ 4.4742, -5.0393],
        [ 3.3981, -2.2389],
        [-2.5284, -1.3024]], device='cuda:0')
batch.edge_index:tensor([[ 0, 1, 1, ..., 1416, 1395, 1417],
        [ 1, 0, 2, ..., 1394, 1417, 1395]], device='cuda:0')
batch.edge_attr:tensor([[2],
        [2],
        [1],
        ...,
        [1],
        [1],
        [1]], device='cuda:0')
batch:tensor([ 0, 0, 0, ..., 31, 31, 31], device='cuda:0')
model device cuda: True
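
For reference, here is a stripped-down sketch of the loop I am trying to get working (it assumes a PyTorch Geometric DataLoader, so batch.to(device) moves all attributes at once, and that criterion is an element-wise loss, e.g. with reduction="none", defined elsewhere):

import torch
from tqdm import tqdm

def train_sketch(args, model, device, loader, optimizer):
    model.to(device)   # move the model once, outside the loop
    model.train()

    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = batch.to(device)   # moves x, p, edge_index, edge_attr, batch, y together

        pred, h = model(batch.x, batch.p, batch.edge_index,
                        batch.edge_attr, batch.batch)
        y = batch.y.view(pred.shape).to(torch.float64)

        is_valid = y ** 2 > 0      # mask for non-null targets
        loss_mat = criterion(pred.double(), (y + 1) / 2)
        loss_mat = torch.where(is_valid, loss_mat, torch.zeros_like(loss_mat))

        optimizer.zero_grad()
        loss = torch.sum(loss_mat) / torch.sum(is_valid)
        loss.backward()
        optimizer.step()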

Do you think is_valid is on the same device?
Also, do you know the line number of the error?

Hi @InnovArul,

The error occurs before loss_mat is computed; it is raised by this line: "pred, h = model(batch.x, batch.p, batch.edge_index, batch.edge_attr, batch.batch)"

Traceback (most recent call last):
  File "finetune.py", line 286, in <module>
    main()
  File "finetune.py", line 256, in main
    train(args, model, device, train_loader, optimizer)
  File "finetune.py", line 48, in train
    batch.edge_attr, batch.batch)
  File "/home/liuy69/bin/anaconda2/envs/GCN_property/lib/python3.6/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/liuy69/projects/GCN_Syn/examples/pretrain-gnns/chem/model.py", line 140, in forward
    node_representation = self.gnn(x= x, edge_index = edge_index, edge_attr = edge_attr, p = p)
  File "/home/liuy69/bin/anaconda2/envs/GCN_property/lib/python3.6/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/liuy69/projects/GCN_Syn/examples/pretrain-gnns/chem/model.py", line 52, in forward
    sim_sc = kernel_layer(data = data)
  File "/home/liuy69/bin/anaconda2/envs/GCN_property/lib/python3.6/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/liuy69/projects/GCN_Syn/examples/pretrain-gnns/chem/kernels.py", line 569, in forward
    return self.conv(data=data)
  File "/home/liuy69/bin/anaconda2/envs/GCN_property/lib/python3.6/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/liuy69/projects/GCN_Syn/examples/pretrain-gnns/chem/kernels.py", line 506, in forward
    deg, x, p, edge_index, edge_attr)
  File "/home/liuy69/projects/GCN_Syn/examples/pretrain-gnns/chem/kernels.py", line 451, in convert_graph_to_receptive_field
    deg=deg, x=x, edge_index=edge_index, p=p, edge_attr=edge_attr)
  File "/home/liuy69/projects/GCN_Syn/examples/pretrain-gnns/chem/kernels.py", line 425, in get_neighbor_nodes_and_edges_of_degree
    edge_attr, edge_index, center_index[i])
  File "/home/liuy69/projects/GCN_Syn/examples/pretrain-gnns/chem/kernels.py", line 393, in get_edge_attr_support_from_center_node
    input=edge_attr, dim=0, index=bond_id)
RuntimeError: Input, output and indices must be on the current device
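
Looking at the last frame, the call that fails is torch.index_select(input=edge_attr, dim=0, index=bond_id), so I suspect the index tensor bond_id is still on the CPU while edge_attr is on cuda:0. A minimal sketch that reproduces the same RuntimeError (bond_id here just stands in for whatever index tensor kernels.py builds):

import torch

edge_attr = torch.randint(0, 3, (10, 1), device="cuda:0")  # input lives on the GPU
bond_id = torch.tensor([0, 2, 5])                          # index tensor created on the CPU

# raises: RuntimeError: Input, output and indices must be on the current device
# out = torch.index_select(input=edge_attr, dim=0, index=bond_id)

# fix: create (or move) the index tensor on the same device as the input
out = torch.index_select(input=edge_attr, dim=0, index=bond_id.to(edge_attr.device))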

Solved. It turned out that I had some tensors created inside the model that were never moved to the device.
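
In case it helps anyone else, the usual fixes for this are either to create temporary tensors directly on the device of the incoming data, or to register constant tensors as buffers so they follow model.to(device). A minimal sketch (KernelLayer and its arguments are made up for illustration, not the exact code in kernels.py):

import torch
import torch.nn as nn

class KernelLayer(nn.Module):          # illustrative name, not the real class
    def __init__(self, num_edge_types):
        super().__init__()
        # constant tensors registered as buffers are moved by model.to(device)
        self.register_buffer("edge_type_ids", torch.arange(num_edge_types))

    def forward(self, edge_attr, bond_id):
        # temporaries should be created on (or moved to) the device of the inputs
        bond_id = bond_id.to(edge_attr.device)
        mask = torch.zeros(edge_attr.size(0), dtype=torch.bool, device=edge_attr.device)
        mask[bond_id] = True
        support = torch.index_select(edge_attr, 0, bond_id)
        return support, mask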