RuntimeError: Found dtype Long but expected Float in GCNConv

import torch
import torch.nn.functional as F
import pandas as pd
from torch_geometric.nn import GCNConv  # SAGEConv would be imported the same way

class Net(torch.nn.Module):
    def __init__(self, arg):
        super(Net, self).__init__()
        hidden_layer_dimension = arg
        print(data.num_node_features, hidden_layer_dimension)
        self.conv1 = GCNConv(data.num_node_features, hidden_layer_dimension)
        self.conv2 = GCNConv(hidden_layer_dimension, data.num_classes)
#         self.conv1 = SAGEConv(data.num_node_features, hidden_layer_dimension)
#         self.conv2 = SAGEConv(hidden_layer_dimension, data.num_classes)
        # self.float() casts the module's parameters, not the input data,
        # so it does not prevent the dtype error below
        self.float()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        # debug prints to inspect the input dtypes
        print(x.dtype)
        print(edge_index.dtype)
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = self.conv2(x, edge_index)

        return F.log_softmax(x, dim=1)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
hidden_dimensions = [2, 4, 8, 16, 64, 512]
accs = []
epoch_stable = []
for i in hidden_dimensions:
    print('For Hidden Dimension = ' + str(i))
    model = Net(i).to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
    model.train()
    train_accuracies = []
    for epoch in range(101):
        optimizer.zero_grad()
        out = model(data)
        print(out.dtype)
        loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
        loss.backward()
        optimizer.step()

        _, pred = model(data).max(dim=1)
        correct = float(pred[data.train_mask].eq(data.y[data.train_mask]).sum().item())
        acc = correct / data.train_mask.sum().item()
        train_accuracies.append(acc)
    
    # first epoch at which the training accuracy reaches its maximum
    epoch_stable.append(train_accuracies.index(max(train_accuracies)))
            
    _, pred = model(data).max(dim=1)
    correct = float(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
    acc = correct / data.test_mask.sum().item()
    accs.append(acc)
    print(acc)
    print()
    
hidden_dimensions = ['Hidden Dimension = ' + str(h) for h in hidden_dimensions]

df_hidden_dims = pd.DataFrame([accs, epoch_stable], columns=hidden_dimensions, index=['Accuracy', 'Number of Epochs'])

StackTrace

RuntimeError                              Traceback (most recent call last)
Input In [201], in <cell line: 6>()
     16 for epoch in range(101):
     17     optimizer.zero_grad()
---> 18     out = model(data)
     19     print(out.dtype)
     20     loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py:1194, in Module._call_impl(self, *input, **kwargs)
   1190 # If we don't have any hooks, we want to skip the rest of the logic in
   1191 # this function, and just call forward.
   1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1193         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194     return forward_call(*input, **kwargs)
   1195 # Do not call functions when jit is used
   1196 full_backward_hooks, non_full_backward_hooks = [], []

Input In [200], in Net.forward(self, data)
     16 print(x.dtype)
     17 print(edge_index.dtype)
---> 18 x = self.conv1(x, edge_index)
     19 x = F.relu(x)
     20 x = self.conv2(x, edge_index)

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py:1194, in Module._call_impl(self, *input, **kwargs)
   1190 # If we don't have any hooks, we want to skip the rest of the logic in
   1191 # this function, and just call forward.
   1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1193         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194     return forward_call(*input, **kwargs)
   1195 # Do not call functions when jit is used
   1196 full_backward_hooks, non_full_backward_hooks = [], []

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch_geometric\nn\conv\gcn_conv.py:176, in GCNConv.forward(self, x, edge_index, edge_weight)
    174 cache = self._cached_edge_index
    175 if cache is None:
--> 176     edge_index, edge_weight = gcn_norm(  # yapf: disable
    177         edge_index, edge_weight, x.size(self.node_dim),
    178         self.improved, self.add_self_loops, self.flow, x.dtype)
    179     if self.cached:
    180         self._cached_edge_index = (edge_index, edge_weight)

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch_geometric\nn\conv\gcn_conv.py:69, in gcn_norm(edge_index, edge_weight, num_nodes, improved, add_self_loops, flow, dtype)
     67 idx = col if flow == "source_to_target" else row
     68 deg = scatter_add(edge_weight, idx, dim=0, dim_size=num_nodes)
---> 69 deg_inv_sqrt = deg.pow_(-0.5)
     70 deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
     71 return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]

RuntimeError: Found dtype Long but expected Float

Based on the stacktrace:

---> 18     out = model(data)
     19     print(out.dtype)
     20     loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])

the error is raised in the forward pass of the model, as data seems to have a wrong dtype: a float32 tensor is expected, but you seem to be passing a long tensor.
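
A minimal sketch of the usual fix, assuming the node features live in data.x (the standard attribute of a PyG Data object): cast them to float32 before the forward pass.

```
# assumption: data.x was loaded as int64; GCNConv expects float features
data.x = data.x.float()  # same as data.x.to(torch.float32)
out = model(data)        # the forward pass now receives float32 features
```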

PS: you can post code snippets by wrapping them into three backticks ```.

Thank you for your reply.

However, the data consists of only int64 values. There are no long dtypes in the data.

int64 represents long in C/C++ and also in PyTorch: torch.long is just an alias for torch.int64.
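
A quick interpreter check confirms the aliasing; nothing here is specific to this thread:

```
import torch

print(torch.long == torch.int64)      # True: torch.long is an alias for torch.int64
print(torch.tensor([1, 2, 3]).dtype)  # torch.int64, the default integer dtype
```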


Thank you very much. I resolved the error by converting all the data to int32 instead of int64.

However, after this change I am getting IndexError: tensors used as indices must be long, byte or bool tensors.

StackTrace

IndexError                                Traceback (most recent call last)
Input In [43], in <cell line: 6>()
     16 for epoch in range(101):
     17     optimizer.zero_grad()
---> 18     out = model(data)
     19     print(out.dtype)
     20     loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py:1194, in Module._call_impl(self, *input, **kwargs)
   1190 # If we don't have any hooks, we want to skip the rest of the logic in
   1191 # this function, and just call forward.
   1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1193         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194     return forward_call(*input, **kwargs)
   1195 # Do not call functions when jit is used
   1196 full_backward_hooks, non_full_backward_hooks = [], []

Input In [42], in Net.forward(self, data)
     16 print(x.dtype)
     17 print(edge_index.dtype)
---> 18 x = self.conv1(x, edge_index)
     19 x = F.relu(x)
     20 x = self.conv2(x, edge_index)

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py:1194, in Module._call_impl(self, *input, **kwargs)
   1190 # If we don't have any hooks, we want to skip the rest of the logic in
   1191 # this function, and just call forward.
   1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1193         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194     return forward_call(*input, **kwargs)
   1195 # Do not call functions when jit is used
   1196 full_backward_hooks, non_full_backward_hooks = [], []

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch_geometric\nn\conv\gcn_conv.py:178, in GCNConv.forward(self, x, edge_index, edge_weight)
    176 cache = self._cached_edge_index
    177 if cache is None:
--> 178     edge_index, edge_weight = gcn_norm(  # yapf: disable
    179         edge_index, edge_weight, x.size(self.node_dim),
    180         self.improved, self.add_self_loops, self.flow, x.dtype)
    181     if self.cached:
    182         self._cached_edge_index = (edge_index, edge_weight)

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch_geometric\nn\conv\gcn_conv.py:62, in gcn_norm(edge_index, edge_weight, num_nodes, improved, add_self_loops, flow, dtype)
     57     edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
     58                              device=edge_index.device)
     61 if add_self_loops:
---> 62     edge_index, tmp_edge_weight = add_remaining_self_loops(
     63         edge_index, edge_weight, fill_value, num_nodes)
     64     assert tmp_edge_weight is not None
     65     edge_weight = tmp_edge_weight

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch_geometric\utils\loop.py:298, in add_remaining_self_loops(edge_index, edge_attr, fill_value, num_nodes)
    295         raise AttributeError("No valid 'fill_value' provided")
    297     inv_mask = ~mask
--> 298     loop_attr[edge_index[0][inv_mask]] = edge_attr[inv_mask]
    300     edge_attr = torch.cat([edge_attr[mask], loop_attr], dim=0)
    302 edge_index = torch.cat([edge_index[:, mask], loop_index], dim=1)

IndexError: tensors used as indices must be long, byte or bool tensors
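
For context, PyTorch tensor indexing (which add_remaining_self_loops applies to edge_index in the last frame above) only accepts long, byte, or bool index tensors, so converting edge_index to int32 breaks it. A minimal reproduction, independent of this thread's data:

```
import torch

t = torch.arange(5)
idx = torch.tensor([0, 2], dtype=torch.int32)

# t[idx] raises in this PyTorch version:
# IndexError: tensors used as indices must be long, byte or bool tensors
print(t[idx.long()])  # tensor([0, 2]): index tensors must be int64
```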

I understood that edge_index needs to stay int64, so I changed only x and y to float32.

Now it is working.

However, the loss function is now giving an error.

StackTrace

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Input In [350], in <cell line: 6>()
     18 out = model(data)
     19 print(out.shape)
---> 20 loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
     21 loss.backward()
     22 optimizer.step()

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\functional.py:2701, in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
   2699 if size_average is not None or reduce is not None:
   2700     reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2701 return torch._C._nn.nll_loss_nd(input, target, weight, _Reduction.get_enum(reduction), ignore_index)

RuntimeError: 0D or 1D target tensor expected, multi-target not supported
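
For reference, F.nll_loss expects the target to be a 1D tensor of class indices with shape [num_nodes] and dtype int64, so this error usually means data.y carries an extra dimension such as [num_nodes, 1]. A hedged sketch of the usual fix, assuming that shape (the earlier float32 conversion of y would also need to be undone, since class indices must stay int64):

```
# assumption: data.y has shape [num_nodes, 1] and/or a float dtype
data.y = data.y.view(-1).long()  # -> shape [num_nodes], dtype torch.int64
loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
```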