RuntimeError: expected scalar type Byte but found Float

I want to implement a GCN on my data. The data looks like this:

Data(pos=[4, 2], x=[4, 1080, 1920, 3], y=[4], edge_index=[2, 12], edge_attr=[12, 1])

My code:

# Set optimizer (adam)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# Define loss function
criterion = torch.nn.CrossEntropyLoss()
 
# Define train function
def train():
    model.train()

    for data in trainset:
        out = model(data.x.view(-1, 3), data.edge_index, data.batch)
        loss = criterion(out, data.y)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
 
# Define test function
def test(loader):
    model.eval()

    correct = 0
    for data in loader:
        out = model(data.x, data.edge_index, data.batch)
        pred = out.argmax(dim=1)
        correct += int((pred == data.y).sum())
    return correct / len(loader.dataset)
 
  
# Run for 200 epochs (the upper bound of range is exclusive)
for epoch in range(1, 201):
    train()
    train_acc = test(trainset)
    test_acc = test(testset)
    print(f'Epoch: {epoch:03d}, Train Acc: {train_acc:.4f}, Test Acc: {test_acc:.4f}')

The error:

RuntimeError                              Traceback (most recent call last)
/tmp/ipykernel_135/1724653464.py in <module>
     29 # Run for 200 epochs (range is exclusive in the upper bound)
     30 for epoch in range(1, 201):
---> 31     train()
     32     train_acc = test(trainset)
     33     test_acc = test(testset)

/tmp/ipykernel_135/1724653464.py in train()
      9 
     10     for data in trainset:
---> 11       out = model(data.pixel_values.view(-1, 3), data.edge_index, data.batch)
     12       loss = criterion(out, data.detection_score)
     13       loss.backward()

/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1100         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102             return forward_call(*input, **kwargs)
   1103         # Do not call functions when jit is used
   1104         full_backward_hooks, non_full_backward_hooks = [], []

/tmp/ipykernel_135/2620939698.py in forward(self, x, edge_index, batch)
     19       # 1. Obtain node embeddings
     20       px = data.pixel_values.view(-1, 3)
---> 21       x = self.conv1(px, edge_index)
     22       x = x.relu()
     23       x = self.conv2(x, edge_index)

/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1100         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102             return forward_call(*input, **kwargs)
   1103         # Do not call functions when jit is used
   1104         full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/conv/gcn_conv.py in forward(self, x, edge_index, edge_weight)
    180                     edge_index = cache
    181 
--> 182         x = self.lin(x)
    183 
    184         # propagate_type: (x: Tensor, edge_weight: OptTensor)

/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1100         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102             return forward_call(*input, **kwargs)
   1103         # Do not call functions when jit is used
   1104         full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/dense/linear.py in forward(self, x)
    107     def forward(self, x: Tensor) -> Tensor:
    108         """"""
--> 109         return F.linear(x, self.weight, self.bias)
    110 
    111     @torch.no_grad()

/usr/local/lib/python3.8/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
   1846     if has_torch_function_variadic(input, weight, bias):
   1847         return handle_torch_function(linear, (input, weight, bias), input, weight, bias=bias)
-> 1848     return torch._C._nn.linear(input, weight, bias)
   1849 
   1850 

RuntimeError: expected scalar type Byte but found Float

The type mismatch is raised in:

out = model(data.pixel_values.view(-1, 3), data.edge_index, data.batch)

Based on the code snippet, I guess that data.pixel_values is a ByteTensor (uint8) while a FloatTensor (float32) is expected. If that's the case, call .float() on this tensor. If not, check which tensor is the ByteTensor and cast it to a FloatTensor.
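For reference, a minimal sketch of the cast, assuming data.pixel_values holds raw uint8 image data (shapes taken from your Data object):

import torch

# Hypothetical stand-in for data.pixel_values: raw uint8 image data.
pixel_values = torch.randint(0, 256, (4, 1080, 1920, 3), dtype=torch.uint8)

# Cast to float32 before the forward pass; F.linear expects the input dtype
# to match the layer's float32 weights.
x = pixel_values.view(-1, 3).float()

# Optionally rescale to [0, 1]; raw 0-255 inputs usually train poorly.
x = x / 255.0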


I got this error, Sir:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
/tmp/ipykernel_67/846327309.py in <module>
     30 # Run for 200 epochs (range is exclusive in the upper bound)
     31 for epoch in range(1, 201):
---> 32     train()
     33     train_acc = test(trainset)
     34     test_acc = test(testset)

/tmp/ipykernel_67/846327309.py in train()
     10 
     11     for data in trainset:
---> 12       out = model(data.pixel_values.view(-1, 3).float(), data.edge_index, data.batch)
     13       loss = criterion(out, data.detection_score)
     14       loss.backward()

/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1100         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102             return forward_call(*input, **kwargs)
   1103         # Do not call functions when jit is used
   1104         full_backward_hooks, non_full_backward_hooks = [], []

/tmp/ipykernel_67/355969921.py in forward(self, x, edge_index, batch)
     25 
     26       # 2. Readout layer
---> 27       x = global_mean_pool(x, batch)  # [batch_size, hidden_channels]
     28 
     29       # 3. Apply a final classifier

/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/glob/glob.py in global_mean_pool(x, batch, size)
     46     """
     47 
---> 48     size = int(batch.max().item() + 1) if size is None else size
     49     return scatter(x, batch, dim=0, dim_size=size, reduce='mean')
     50 

AttributeError: 'NoneType' object has no attribute 'max'

It seems that batch in:

x = global_mean_pool(x, batch)
...
size = int(batch.max().item() + 1) if size is None else size

is None, so make sure it is a valid tensor before .max().item() is called on it.
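One common cause in PyG: data.batch is only populated when the graphs come out of a DataLoader, while iterating over a dataset directly yields plain Data objects whose batch attribute is None. A minimal sketch, assuming trainset is a list or dataset of Data objects (older PyG versions import DataLoader from torch_geometric.data instead):

from torch_geometric.loader import DataLoader

train_loader = DataLoader(trainset, batch_size=4, shuffle=True)

for data in train_loader:
    # data.batch now maps each node to its graph index, e.g. tensor([0, 0, 1, ...]),
    # which is what global_mean_pool needs to average the nodes of each graph.
    print(data.batch)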


Ok Sir. If my Data looks like this:

Data(x=[4, 1080, 1920, 3], y=[4], edge_index=[2, 12], edge_attr=[12, 1])

What should the num_node_features value be?

I don't know where num_node_features is used, so I also don't know how it relates to the input data or which dimension is treated as the feature dimension.
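That said, in the standard PyG graph classification examples num_node_features is simply the in_channels of the first GCNConv, so it has to match the last dimension of the x tensor that is actually passed to the model. A minimal sketch, assuming you keep flattening the pixels with .view(-1, 3) as in the snippets above:

from torch_geometric.nn import GCNConv

# With x.view(-1, 3) every node carries 3 features (the RGB channels),
# so the first convolution takes in_channels=3.
# 64 is an arbitrary hidden size chosen for illustration.
conv1 = GCNConv(in_channels=3, out_channels=64)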

Did you solve this error?