I want to implement GCN on my data.
the data looks like this:
Data(pos=[4, 2], x=[4, 1080, 1920, 3], y=[4], edge_index=[2, 12], edge_attr=[12, 1])
my code:
# Set optimizer (adam)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# Define loss function
criterion = torch.nn.CrossEntropyLoss()
# Initialize train function
def train():
model.train()
for data in trainset:
out = model(data.x.view(-1, 3), data.edge_index, data.batch)
loss = criterion(out, data.y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Define test function
def test(loader):
model.eval()
correct = 0
for data in loader:
out = model(data.x, data.edge_index, data.batch)
pred = out.argmax(dim=1)
correct += int((pred == data.y).sum())
return correct / len(loader.dataset)
# Run for 200 epochs (range is exclusive in the upper bound)
for epoch in range(1, 201):
train()
train_acc = test(trainset)
test_acc = test(testset)
print(f'Epoch: {epoch:03d}, Train Acc: {train_acc:.4f}, Test Acc: {test_acc:.4f}')
the error:
RuntimeError Traceback (most recent call last)
/tmp/ipykernel_135/1724653464.py in <module>
29 # Run for 200 epochs (range is exclusive in the upper bound)
30 for epoch in range(1, 201):
---> 31 train()
32 train_acc = test(trainset)
33 test_acc = test(testset)
/tmp/ipykernel_135/1724653464.py in train()
9
10 for data in trainset:
---> 11 out = model(data.pixel_values.view(-1, 3), data.edge_index, data.batch)
12 loss = criterion(out, data.detection_score)
13 loss.backward()
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
/tmp/ipykernel_135/2620939698.py in forward(self, x, edge_index, batch)
19 # 1. Obtain node embeddings
20 px = data.pixel_values.view(-1, 3)
---> 21 x = self.conv1(px, edge_index)
22 x = x.relu()
23 x = self.conv2(x, edge_index)
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/conv/gcn_conv.py in forward(self, x, edge_index, edge_weight)
180 edge_index = cache
181
--> 182 x = self.lin(x)
183
184 # propagate_type: (x: Tensor, edge_weight: OptTensor)
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/dense/linear.py in forward(self, x)
107 def forward(self, x: Tensor) -> Tensor:
108 """"""
--> 109 return F.linear(x, self.weight, self.bias)
110
111 @torch.no_grad()
/usr/local/lib/python3.8/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
1846 if has_torch_function_variadic(input, weight, bias):
1847 return handle_torch_function(linear, (input, weight, bias), input, weight, bias=bias)
-> 1848 return torch._C._nn.linear(input, weight, bias)
1849
1850
RuntimeError: expected scalar type Byte but found Float