Hi,
I am trying to use a sparse tensor as an adjacency matrix for graph algorithms. Here is the code:
import random

import torch
import torch.nn as nn

def makeSparseAdjMatrix(inputs, outputs, outdegree):
    """
    Create a sparse adjacency matrix for a bipartite graph with `inputs` input nodes,
    `outputs` output nodes, and `outdegree` out edges from every input node.
    """
    edgelist = makeBipartiteEdgelist(inputs, outputs, outdegree)
    row, col = zip(*edgelist)
    return torch.sparse_coo_tensor(torch.tensor([row, col], dtype=torch.int32),
                                   torch.ones(len(row)),
                                   torch.Size([inputs, outputs])).coalesce()  # .to_sparse_csr()
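# For reference, makeBipartiteEdgelist just returns (input_node, output_node) pairs
# with `outdegree` distinct random targets per input node; a rough sketch
# (not the exact code) looks like this:
def makeBipartiteEdgelist(inputs, outputs, outdegree):
    edgelist = []
    for i in range(inputs):
        for j in random.sample(range(outputs), outdegree):
            edgelist.append((i, j))
    return edgelist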
class basicNet(nn.Module):
    def __init__(self, adjacency, inputs, outputs, testNum):
        super(basicNet, self).__init__()
        self.adjacency = adjacency  # sparse (inputs x outputs) adjacency matrix
        self.inputs = inputs
        self.outputs = outputs
        self.x = nn.Parameter(torch.randn(inputs))

    def forward(self):
        # this is the call that raises the NotImplementedError below
        return torch.sparse.mm(self.x, self.adjacency, reduce='amax')
def runBasicNet(inputs, outputs, outdegree, testNum):
    """
    Run a basic net on a bipartite graph with `inputs` input nodes, `outputs` output
    nodes, and `outdegree` out edges from every input node.
    """
    adjacency = makeSparseAdjMatrix(inputs, outputs, outdegree)
    net = basicNet(adjacency, inputs, outputs, testNum)
    if torch.cuda.is_available():
        # adjacency is a plain attribute, so net.to() does not move it; move it explicitly
        net.adjacency = net.adjacency.to('cuda:0')
        net = net.to('cuda:0')
        print('data uses cuda')
    optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
    labels = torch.tensor([random.randint(0, 5) for i in range(outputs - testNum)])
    criterion = nn.CrossEntropyLoss()
    train_mask = torch.tensor([True] * (outputs - testNum) + [False] * testNum)
    for epoch in range(10):
        optimizer.zero_grad()
        result = net()
        loss = criterion(result[train_mask], labels)
        loss.backward()
        optimizer.step()
        print(f"Epoch: {epoch}, Loss: {loss}")
This is the error I get when running on the GPU:
NotImplementedError: Could not run 'aten::_sparse_mm_reduce_impl' with arguments from the 'SparseCUDA' backend.
When I try to run the same code on the CPU, I get:
NotImplementedError: Could not run 'aten::_sparse_mm_reduce_impl' with arguments from the 'SparseCPU' backend.
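As far as I can tell, the rest of the model is not involved; a stripped-down version of the same call pattern (sizes made up just for illustration) hits the same operator:

adj = torch.sparse_coo_tensor(torch.tensor([[0, 0, 1], [0, 1, 2]]),
                              torch.ones(3), torch.Size([2, 3])).coalesce()
x = torch.randn(2)
out = torch.sparse.mm(x, adj, reduce='amax')  # same NotImplementedError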
Where is it implemented?