Autograd for sparse matmul: getting either a CUDA memory leak or a 'buffers have already been freed' error

Hi,

When I try torch.mm with the following inputs:

import numpy as np
import torch
import torch.nn as nn


class SPMM(torch.autograd.Function):
    """
    Matrix multiplication of a sparse Variable with a dense Variable,
    returning a dense one. Added because there's no autograd for sparse
    tensors yet; no gradient is computed on the sparse weights.
    """
    def forward(self, sparse_weights, x):
        self.save_for_backward(sparse_weights)
        return torch.mm(sparse_weights, x)

    def backward(self, grad_output):
        sparse_weights, = self.saved_tensors
        # No gradient for the sparse weights, only for the dense input.
        return None, torch.mm(sparse_weights.t(), grad_output)
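
For reference, the call pattern I'm after looks like this (a minimal sketch with made-up sizes; the torch.mm on the sparse Variable is what raises the TypeError below):

i = torch.LongTensor([[0, 1, 2], [2, 0, 1]])
v = torch.FloatTensor([3.0, 4.0, 5.0])
A = torch.autograd.Variable(torch.sparse.FloatTensor(i, v, torch.Size([3, 3])))
x = torch.autograd.Variable(torch.randn(3, 3), requires_grad=True)
out = SPMM()(A, x)    # fresh instance per call; reusing one instance is,
                      # I think, what triggers 'buffers have already been freed'
out.sum().backward()  # gradient should reach x; A gets none

And here is the module that uses it: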

dim = 5

class NN(nn.Module):
    def __init__(self, dim=dim):
        super(NN, self).__init__()
        self.dim = dim
        self.A = torch.autograd.Variable(random_sparse(n=dim))
        self.w = torch.autograd.Variable(torch.Tensor(np.random.normal(0, 1, (dim, dim))))

        self.fc1 = nn.Linear(dim, dim)
        self.fc2 = nn.Linear(dim, dim)
        self.X = torch.autograd.Variable(torch.eye(dim))

        self.SPMM = SPMM()

    def f(self):
        return self.SPMM.forward(self.A, self.w)
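
Note that random_sparse is a small helper of mine that I haven't pasted; as a hypothetical stand-in, so the snippet above is self-contained, it does roughly this:

def random_sparse(n, density=0.2):
    # Hypothetical stand-in for my helper: builds an n x n
    # torch.sparse.FloatTensor with randomly placed normal entries.
    nnz = max(1, int(density * n * n))
    indices = torch.LongTensor(np.random.randint(0, n, (2, nnz)))
    values = torch.FloatTensor(np.random.normal(0, 1, nnz))
    return torch.sparse.FloatTensor(indices, values, torch.Size([n, n]))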

I get the following error message:
TypeError: Type torch.sparse.FloatTensor doesn't implement stateless method addmm

Any idea how this might be circumvented? It's driving me insane…
