Hi everyone. I’m trying to implement a custom layer like this:

```
import torch.nn as nn
import math
import torch
class LinLayer(nn.Module):
""" Custom Linear layer but mimics a standard linear layer """
def __init__(self, size_in, size_out):
super().__init__()
self.size_in, self.size_out = size_in, size_out
weights = torch.Tensor(size_out, size_in)
self.weights = nn.Parameter(weights) # nn.Parameter is a Tensor that's a module parameter.
bias = torch.Tensor(size_out)
self.bias = nn.Parameter(bias)
self.hebb = torch.zeros((size_out, size_in), requires_grad=False)
# initialize weights and biases
nn.init.kaiming_uniform_(self.weights, a=math.sqrt(5)) # weight init
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weights)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound) # bias init
def forward(self, x):
hebb = self.hebb
w_times_x = torch.mm(x, self.weights.t() + hebb.t())
yout = torch.add(w_times_x, self.bias) # w times x + b
self.hebb = torch.matmul(yout.t(), x)
return yout
```

the problem is that when I try to update self.hebb = torch.matmul(yout.t(), x) at the end I get a “RuntimeError: Trying to backward through the graph a second time (or directly access saved tensors after they have already been freed).”. I have already tried detaching the self.hebb tensor but it make my loss a nan. How can I get this code to work?