Simple skip connection code not working in PyTorch

My simple code is below.

import torch
import torch.nn as nn

class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()

        edim1 = n_imgs  # n_imgs, simg_size, img_size are defined elsewhere in my script

        self.input_layer = nn.Sequential(
            nn.LayerNorm(simg_size),
            nn.Linear(simg_size, edim1, bias=False),
            nn.LeakyReLU(0.2, inplace=True)
        )

        self.h_layer = nn.Sequential(
            nn.LayerNorm(edim1),
            nn.Linear(edim1, edim1, bias=False),
            nn.LeakyReLU(0.2, inplace=True)
        )

        self.output_layer = nn.Sequential(
            nn.LayerNorm(edim1),
            nn.Linear(edim1, img_size, bias=False),
            nn.Tanh()
        )

    def decoder(self, img):
        z1 = self.input_layer(img)
        z2 = self.h_layer(z1)
        z2 += z1  # THE LINE PRODUCING THE ERROR
        out = self.output_layer(z2)
        return out

THE ERROR

RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [64, 64]], which is output 0 of LeakyReluBackward1, is at version 2; expected version 1 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
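
As the hint suggests, enabling anomaly detection makes the backward-pass traceback point at the offending forward-pass operation. A minimal sketch of how that might look in a training step (model, criterion, img, and target are placeholder names, not from the original code):

import torch

torch.autograd.set_detect_anomaly(True)  # slows execution; enable only while debugging

out = model.decoder(img)       # forward pass through the autoencoder
loss = criterion(out, target)  # hypothetical reconstruction loss
loss.backward()                # the traceback now points at z2 += z1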

@Emmanuel357
Replicating the error in a simpler way (torch.autograd.Variable is deprecated; a plain tensor created with requires_grad=True behaves the same):

a = torch.autograd.Variable(torch.Tensor([1]), requires_grad=True)
b = torch.autograd.Variable(torch.Tensor([2]), requires_grad=True)
print([a, b])
a += b
print(a)

The first print succeeds:

[tensor([1.], requires_grad=True), tensor([2.], requires_grad=True)]

then a += b raises:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
/tmp/ipykernel_4808/4177809845.py in <module>
      2 b=torch.autograd.Variable(torch.Tensor([2]), requires_grad=True)
      3 print([a, b])
----> 4 a+=b
      5 print(a)

RuntimeError: a leaf Variable that requires grad is being used in an in-place operation.

SOLUTION

a is a leaf tensor, and autograd forbids in-place modification of a leaf that requires grad because its original value may be needed for gradient computation. Use an out-of-place addition, which creates a new tensor instead:

a = a + b
print(a)
tensor([3.], grad_fn=<AddBackward0>)
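
Applying the same fix to the autoencoder means replacing the in-place += in decoder with an out-of-place add (a minimal sketch, assuming the rest of the class is unchanged):

def decoder(self, img):
    z1 = self.input_layer(img)
    z2 = self.h_layer(z1)
    z2 = z2 + z1  # new tensor; leaves the LeakyReLU output needed for backward untouched
    out = self.output_layer(z2)
    return out

Note that the two errors differ slightly: the toy example trips on an in-place op on a leaf tensor, while the original code trips because z2 += z1 overwrites the LeakyReLU output that autograd needs during the backward pass. The out-of-place add resolves both.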