Customized weights for a linear layer prevent them from being updated after optimizer.step()

The issue is described in the title and demonstrated by the piece of code below:
l3's weights never change across iterations, so the if condition at the end of the loop is never met.

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
dtype = torch.float32
X = torch.tensor([[1, 2, 3, 4, 5, 6]], dtype=dtype)
Y = torch.tensor([[1, 4, 9, 16, 25, 36]], dtype=dtype)


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.l1 = torch.nn.Linear(6, 6)
        self.l2 = torch.nn.Linear(6, 6)
        self.l3 = torch.nn.Linear(6, 6)
        self.l4 = torch.nn.Linear(6, 6)

    def forward(self, x):
        x1 = self.l1(x)
        x1 = F.relu(x1)
        x1 = self.l2(x1)
        x2 = x1
        x2 = F.relu(x2)
        print('l3 inside forward before assignment', self.l3.weight[2, 3])
        self.l3.weight = nn.Parameter(x1.repeat(6, 1))  # this is the cause: it replaces the Parameter object the optimizer is tracking
        print('l3 inside forward after assignment', self.l3.weight[2, 3])
        x2 = self.l3(x2)
        x2 = F.relu(x2)
        x2 = self.l4(x2)
        return x2, x1


model = Model()
optimizer = optim.SGD(model.parameters(), lr=.05, momentum=0.9)  # model.parameters()
Loss = nn.MSELoss()
n_iters = 100
for epoch in range(n_iters):
    optimizer.zero_grad()
    y_pred, x1 = model(X)
    loss = Loss(y_pred, Y)
    print('l3 outside forward', model.l3.weight[2, 3])
    e = model.l3.weight.detach().clone()  # snapshot of l3's weights before backward/step
    loss.backward()
    optimizer.step()
    print('l3 after backward and step', model.l3.weight[2, 3])
    print('e (snapshot of l3) after backward and step', e[2, 3])
    if not torch.equal(e, model.l3.weight.data):  # check whether any of l3's weights changed
        print('something has changed in l3 :)')
    print('')

The reason is that self.l3.weight = nn.Parameter(...) replaces the Parameter object, while the optimizer still holds a reference to the old one, so optimizer.step() keeps updating a tensor the model no longer uses. The weight assignment must be executed as follows instead:

self.l3.weight.data = x1.repeat(6, 1)
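
For reference, a minimal sketch of forward() with that fix applied; the with torch.no_grad() / copy_() form used here is an equivalent alternative to assigning through .data, and it keeps the same Parameter object registered with the optimizer (the rest of the method is unchanged from the code above):

    def forward(self, x):
        x1 = self.l1(x)
        x1 = F.relu(x1)
        x1 = self.l2(x1)
        x2 = F.relu(x1)
        # overwrite the values in place; the Parameter object stays the same,
        # so optimizer.step() can still update self.l3.weight
        with torch.no_grad():
            self.l3.weight.copy_(x1.repeat(6, 1))
        x2 = self.l3(x2)
        x2 = F.relu(x2)
        x2 = self.l4(x2)
        return x2, x1

With this version, the snapshot comparison in the training loop reports that l3's weights do change after optimizer.step().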