Model parameters used for calculating the weight are not updated

Hi all, please help me.
I want to make a layer whose weight (and bias) is based on another, frozen weight. Say I have a frozen weight FW; my layer's weight should then be FW + D, where D is a trainable parameter. I want only D to be updated when I later train the model.
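
In other words, the computation I'm after is roughly the following (a minimal functional sketch with made-up shapes, just to show the intended math):

import torch
import torch.nn.functional as F

FW = torch.randn(10, 100)                     # frozen weight, no grad
Fb = torch.randn(10)                          # frozen bias, no grad
D = torch.zeros(10, 100, requires_grad=True)  # trainable weight offset
d = torch.zeros(10, requires_grad=True)       # trainable bias offset

x = torch.rand(100)
y = F.linear(x, FW + D, Fb + d)  # y = x @ (FW + D).T + (Fb + d)
y.sum().backward()
print(D.grad is not None)  # True: only the offsets receive gradients
print(FW.requires_grad)    # False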

Here is my attempt, simplified for illustration:

import torch
import torch.nn as nn

frozen = nn.Linear(100, 10)
frozen.weight.requires_grad = False
frozen.bias.requires_grad = False

class Net(nn.Module):

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(100, 10)
        # trainable offsets for the weight and the bias
        self.dw = nn.Parameter(torch.tensor(1.0))
        self.db = nn.Parameter(torch.tensor(1.0))

    def forward(self, x):
        # intended: weight = frozen.weight + dw, bias = frozen.bias + db
        self.fc.weight = nn.Parameter(frozen.weight + self.dw)
        self.fc.bias = nn.Parameter(frozen.bias + self.db)
        return torch.sigmoid(self.fc(x))
    
model = Net()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
x = torch.rand(100)
y = torch.tensor([0] * 9 + [1], dtype=torch.float32)  # target as class probabilities

for _ in range(10):
    out = model(x)
    loss = criterion(out, y)
    print(loss)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

But when I run this code, the model doesn't train: self.dw and self.db never change. I am not sure whether my concept is wrong (so it's simply not possible to train D this way) or whether I made a mistake in the implementation.
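
For what it's worth, inspecting the gradients after a backward pass (a quick diagnostic on the code above) suggests the gradient never reaches self.dw, because wrapping the sum in nn.Parameter() creates a new leaf tensor that is detached from the graph:

out = model(x)
loss = criterion(out, y)
loss.backward()
print(model.dw.grad)         # None: no gradient flows back into dw
print(model.fc.weight.grad)  # the freshly created Parameter gets the gradient instead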

Update:
I tried to implement this with nn.utils.parametrize, but it still doesn't work:

frozen = nn.Linear(100, 10)
frozen.weight.requires_grad = False
frozen.bias.requires_grad = False

class Adder(nn.Module):
    def __init__(self, delta, frozen):
        super().__init__()
        self.delta = nn.Parameter(torch.tensor(delta))
        self.frozen = frozen

    def forward(self, x):
        # parametrization: ignore the incoming tensor, return frozen + delta
        return self.frozen + self.delta

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(100, 10)

    def forward(self, x):
        # note: this registers new parametrizations on every forward call
        nn.utils.parametrize.register_parametrization(self.fc, "weight", Adder(1.0, frozen.weight))
        nn.utils.parametrize.register_parametrization(self.fc, "bias", Adder(1.0, frozen.bias))
        return torch.sigmoid(self.fc(x))
    
model = Net()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
x = torch.rand(100)
y = torch.tensor([0] * 9 + [1], dtype=torch.float32)  # target as class probabilities

for _ in range(10):
    out = model(x)
    loss = criterion(out, y)
    print(loss)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
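
Checking what the optimizer actually sees in this version (again, just my own diagnostic, run right after creating the model and the optimizer, instead of the training loop): the Adder.delta parameters are only created inside forward, after the optimizer has been built, so SGD never receives them:

out = model(x)  # the first forward registers the parametrizations
opt_params = {id(p) for g in optimizer.param_groups for p in g["params"]}
deltas = [p for name, p in model.named_parameters() if "delta" in name]
print(len(deltas))                               # 2: one delta for weight, one for bias
print(any(id(p) in opt_params for p in deltas))  # False: SGD never sees them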

Thank you for any responses.