Problem with backpropagation

Can someone point out the mistake in my code? The loss does not change and the parameter b being optimized is never updated:

import torch
from torch.nn import Parameter

if __name__ == '__main__':
    a = torch.arange(start=1, end=5, dtype=torch.float32)
    b = torch.arange(start=4, end=0, step=-1, dtype=torch.float32)

    optim = torch.optim.Adam([Parameter(b, requires_grad=True)], lr=1e-3)
    b.requires_grad = True
    b.retain_grad()

    for i in range(100):
        optim.zero_grad()
        loss = a @ b.t()
        loss.backward()
        optim.step()
        print(loss)

Create b directly as an nn.Parameter object and it should work.
Currently you are passing a temporary Parameter object to the optimizer while the loss is computed from b, so loss.backward() puts the gradient on b, the temporary Parameter inside the optimizer never receives a gradient, and step() has nothing to apply, which means the b tensor is never updated.
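A minimal sketch of that fix (names are only illustrative): the key point is that the same object is used both in the optimizer and in the loss.

import torch
from torch import nn

a = torch.arange(start=1, end=5, dtype=torch.float32)
# Create b directly as a Parameter so the optimizer and the loss see the same object
b = nn.Parameter(torch.arange(start=4, end=0, step=-1, dtype=torch.float32))

optim = torch.optim.Adam([b], lr=1e-3)

for i in range(100):
    optim.zero_grad()
    loss = a @ b      # gradient lands on b itself
    loss.backward()
    optim.step()      # now updates b
    print(loss.item())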

It works now:

import torch
from torch import nn

if __name__ == '__main__':
    a = torch.arange(start=1, end=5, dtype=torch.float32)
    b = torch.arange(start=4, end=0, step=-1, dtype=torch.float32)
    b_p = nn.Parameter(b)

    optim = torch.optim.Adam([b_p], lr=1e-3)
    # Note: these two lines are leftovers from the original version and are no longer needed;
    # the gradient is tracked on b_p, which is what the optimizer holds.
    b.requires_grad = True
    b.retain_grad()

    for i in range(100):
        optim.zero_grad()
        loss = a @ b_p
        loss.backward()
        optim.step()
        print(loss, b)
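
A side note on why print(loss, b) shows b changing even though only b_p is optimized: nn.Parameter(b) wraps b's existing storage rather than copying it, so the optimizer's in-place updates to b_p are visible through b as well. A quick sanity check, assuming the script above has just run:

print(b.data_ptr() == b_p.data_ptr())  # True: b and b_p share the same storage
print(b_p.grad)                        # the gradient now lives on b_p
print(b.grad)                          # b itself is not in the graph, so this stays None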