MultivariateNormal backward error with respect to covariance

Here is a simple piece of code that tries to learn the covariance of a 2D Gaussian.

import torch
import torch.nn as nn
from torch.optim import Adam

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        # single learnable off-diagonal entry; nn.Parameter already sets requires_grad
        self.cov = nn.Parameter(torch.tensor([0.]))
        # covariance matrix [[1, cov], [cov, 1]], assembled once here in __init__
        self.cur_cov = torch.cat(
            (torch.tensor([1.0]), self.cov, self.cov, torch.tensor([1.0])),
            dim=0).reshape(2, 2)
        # zero-mean 2D Gaussian with the covariance above
        self.dist = torch.distributions.multivariate_normal.MultivariateNormal(
            torch.tensor([0., 0.]), self.cur_cov)

    def forward(self, x):
        return self.dist.log_prob(x)

model = Net()
op = Adam(model.parameters(), lr=0.1)

# two 2D observations; float dtype so log_prob gets floating-point input
data = torch.tensor([1., 2., 3., 5.]).reshape(-1, 2)
for i in range(10):
    op.zero_grad()
    log_p = model(data)           # log-density of each observation
    loss = -torch.sum(log_p)      # negative log-likelihood
    loss.backward()
    op.step()

The code raises the following error on the second pass through the loop:

RuntimeError: Trying to backward through the graph a second time, but the saved intermediate results have already been freed. Specify retain_graph=True when calling backward the first time.
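
If I understand the error correctly (this is my assumption), backward is being run a second time through a graph whose buffers were already freed: the distribution, and with it the graph connecting self.cov to the log-probability, is built only once in __init__, so every iteration reuses that single graph. A stripped-down sketch that reproduces the same message:

import torch

cov = torch.tensor([0.], requires_grad=True)
y = (cov ** 2).sum()   # the graph is built once, before the loop
for i in range(2):
    y.backward()       # the second call raises the same RuntimeError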

However, when I add the suggested retain_graph=True flag:

for i in range(10):
    op.zero_grad()
    log_p = model(data)
    loss = -torch.sum(log_p)
    loss.backward(retain_graph=True)
    op.step()
    print(loss.item())

the loop now runs without errors, but the printed loss and model.cov do not change between iterations. What is causing the original error, and why does the retain_graph workaround not actually train the covariance?
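
For reference, here is the variant I would try next: a sketch, under my assumption that the covariance and the distribution must be rebuilt from the current parameter value on every forward pass so that each iteration gets a fresh graph. With this version both the loss and model.cov change between iterations:

import torch
import torch.nn as nn
from torch.optim import Adam

class Net2(nn.Module):
    def __init__(self):
        super().__init__()
        self.cov = nn.Parameter(torch.tensor([0.]))

    def forward(self, x):
        # rebuild the covariance and the distribution from the current
        # parameter on every call, so each backward sees a fresh graph
        cur_cov = torch.cat(
            (torch.tensor([1.0]), self.cov, self.cov, torch.tensor([1.0])),
            dim=0).reshape(2, 2)
        dist = torch.distributions.MultivariateNormal(
            torch.tensor([0., 0.]), cur_cov)
        return dist.log_prob(x)

model = Net2()
op = Adam(model.parameters(), lr=0.1)
data = torch.tensor([1., 2., 3., 5.]).reshape(-1, 2)
for i in range(10):
    op.zero_grad()
    loss = -torch.sum(model(data))
    loss.backward()               # no retain_graph needed now
    op.step()
    print(loss.item(), model.cov.item())

(Note that nothing constrains self.cov to (-1, 1) here, so the covariance can lose positive definiteness if training runs long.) Is rebuilding the distribution inside forward really the intended pattern, or is there a way to keep it in __init__?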