How to get a prediction interval using Monte Carlo Dropout. RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

Hi. I have a problem here.
I am trying to get a prediction interval using Monte Carlo dropout.
However, I get the following error when I call loss.backward() while running the code below.
I do not know why; any reply would be appreciated.
Thanks.

# Imports assumed by the snippet below (Encoder/Encoder1/Encoder2, device,
# train_idx/test_idx, seed_worker, and g are defined elsewhere in my code):
import torch
import torch.nn as nn
from torch import optim, utils
from scipy.stats import norm
from tqdm import tqdm

class Net(nn.Module):
    def __init__(self, x1, x2, p, m, mode=0):
        super().__init__()
        input_dim = p*m
        hidden_dim = 128
        self.dropout = nn.Dropout(p=0.1)
        if mode == 1:
            self.embed = Encoder1(x1, x2, p)
        elif mode == 2:
            self.embed = Encoder2(x1, x2, p)
        else:
            self.embed = Encoder(x1, x2, p)

        self.share_block = nn.Sequential(
            # ... (layers omitted)
        )
        self.head = nn.Sequential(
            nn.BatchNorm1d(hidden_dim),
            nn.utils.weight_norm(nn.Linear(hidden_dim, 1))
        )

    def forward(self, x):
        x = self.share_block(x)
        _out = self.head(x)
        _out = self.dropout(_out)

        return _out

    def predict(self, x1, x2, alpha=0.95, samples=100):
        x1 = x1.to(device)
        x2 = x2.to(device)
        x = self.embed(x1, x2)
        z_c = norm.ppf(alpha)
        samples_ = [self.forward(x).detach()
                    for _ in range(samples)]
        pred_sample = torch.cat(samples_, dim=1)
        pred_mean = torch.mean(pred_sample, dim=1)
        pred_std = z_c * torch.std(pred_sample, dim=1)

        return pred_mean, pred_std

def train_and_test_gpu(model, num_epochs, dataset):
    train_dataset = utils.data.Subset(dataset, train_idx)
    train_loader = utils.data.DataLoader(
        train_dataset, batch_size=64, worker_init_fn=seed_worker, generator=g, shuffle=True)

    test_dataset = utils.data.Subset(dataset, test_idx)
    test_loader = utils.data.DataLoader(
        test_dataset, batch_size=64, shuffle=False)

    optimizer = optim.Adam(model.parameters(), lr=1e-6)
    criterion = nn.MSELoss()

    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=1e-3,
        steps_per_epoch=len(train_loader),
        epochs=num_epochs,
    )

    model.train()
    for epoch in tqdm(range(num_epochs)):
        for x1, x2, y in train_loader:

            x1 = x1.to(device)
            x2 = x2.to(device)
            y = torch.Tensor(y).to(device)

            optimizer.zero_grad()
            y_pred, _ = model.predict(x1, x2)

            loss = criterion(y_pred, y)
            loss.backward()  # <-- error occurred here
            optimizer.step()
            scheduler.step()

    model.eval()
    with torch.no_grad():
        y_true_list = []
        y_pred_list = []
        for x1, x2, y_true in test_loader:
            x1 = torch.Tensor(x1).to(device)
            x2 = torch.Tensor(x2).to(device)

            y_pred, _ = model.predict(x1, x2)
            y_pred_list.extend(y_pred.detach().flatten().cpu().tolist())
            y_true_list.extend(y_true.detach().cpu().tolist())
    return y_true_list, y_pred_list

In your predict method you are explicitly detaching the tensors from the computation graph in:

samples_ = [self.forward(x).detach()
            for _ in range(samples)]

so samples_ no longer has a valid grad_fn (it is not attached to any computation graph), and thus the backward call fails.
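A minimal sketch of the fix, assuming you want gradients during training and Monte Carlo sampling only at inference: call the model's forward (which keeps the computation graph) inside the training loop, and reserve predict with its .detach() for evaluation. The squeeze(1) is an assumption about matching your target shape:

# Inside train_and_test_gpu, replace the model.predict call with a plain forward pass:
for x1, x2, y in train_loader:
    x1 = x1.to(device)
    x2 = x2.to(device)
    y = y.to(device)

    optimizer.zero_grad()
    x = model.embed(x1, x2)                 # same embedding step predict uses
    y_pred = model(x)                       # forward pass, still attached to the graph
    loss = criterion(y_pred.squeeze(1), y)  # (batch, 1) -> (batch,) to match y
    loss.backward()                         # works: y_pred now has a grad_fn
    optimizer.step()
    scheduler.step()

One more caveat: model.eval() disables nn.Dropout, so the Monte Carlo samples in predict will all be identical at test time. For genuine MC dropout you would keep the dropout layer in train mode while sampling, e.g. call self.dropout.train() before the sampling loop and wrap the loop in torch.no_grad().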

Thanks for the reply. My code now runs normally!