IndexError: pop from empty list

I want to add Opacus to my model, but when I start the second round of training, the following error occurs (a single round of training works fine):

loss.backward()
  File "/mnt/DataDisk/conda/envs/syft/lib/python3.7/site-packages/torch/tensor.py", line 245, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
  File "/mnt/DataDisk/conda/envs/syft/lib/python3.7/site-packages/torch/autograd/__init__.py", line 147, in backward
    allow_unreachable=True, accumulate_grad=True)  # allow_unreachable flag
  File "/mnt/DataDisk/xierong/opacus/opacus/grad_sample/grad_sample_module.py", line 199, in capture_backprops_hook
    module, backprops, loss_reduction, batch_first
  File "/mnt/DataDisk/xierong/opacus/opacus/grad_sample/grad_sample_module.py", line 237, in rearrange_grad_samples
    A = module.activations.pop()
IndexError: pop from empty list

I have no idea what is going wrong; could you give me some advice?

My code is as follows:
def train_model(self, train_loader, test_loader, save_name=None):
    self.model = self.model.to(self.device)
    optimizer = optim.Adam(self.model.parameters(), lr=self.args['lr'])
    if self.args['optim'] == "SGD":
        optimizer = optim.SGD(self.model.parameters(), lr=self.args['lr'], momentum=0)

    if self.args['is_dp_defense']:
        privacy_engine = PrivacyEngine(secure_mode=False)

        self.model, optimizer, train_loader = privacy_engine.make_private(
            module=self.model,
            optimizer=optimizer,
            data_loader=train_loader,
            noise_multiplier=0.5,
            max_grad_norm=1.0,
        )


    criterion = nn.CrossEntropyLoss()
    losses = []
    run_result = []

    self.model.train()

    for epoch in range(self.args['num_epochs']):
        losses = []
        for data, target in train_loader:

            data, target = data.to(self.device), target.to(self.device)
            optimizer.zero_grad()
            output = self.model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())

        if self.args['is_dp_defense']:
            epsilon = privacy_engine.accountant.get_epsilon(delta=1e-5)
            print(
                f"Train Epoch: {epoch} \t"
                f"Loss: {np.mean(losses):.6f} "
                f"(ε = {epsilon:.2f}, δ = 1e-5)"
            )
        # else:
        #     print(f"Train Epoch: {epoch} \t Loss: {np.mean(losses):.6f}")

        train_acc = self.test_model_acc(train_loader)
        test_acc = self.test_model_acc(test_loader)
        print('epoch %s: train acc %s | test acc %s' % (epoch, round(train_acc, 4), round(test_acc, 4)))
        # self.logger.debug('epoch %s: train acc %s | test acc %s | ovf %s' % (epoch, train_acc, test_acc, train_acc - test_acc))
        run_result.append([epoch, np.mean(losses), train_acc, test_acc])
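
For context, the way the two "rounds" are launched boils down to something like the self-contained sketch below. This is not my exact script: the tiny linear model, the random data, and the hyperparameters are placeholders, but the flow mirrors train_model above, i.e. every round creates a fresh PrivacyEngine and passes the same model object through make_private again before the inner training loop runs:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from opacus import PrivacyEngine

# Toy stand-ins for my real model and dataset
model = nn.Linear(8, 2)
dataset = TensorDataset(torch.randn(32, 8), torch.randint(0, 2, (32,)))
criterion = nn.CrossEntropyLoss()

for round_idx in range(2):  # each iteration corresponds to one call of train_model
    train_loader = DataLoader(dataset, batch_size=8)
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    privacy_engine = PrivacyEngine(secure_mode=False)
    # Like train_model above, every round wraps the current model again
    model, optimizer, train_loader = privacy_engine.make_private(
        module=model,
        optimizer=optimizer,
        data_loader=train_loader,
        noise_multiplier=0.5,
        max_grad_norm=1.0,
    )
    model.train()
    for data, target in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(data), target)
        loss.backward()  # in my real run, the IndexError is raised here during the second round
        optimizer.step()

Is calling make_private once per round the wrong way to use PrivacyEngine, or should this pattern work and the error points at something else in my setup?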