IndexError: tuple index out of range during backward pass

I was trying to attack a model with adversarial examples
and got IndexError: tuple index out of range during the backward pass:

     35         # Calculate gradients of model in backward pass
---> 36         loss.backward()
     37 
     38         # Collect datagrad

~/anaconda3/envs/yoon/lib/python3.6/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
    116                 products. Defaults to ``False``.
    117         """
--> 118         torch.autograd.backward(self, gradient, retain_graph, create_graph)
    119 
    120     def register_hook(self, hook):

~/anaconda3/envs/yoon/lib/python3.6/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
     91     Variable._execution_engine.run_backward(
     92         tensors, grad_tensors, retain_graph, create_graph,
---> 93         allow_unreachable=True)  # allow_unreachable flag
     94 
     95 

~/anaconda3/envs/yoon/lib/python3.6/site-packages/torch/autograd/function.py in apply(self, *args)
     75 
     76     def apply(self, *args):
---> 77         return self._forward_cls.backward(self, *args)
     78 
     79 

~/Block-wise-Scrambled-Image-Recognition/shakedrop.py in backward(ctx, grad_output)
     25     @staticmethod
     26     def backward(ctx, grad_output):
---> 27         gate = ctx.saved_tensors[0]
     28         if gate.item() == 0:
     29             beta = torch.cuda.FloatTensor(grad_output.size(0)).uniform_(0, 1)

IndexError: tuple index out of range

but I can still get the loss value:

fgsm
output is tensor([[-1.0937, -2.6928,  1.2549,  2.1249,  1.4102,  2.1192,  1.3507, -0.2068,
         -1.5742, -2.8067]], device='cuda:0', grad_fn=<AddmmBackward>)
target is tensor([3], device='cuda:0')
tensor(-2.1249, device='cuda:0', grad_fn=<NllLossBackward>)
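One thing visible in that printout: the loss is exactly -output[0][3]. F.nll_loss expects log-probabilities and just negates the target entry of its input, and this model's forward returns raw logits (hence grad_fn=<AddmmBackward>), so the loss can go negative. If that is not intended, the usual pattern is:

import torch.nn.functional as F

# F.nll_loss(x, target) is just -x[range(N), target].mean(),
# which is why it returned -output[0, 3] == -2.1249 on raw logits.
loss = F.nll_loss(F.log_softmax(output, dim=1), target)
# ...or equivalently:
loss = F.cross_entropy(output, target)  # == nll_loss(log_softmax(output, dim=1), target)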

The test code is below (batch size is 1 to check whether the original input is classified correctly):

def test(model, device, test_loader, epsilon, attack_name):

    # Accuracy counter
    correct = 0
    adv_examples = []
    print(attack_name)
    # Loop over all examples in test set
    for data, target in test_loader:

        # Send the data and label to the device
        data, target = data.to(device), target.to(device)

        # Set requires_grad attribute of tensor. Important for Attack
        data.requires_grad = True

        # Forward pass the data through the model
        output = model(data)
        print("output is",output)
        print("target is", target)
        init_pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability

        # If the initial prediction is wrong, don't bother attacking, just move on
        if init_pred.item() != target.item():
            continue
        # Calculate the loss
        loss = F.nll_loss(output, target)
        print(loss)

        # Zero all existing gradients
        model.zero_grad()

        # Calculate gradients of model in backward pass
        loss.backward()

        # Collect datagrad
        data_grad = data.grad.data
        # Call attack
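(The snippet is cut off at the attack call. For reference, the comments match the official PyTorch FGSM tutorial, where the step after collecting data_grad looks roughly like this sketch:)

def fgsm_attack(image, epsilon, data_grad):
    # FGSM: perturb each pixel by epsilon in the direction that increases the loss
    sign_data_grad = data_grad.sign()
    perturbed_image = image + epsilon * sign_data_grad
    # Clamp so the adversarial example stays a valid image in [0, 1]
    return torch.clamp(perturbed_image, 0, 1)

# ...inside the loop, after data_grad = data.grad.data:
perturbed_data = fgsm_attack(data, epsilon, data_grad)
output = model(perturbed_data)                 # re-classify the perturbed image
final_pred = output.max(1, keepdim=True)[1]
if final_pred.item() == target.item():
    correct += 1                               # attack failed, model still correct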

The dataset is CIFAR-10, and the model is a PyramidNet with ShakeDrop:

class ShakeBasicBlock(nn.Module):

    def __init__(self, in_ch, out_ch, stride=1, p_shakedrop=1.0):
        super(ShakeBasicBlock, self).__init__()
        self.downsampled = stride == 2
        self.branch = self._make_branch(in_ch, out_ch, stride=stride)
        self.shortcut = nn.AvgPool2d(2) if self.downsampled else None
        self.shake_drop = ShakeDrop(p_shakedrop)

    def forward(self, x):
        h = self.branch(x)
        h = self.shake_drop(h)
        h0 = x if not self.downsampled else self.shortcut(x)
        pad_zero = Variable(torch.zeros(h0.size(0), h.size(1) - h0.size(1), h0.size(2), h0.size(3)).float()).cuda()
        h0 = torch.cat([h0, pad_zero], dim=1)

        return h + h0

    def _make_branch(self, in_ch, out_ch, stride=1):
        return nn.Sequential(
            nn.BatchNorm2d(in_ch),
            nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=stride, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1, stride=1, bias=False),
            nn.BatchNorm2d(out_ch))


class ShakePyramidNet(nn.Module):

    def __init__(self, depth=110, alpha=270, label=10):
        super(ShakePyramidNet, self).__init__()
        in_ch = 16
        # for BasicBlock
        n_units = (depth - 2) // 6
        in_chs = [in_ch] + [in_ch + math.ceil((alpha / (3 * n_units)) * (i + 1)) for i in range(3 * n_units)]
        block = ShakeBasicBlock

        self.in_chs, self.u_idx = in_chs, 0
        self.ps_shakedrop = [1 - (1.0 - (0.5 / (3 * n_units)) * (i + 1)) for i in range(3 * n_units)]

        self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
        self.bn_in = nn.BatchNorm2d(in_chs[0])
        self.layer1 = self._make_layer(n_units, block, 1)
        self.layer2 = self._make_layer(n_units, block, 2)
        self.layer3 = self._make_layer(n_units, block, 2)
        self.bn_out = nn.BatchNorm2d(in_chs[-1])
        self.fc_out = nn.Linear(in_chs[-1], label)

        # Initialize parameters
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        h = self.bn_in(self.c_in(x))
        feature = h
        h = self.layer1(h)
        h = self.layer2(h)
        h = self.layer3(h)
        h = F.relu(self.bn_out(h))
        h = F.avg_pool2d(h, 8)
        h = h.view(h.size(0), -1)
        h = self.fc_out(h)
        return h

    def _make_layer(self, n_units, block, stride=1):
        layers = []
        for i in range(int(n_units)):
            layers.append(block(self.in_chs[self.u_idx], self.in_chs[self.u_idx+1],
                                stride, self.ps_shakedrop[self.u_idx]))
            self.u_idx, stride = self.u_idx + 1, 1
        return nn.Sequential(*layers)
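For what it's worth, here is a minimal way to reproduce the failure without the data loader (my assumption: any backward pass through the model in eval mode hits the same ShakeDrop path):

import torch
import torch.nn.functional as F

model = ShakePyramidNet(depth=110, alpha=270, label=10).cuda()
model.eval()  # evaluation mode, as in the test loop above

x = torch.randn(1, 3, 32, 32, device="cuda", requires_grad=True)  # CIFAR-10-sized input
output = model(x)                                   # logits of shape [1, 10]
loss = F.nll_loss(output, torch.tensor([3], device="cuda"))
loss.backward()                                     # raises the IndexError shown above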

Do you know how to solve this problem?

The stack trace points to the custom autograd function in shakedrop.py.
It seems you are trying to index ctx.saved_tensors in its backward, but did you actually save any tensor via ctx.save_for_backward during the forward pass?
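For reference, backward can only index ctx.saved_tensors if forward called ctx.save_for_backward. A minimal sketch of the pattern (names and signature are assumptions, since the forward of your shakedrop.py isn't shown; adapt as needed):

import torch

class ShakeDropFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, training=True, p_drop=0.5):
        gate = torch.empty(1, device=x.device).bernoulli_(1. - p_drop)
        # Without this call, ctx.saved_tensors is an empty tuple in backward,
        # and ctx.saved_tensors[0] raises "IndexError: tuple index out of range".
        ctx.save_for_backward(gate)
        if training:
            alpha = torch.empty(x.size(0), 1, 1, 1, device=x.device).uniform_(-1., 1.)
            return gate * x + (1. - gate) * alpha * x
        # Expected-value path for evaluation; the gate is still saved above so
        # backward (needed for gradient-based attacks like FGSM) keeps working.
        return (1. - p_drop) * x

    @staticmethod
    def backward(ctx, grad_output):
        gate, = ctx.saved_tensors
        if gate.item() == 0:
            beta = torch.empty(grad_output.size(0), 1, 1, 1,
                               device=grad_output.device).uniform_(0., 1.)
            return beta * grad_output, None, None
        # One None per non-Tensor forward input (training, p_drop)
        return grad_output, None, None

A common trigger for this exact error: the forward only calls ctx.save_for_backward in its training branch, so after model.eval() nothing is saved, but an adversarial attack still calls backward and finds saved_tensors empty. Either save the gate in both branches (as sketched) or compute the eval path outside the custom Function so plain autograd differentiates it.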

Oh, why didn't I see that sentence…!
I should have looked at that function first.
Thank you for the answer.


I have the same problem. Can you tell me how to modify it? Thank you.