IndexError: too many indices for tensor of dimension 3

Hello,
I am getting the error below while trying to fine-tune on my own dataset.

Traceback (most recent call last):
  File "finetune.py", line 254, in <module>
    main()
  File "finetune.py", line 127, in main
    train(TrainImgLoader, model, optimizer, log, epoch)
  File "finetune.py", line 171, in train
    for x in range(num_out)]
  File "finetune.py", line 171, in <listcomp>
    for x in range(num_out)]
IndexError: too many indices for tensor of dimension 3

This is the finetune.py code. Please let me know how to fix this.

Based on the error message, the error seems to be raised in this line of code.
However, there doesn't seem to be any tensor involved in these operations.

Generally, you would get this error if you are trying to index a 4th dimension of a 3-dimensional tensor:

x = torch.randn(1, 2, 3)
x[0, 0, 0, 0]
> IndexError: too many indices for tensor of dimension 3
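
The same error is raised if you index with a boolean mask that has more dimensions than the tensor, since each dimension of the mask counts as one index (a small made-up example, not from your code):

x = torch.randn(1, 2, 3)
mask = torch.ones(1, 2, 3, 4, dtype=torch.bool)
x[mask]
> IndexError: too many indices for tensor of dimension 3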

Thank you for your reply!

Here’s the code where I think this is happening:

import torch
import torchvision.transforms as transforms
import random

__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
                    'std': [0.229, 0.224, 0.225]}

#__imagenet_stats = {'mean': [0.5, 0.5, 0.5],
#                    'std': [0.5, 0.5, 0.5]}

__imagenet_pca = {
    'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
    'eigvec': torch.Tensor([
        [-0.5675, 0.7192, 0.4009],
        [-0.5808, -0.0045, -0.8140],
        [-0.5836, -0.6948, 0.4203],
    ])
}

def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    t_list = [
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    #if scale_size != input_size:
    #    t_list = [transforms.Scale((960,540))] + t_list

    return transforms.Compose(t_list)

def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list

    return transforms.Compose(t_list)

def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    padding = int((scale_size - input_size) / 2)
    return transforms.Compose([
        transforms.RandomCrop(input_size, padding=padding),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])

def inception_preproccess(input_size, normalize=__imagenet_stats):
    return transforms.Compose([
        transforms.RandomSizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize)
    ])

def inception_color_preproccess(input_size, normalize=__imagenet_stats):
    return transforms.Compose([
        #transforms.RandomSizedCrop(input_size),
        #transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        ColorJitter(
            brightness=0.4,
            contrast=0.4,
            saturation=0.4,
        ),
        Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']),
        transforms.Normalize(**normalize)
    ])

def get_transform(name='imagenet', input_size=None,
                  scale_size=None, normalize=None, augment=True):
    normalize = __imagenet_stats
    input_size = 256
    if augment:
        return inception_color_preproccess(input_size, normalize=normalize)
    else:
        return scale_crop(input_size=input_size,
                          scale_size=scale_size, normalize=normalize)

class Lighting(object):
    """Lighting noise (AlexNet-style PCA-based noise)"""

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, img):
        if self.alphastd == 0:
            return img

        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        rgb = self.eigvec.type_as(img).clone()\
            .mul(alpha.view(1, 3).expand(3, 3))\
            .mul(self.eigval.view(1, 3).expand(3, 3))\
            .sum(1).squeeze()

        return img.add(rgb.view(3, 1, 1).expand_as(img))

class Grayscale(object):

    def __call__(self, img):
        gs = img.clone()
        gs[0].mul_(0.299).add_(0.587, gs[1]).add_(0.114, gs[2])
        gs[1].copy_(gs[0])
        gs[2].copy_(gs[0])
        return gs

class Saturation(object):

    def __init__(self, var):
        self.var = var

    def __call__(self, img):
        gs = Grayscale()(img)
        alpha = random.uniform(0, self.var)
        return img.lerp(gs, alpha)

class Brightness(object):

    def __init__(self, var):
        self.var = var

    def __call__(self, img):
        gs = img.new().resize_as_(img).zero_()
        alpha = random.uniform(0, self.var)
        return img.lerp(gs, alpha)

class Contrast(object):

    def __init__(self, var):
        self.var = var

    def __call__(self, img):
        gs = Grayscale()(img)
        gs.fill_(gs.mean())
        alpha = random.uniform(0, self.var)
        return img.lerp(gs, alpha)

class RandomOrder(object):
    """Composes several transforms together in random order."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img):
        if self.transforms is None:
            return img
        order = torch.randperm(len(self.transforms))
        for i in order:
            img = self.transforms[i](img)
        return img

class ColorJitter(RandomOrder):

    def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4):
        self.transforms = []
        if brightness != 0:
            self.transforms.append(Brightness(brightness))
        if contrast != 0:
            self.transforms.append(Contrast(contrast))
        if saturation != 0:
            self.transforms.append(Saturation(saturation))

How can I correct this?

Thanks in advance!

Which line of code is throwing the error?
The code is currently not formatted, which makes debugging hard.
You can add code snippets using three backticks ```

This is the error I am getting now.

Traceback (most recent call last):
  File "finetune.py", line 254, in <module>
    main()
  File "finetune.py", line 127, in main
    train(TrainImgLoader, model, optimizer, log, epoch)
  File "finetune.py", line 170, in train
    loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True)
  File "finetune.py", line 170, in <listcomp>
    loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True)
IndexError: too many indices for tensor of dimension 3

Here’s the code snippet:

    for epoch in range(args.start_epoch, args.epochs):
        log.info('This is {}-th epoch'.format(epoch))
        adjust_learning_rate(optimizer, epoch)

        train(TrainImgLoader, model, optimizer, log, epoch)

        savefilename = args.save_path + '/checkpoint.tar'
        torch.save({
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, savefilename)

        if epoch % 1 == 0:
            test(TestImgLoader, model, log)

    test(TestImgLoader, model, log)
    log.info('full training time = {:.2f} Hours'.format((time.time() - start_full_time) / 3600))


def train(dataloader, model, optimizer, log, epoch=0):

    stages = 3 + args.with_spn
    losses = [AverageMeter() for _ in range(stages)]
    length_loader = len(dataloader)

    model.train()

    for batch_idx, (imgL, imgR, disp_L) in enumerate(dataloader):
        imgL = imgL.float()#.cuda()
        imgR = imgR.float()#.cuda()
        disp_L = disp_L.float()#.cuda()

        optimizer.zero_grad()
        mask = disp_L > 0
        mask.detach_()
        outputs = model(imgL, imgR)

        if args.with_spn:
            if epoch >= args.start_epoch_for_spn:
                num_out = len(outputs)
            else:
                num_out = len(outputs) - 1
        else:
            num_out = len(outputs)

        outputs = [torch.squeeze(output, 1) for output in outputs]
        loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True)
                for x in range(num_out)]
        sum(loss).backward()
        optimizer.step()

Thank you for your time!

Thanks for the stack trace and code.
Based on the code and error message, I would guess outputs[x][mask] or disp_L[mask] is raising this error.
Could you check the shape of all tensors in this operation and the values of mask?
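
Something like this right before the loss calculation should show the shapes involved (using the variable names from your posted train() snippet):

    # print the shapes involved in the failing indexing op
    print("mask:", mask.shape, mask.dtype)
    print("disp_L:", disp_L.shape)
    for x in range(num_out):
        print("outputs[{}]:".format(x), outputs[x].shape)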

Thanks for your reply!

I added print(mask.shape) and print(disp_L[mask].shape). The output is:

torch.Size([2, 256, 512, 4])
torch.Size([484522])

Is this correct?

Also, the shape of disp_L is torch.Size([2, 256, 512, 4]),
and outputs is a list of length 3.

Thanks for your time!
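
For reference, a minimal sketch with these shapes reproduces the same error, assuming outputs[x] is [2, 256, 512] after the torch.squeeze(output, 1) call (my assumption, I haven't checked the model output shape):

    out = torch.randn(2, 256, 512)                       # assumed shape of outputs[x] after squeeze(1)
    mask = torch.ones(2, 256, 512, 4, dtype=torch.bool)  # mask shape printed above
    out[mask]
    > IndexError: too many indices for tensor of dimension 3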