RuntimeError: cuda runtime error (59) : device-side assert triggered at /pytorch/torch/lib/THC/generic/THCStorage.c:36

Hi everyone,
I’m trying to run training on ImageNet, but I keep hitting the error above. I have no idea what is causing it — any help is greatly appreciated.
Here is the full log :

=> creating model 'resnet18'
=> Model : ResNet(
  (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
  (relu): ReLU(inplace)
  (maxpool): MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), ceil_mode=False)
  (layer1): Sequential(
    (0): BasicBlock(
      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
      (relu): ReLU(inplace)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
    )
    (1): BasicBlock(
      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
      (relu): ReLU(inplace)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
    )
  )
  (layer2): Sequential(
    (0): BasicBlock(
      (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
      (relu): ReLU(inplace)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
      (downsample): Sequential(
        (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
      )
    )
    (1): BasicBlock(
      (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
      (relu): ReLU(inplace)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
    )
  )
  (layer3): Sequential(
    (0): BasicBlock(
      (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
      (relu): ReLU(inplace)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
      (downsample): Sequential(
        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
      )
    )
    (1): BasicBlock(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
      (relu): ReLU(inplace)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
    )
  )
  (layer4): Sequential(
    (0): BasicBlock(
      (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
      (relu): ReLU(inplace)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
      (downsample): Sequential(
        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
      )
    )
    (1): BasicBlock(
      (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
      (relu): ReLU(inplace)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
    )
  )
  (avgpool): AvgPool2d(kernel_size=7, stride=7, padding=0, ceil_mode=False, count_include_pad=True)
  (fc): Linear(in_features=512, out_features=1000, bias=True)
)
=> parameter : Namespace(arch='resnet18', batch_size=128, data='/media/ShishoSama/DataSection/DeepLearning/ImageNet_DataSet', epochs=100, evaluate=False, lr=0.1, momentum=0.9, prefix='2018-05-03-8661', print_freq=200, resume='', save_dir='./snapshots/resnet18/', start_epoch=0, train_dir_name='training_set_t12/', val_dir_name='imagenet_val/', weight_decay=0.0001, workers=2)
/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py:397: UserWarning: The use of the transforms.RandomSizedCrop transform is deprecated, please use transforms.RandomResizedCrop instead.
  "please use transforms.RandomResizedCrop instead.")
/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py:156: UserWarning: The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.
  "please use transforms.Resize instead.")
 [resnet18] ::   0/100 ----- [[2018-05-03 14:25:35]] [Need: 00:00:00]
Epoch: [0][0/10010]	Time 8.814 (8.814)	Data 6.929 (6.929)	Loss 7.0629 (7.0629)	Prec@1 0.000 (0.000)	Prec@5 0.000 (0.000)
/pytorch/torch/lib/THCUNN/ClassNLLCriterion.cu:101: void cunn_ClassNLLCriterion_updateOutput_kernel(Dtype *, Dtype *, Dtype *, long *, Dtype *, int, int, int, int, long) [with Dtype = float, Acctype = float]: block: [0,0,0], thread: [0,0,0] Assertion `t >= 0 && t < n_classes` failed.
THCudaCheck FAIL file=/pytorch/torch/lib/THC/generic/THCStorage.c line=36 error=59 : device-side assert triggered
Traceback (most recent call last):
  File "imagenet_train.py", line 306, in <module>
    main()
  File "imagenet_train.py", line 140, in main
    train(train_loader, model, criterion, optimizer, epoch, log)
  File "imagenet_train.py", line 186, in train
    losses.update(loss.data[0], input.size(0))
RuntimeError: cuda runtime error (59) : device-side assert triggered at /pytorch/torch/lib/THC/generic/THCStorage.c:36
terminate called after throwing an instance of 'std::runtime_error'
  what():  cuda runtime error (59) : device-side assert triggered at /pytorch/torch/lib/THC/generic/THCStorage.c:184
Aborted (core dumped)

and the snippet in question is this :

def train(train_loader, model, criterion, optimizer, epoch, log):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        target = target.cuda(async=True)
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print_log('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5), log)

Here is the definition of AverageMeter , and this is the whole training script

What is happening here? (By the way, I’m using PyTorch 0.3.)
Thanks a lot in advance.

1 Like

Hi,

First thing is to try to run the code on CPU. CPU code has more checks so it will possibly return a better error message.
If the CPU code runs without error, then run the same thing with CUDA_LAUNCH_BLOCKING=1 to get a proper error message and stack trace.

1 Like

Thank you very much.
Meanwhile, I added some print() calls to the train function, and here is the log I get:

def train(train_loader, model, criterion, optimizer, epoch, log):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        target = target.cuda(async=True)
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # compute output
        output = model(input_var)
        print('output: ',output.shape)
        print('output(target_var): ',target_var.shape)
        loss = criterion(output, target_var)
        
        print('loss: ',loss.shape)  
        
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        print('input.size(): ', input.size(0))
        print('loss.data[0]: ', loss.data[0])

        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print_log('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5), log)

Log :

/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py:397: UserWarning: The use of the transforms.RandomSizedCrop transform is deprecated, please use transforms.RandomResizedCrop instead.
  "please use transforms.RandomResizedCrop instead.")
/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py:156: UserWarning: The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.
  "please use transforms.Resize instead.")
 [resnet18] ::   0/100 ----- [[2018-05-03 15:03:49]] [Need: 00:00:00]
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.064196586608887
Epoch: [0][0/10010]	Time 8.969 (8.969)	Data 7.235 (7.235)	Loss 7.0642 (7.0642)	Prec@1 0.000 (0.000)	Prec@5 0.000 (0.000)
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.059842586517334
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.111576080322266
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.147580146789551
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.028686046600342
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.062038421630859
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.18644380569458
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.259083271026611
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.360715866088867
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.253360748291016
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.305577278137207
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.355863094329834
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.292840003967285
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.266591548919678
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
/pytorch/torch/lib/THCUNN/ClassNLLCriterion.cu:101: void cunn_ClassNLLCriterion_updateOutput_kernel(Dtype *, Dtype *, Dtype *, long *, Dtype *, int, int, int, int, long) [with Dtype = float, Acctype = float]: block: [0,0,0], thread: [9,0,0] Assertion `t >= 0 && t < n_classes` failed.
THCudaCheck FAIL file=/pytorch/torch/lib/THC/generic/THCStorage.c line=36 error=59 : device-side assert triggered
Traceback (most recent call last):
  File "imagenet_train.py", line 313, in <module>
    main()
  File "imagenet_train.py", line 140, in main
    train(train_loader, model, criterion, optimizer, epoch, log)
  File "imagenet_train.py", line 191, in train
    print('loss.data[0]: ', loss.data[0])
RuntimeError: cuda runtime error (59) : device-side assert triggered at /pytorch/torch/lib/THC/generic/THCStorage.c:36
terminate called after throwing an instance of 'std::runtime_error'
  what():  cuda runtime error (59) : device-side assert triggered at /pytorch/torch/lib/THC/generic/THCStorage.c:184
Aborted (core dumped)

It seems it runs for several iterations and then crashes!
As for running in CPU-only mode, how should I do that? I’m a newbie in PyTorch, and I would greatly appreciate it if you told me what to change — would simply removing the cuda() calls do it?

It’s weird — the crash happens at a random iteration! My second run took much longer to crash:

/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py:397: UserWarning: The use of the transforms.RandomSizedCrop transform is deprecated, please use transforms.RandomResizedCrop instead.
  "please use transforms.RandomResizedCrop instead.")
/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py:156: UserWarning: The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.
  "please use transforms.Resize instead.")
 [resnet18] ::   0/100 ----- [[2018-05-03 15:10:19]] [Need: 00:00:00]
i:  0
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.9907402992248535
Epoch: [0][0/10010]	Time 8.867 (8.867)	Data 7.150 (7.150)	Loss 6.9907 (6.9907)	Prec@1 0.000 (0.000)	Prec@5 0.781 (0.781)
i:  1
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.086461544036865
i:  2
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.06037712097168
i:  3
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.140486240386963
i:  4
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.094625949859619
i:  5
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.328759670257568
i:  6
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.284300327301025
i:  7
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.137808322906494
i:  8
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.297905445098877
i:  9
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.161623477935791
i:  10
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.206675052642822
i:  11
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.358131408691406
i:  12
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.334465503692627
i:  13
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.099543571472168
i:  14
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.368229866027832
i:  15
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.365810871124268
i:  16
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.326346397399902
i:  17
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.321466445922852
i:  18
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.44693660736084
i:  19
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.245199203491211
i:  20
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.311239719390869
i:  21
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.303903102874756
i:  22
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.213065147399902
i:  23
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.367886066436768
i:  24
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.063688278198242
i:  25
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.990291595458984
i:  26
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.130172252655029
i:  27
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.1098103523254395
i:  28
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.089640140533447
i:  29
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.1106367111206055
i:  30
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.049175262451172
i:  31
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.112464904785156
i:  32
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.072038650512695
i:  33
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.1823554039001465
i:  34
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.088671684265137
i:  35
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.0520429611206055
i:  36
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.061910152435303
i:  37
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.959447383880615
i:  38
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.97224760055542
i:  39
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.0115132331848145
i:  40
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.01668643951416
i:  41
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.067199230194092
i:  42
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.904612064361572
i:  43
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.920835971832275
i:  44
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.9110636711120605
i:  45
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.008086204528809
i:  46
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.999253749847412
i:  47
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.9378981590271
i:  48
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.049530029296875
i:  49
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.937681198120117
i:  50
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.913029193878174
i:  51
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.0122599601745605
i:  52
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  6.941130638122559
i:  53
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
/pytorch/torch/lib/THCUNN/ClassNLLCriterion.cu:101: void cunn_ClassNLLCriterion_updateOutput_kernel(Dtype *, Dtype *, Dtype *, long *, Dtype *, int, int, int, int, long) [with Dtype = float, Acctype = float]: block: [0,0,0], thread: [8,0,0] Assertion `t >= 0 && t < n_classes` failed.
THCudaCheck FAIL file=/pytorch/torch/lib/THC/generic/THCStorage.c line=36 error=59 : device-side assert triggered
Traceback (most recent call last):
  File "imagenet_train.py", line 313, in <module>
    main()
  File "imagenet_train.py", line 140, in main
    train(train_loader, model, criterion, optimizer, epoch, log)
  File "imagenet_train.py", line 191, in train
    print('loss.data[0]: ', loss.data[0])
RuntimeError: cuda runtime error (59) : device-side assert triggered at /pytorch/torch/lib/THC/generic/THCStorage.c:36
terminate called after throwing an instance of 'std::runtime_error'
  what():  cuda runtime error (59) : device-side assert triggered at /pytorch/torch/lib/THC/generic/THCStorage.c:184
Aborted (core dumped)

Yes,
To run on the CPU, just remove all the .cuda() calls.
Also, when running on the GPU, set CUDA_LAUNCH_BLOCKING=1 — otherwise the stack trace will point at the wrong line; with it, the exact failing call is reported.

1 Like

I ran my script with CUDA_LAUNCH_BLOCKING=1 like this :
CUDA_LAUNCH_BLOCKING=1 python3 imagenet_train.py args

and the error log is as follows :

/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py:397: UserWarning: The use of the transforms.RandomSizedCrop transform is deprecated, please use transforms.RandomResizedCrop instead.
  "please use transforms.RandomResizedCrop instead.")
/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py:156: UserWarning: The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.
  "please use transforms.Resize instead.")
 [resnet18] ::   0/100 ----- [[2018-05-03 16:14:05]] [Need: 00:00:00]
i:  0
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.016279220581055
Epoch: [0][0/10010]	Time 8.858 (8.858)	Data 7.036 (7.036)	Loss 7.0163 (7.0163)	Prec@1 0.000 (0.000)	Prec@5 0.000 (0.000)
i:  1
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.077082633972168
i:  2
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.124829292297363
i:  3
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.023961544036865
i:  4
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.081689834594727
i:  5
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.205571174621582
i:  6
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.2464823722839355
i:  7
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.15701150894165
i:  8
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
loss:  torch.Size([1])
input.size():  128
loss.data[0]:  7.310776233673096
i:  9
output:  torch.Size([128, 1000])
output(target_var):  torch.Size([128])
/pytorch/torch/lib/THCUNN/ClassNLLCriterion.cu:101: void cunn_ClassNLLCriterion_updateOutput_kernel(Dtype *, Dtype *, Dtype *, long *, Dtype *, int, int, int, int, long) [with Dtype = float, Acctype = float]: block: [0,0,0], thread: [10,0,0] Assertion `t >= 0 && t < n_classes` failed.
THCudaCheck FAIL file=/pytorch/torch/lib/THCUNN/generic/ClassNLLCriterion.cu line=113 error=59 : device-side assert triggered
Traceback (most recent call last):
  File "imagenet_train.py", line 313, in <module>
    main()
  File "imagenet_train.py", line 140, in main
    train(train_loader, model, criterion, optimizer, epoch, log)
  File "imagenet_train.py", line 184, in train
    loss = criterion(output, target_var)
  File "/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 357, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torch/nn/modules/loss.py", line 679, in forward
    self.ignore_index, self.reduce)
  File "/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py", line 1161, in cross_entropy
    return nll_loss(log_softmax(input, 1), target, weight, size_average, ignore_index, reduce)
  File "/home/ShishoSama/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py", line 1052, in nll_loss
    return torch._C._nn.nll_loss(input, target, weight, size_average, ignore_index, reduce)
RuntimeError: cuda runtime error (59) : device-side assert triggered at /pytorch/torch/lib/THCUNN/generic/ClassNLLCriterion.cu:113
terminate called after throwing an instance of 'std::runtime_error'
  what():  cuda runtime error (59) : device-side assert triggered at /pytorch/torch/lib/THC/generic/THCStorage.c:184
Aborted (core dumped)

The line that is interesting is this one: /pytorch/torch/lib/THCUNN/ClassNLLCriterion.cu:101: void cunn_ClassNLLCriterion_updateOutput_kernel(Dtype *, Dtype *, Dtype *, long *, Dtype *, int, int, int, int, long) [with Dtype = float, Acctype = float]: block: [0,0,0], thread: [10,0,0] Assertion `t >= 0 && t < n_classes` failed.
In the ClassNLLCriterion kernel, the assertion `t >= 0 && t < n_classes` failed, so one of the elements of target_var must be either smaller than 0 or greater than or equal to the number of classes (the output size). You might want to check the labels in your dataset — one of them looks invalid.

3 Likes

Thank you very much — yes, that’s indeed the case: one of the labels was 1000!
Got it — there was one empty folder in the dataset.
Thank you a gazillion times!

Hi, my error seems to be similar, but I can’t figure out what’s wrong. Any help would be much appreciated.