Process finished with exit code 139 when running on CUDA

Hi,
I am new to PyTorch and am currently fighting an issue I have not been able to solve so far. I have written a simple linear regression example in PyTorch. Here are the files on GitHub:

The model can be found in the models.py file in the same directory.

When training on CUDA, after a few epochs I get Process finished with exit code 139 (interrupted by signal 11: SIGSEGV) on a Debian-based machine. The same code runs fine on the CPU (that file is in the GitHub repo as well). Does anyone know where the problem is? I am using Python 3.8.0 and torch 1.8.1+cu111.
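To give an idea of what the script does, here is a hypothetical minimal version; the actual model and data generator are in the repo, and the names and shapes below are just placeholders:

# Hypothetical stand-in for the real script; models.py and the data
# generator in the repo are what actually run.
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Synthetic data for y = 2x + 1 with a little noise
x = torch.rand(2000, 1)
y = 2 * x + 1 + 0.1 * torch.randn(2000, 1)
loader = DataLoader(TensorDataset(x, y), batch_size=10, shuffle=True)

model = nn.Linear(1, 1).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for epoch in range(20):
    for xb, yb in loader:
        xb, yb = xb.to(device), yb.to(device)
        optimizer.zero_grad()
        loss = criterion(model(xb), yb)
        loss.backward()
        optimizer.step()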

Thanks

Could you run the script with gdb and check if you get a valid stacktrace via:

gdb --args python script.py
...
run
...
bt

Here is the full trace, but I have no clue what it is about:

(gdb) run
Starting program: /home/datapsycho/miniconda3/envs/mytorch/bin/python projects/b1_simple_linear_reg.py
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
[Detaching after fork from child process 7881]
[New Thread 0x7ffefad40700 (LWP 7886)]
[New Thread 0x7ffefa53f700 (LWP 7887)]
[New Thread 0x7ffef7d3e700 (LWP 7888)]
[New Thread 0x7ffef353d700 (LWP 7889)]
[New Thread 0x7ffef0d3c700 (LWP 7890)]
[New Thread 0x7ffeee53b700 (LWP 7891)]
[New Thread 0x7ffeebd3a700 (LWP 7892)]
[New Thread 0x7ffed0b11700 (LWP 7893)]
[New Thread 0x7ffebffff700 (LWP 7894)]
[New Thread 0x7ffebf7fe700 (LWP 7895)]
[New Thread 0x7ffebe56f700 (LWP 7896)]
Epoch:   0%|                                                  | 0/20 [00:00<?, ?it/s]
[New Thread 0x7ffebcb43700 (LWP 7897)]
        0%|                                                  | 0/200 [00:00<?, ?it/s]
Epoch:  75%|█████████████████████████████████████▌           | 15/20 [00:01<00:00, 12.00it/s]

Thread 13 "python" received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7ffebcb43700 (LWP 7897)]
0x00007fffe8aca5f2 in cudart::threadLaunchState::popConfigForLaunch(cudart::configData&) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda.so
(gdb) bt
#0  0x00007fffe8aca5f2 in cudart::threadLaunchState::popConfigForLaunch(cudart::configData&) ()
   from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda.so
#1  0x00007fffe8ac0662 in __cudaPopCallConfiguration ()
   from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda.so
#2  0x00007fffe7e53d26 in void gemmk1_kernel<float, 256, 5, false, false, false, false, cublasGemvTensorStridedBatched<float const>, cublasGemvTensorStridedBatched<float const>, cublasGemvTensorStridedBatched<float>, float>(cublasGemmk1Params<float, cublasGemvTensorStridedBatched<float const>, cublasGemvTensorStridedBatched<float const>, cublasGemvTensorStridedBatched<float>, float, biasType<cublasGemvTensorStridedBatched<float>::value_type, float>::type>)
    () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda.so
#3  0x00007fff96eaea0c in cublasSgemmRecursiveEntry(cublasContext*, int, int, int, int, int, float const*, float const*, int, float const*, int, float const*, float*, int) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda_cu.so
#4  0x00007fff96eaf377 in cublasSgemm_v2 () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda_cu.so
#5  0x00007fff94d878d9 in void at::cuda::blas::gemm<float>(char, char, long, long, long, float, float const*, long, float const*, long, float, float*, long) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda_cu.so
#6  0x00007fff95fa38f0 in at::native::(anonymous namespace)::addmm_out_cuda_impl(at::Tensor&, at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::Scalar, c10::Scalar) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda_cu.so
#7  0x00007fff95fa5092 in at::native::mm_cuda(at::Tensor const&, at::Tensor const&) ()
   from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda_cu.so
#8  0x00007fff94dde848 in at::(anonymous namespace)::(anonymous namespace)::wrapper_mm(at::Tensor const&, at::Tensor const&) ()
   from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda_cu.so
#9  0x00007fff94dde88f in c10::impl::wrap_kernel_functor_unboxed_<c10::impl::detail::WrapFunctionIntoFunctor_<c10::CompileTimeFunctionPointer<at::Tensor (at::Tensor const&, at::Tensor const&), &at::(anonymous namespace)::(anonymous namespace)::wrapper_mm>, at::Tensor, c10::guts::typelist::typelist<at::Tensor const&, at::Tensor const&> >, at::Tensor (at::Tensor const&, at::Tensor const&)>::call(c10::OperatorKernel*, at::Tensor const&, at::Tensor const&) ()
   from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cuda_cu.so
#10 0x00007fff8325a596 in at::Tensor c10::Dispatcher::call<at::Tensor, at::Tensor const&, at::Tensor const&>(c10::TypedOperatorHandle<at::Tensor (at::Tensor const&, at::Tensor const&)> const&, at::Tensor const&, at::Tensor const&) const [clone .constprop.4808] () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#11 0x00007fff8325fbbf in at::mm(at::Tensor const&, at::Tensor const&) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#12 0x00007fff84b4391c in torch::autograd::VariableType::(anonymous namespace)::mm(at::Tensor const&, at::Tensor const&) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#13 0x00007fff84b43e8f in c10::impl::wrap_kernel_functor_unboxed_<c10::impl::detail::WrapFunctionIntoFunctor_<c10::CompileTimeFunctionPointer<at::Tensor (at::Tensor const&, at::Tensor const&), &torch::autograd::VariableType::(anonymous namespace)::mm>, at::Tensor, c10::guts::typelist::typelist<at::Tensor const&, at::Tensor const&> >, at::Tensor (at::Tensor const&, at::Tensor const&)>::call(c10::OperatorKernel*, at::Tensor const&, at::Tensor const&) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#14 0x00007fff836ac7d6 in at::Tensor c10::Dispatcher::call<at::Tensor, at::Tensor const&, at::Tensor const&>(c10::TypedOperatorHandle<at::Tensor (at::Tensor const&, at::Tensor const&)> const&, at::Tensor const&, at::Tensor const&) const [clone .constprop.1487] () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#15 0x00007fff836b09ff in at::Tensor::mm(at::Tensor const&) const () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#16 0x00007fff85583a23 in torch::autograd::generated::details::mm_mat2_backward(at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::Scalar const&) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#17 0x00007fff84958023 in torch::autograd::generated::AddmmBackward::apply(std::vector<at::Tensor, std::allocator<at::Tensor> >&&) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#18 0x00007fff85015771 in torch::autograd::Node::operator()(std::vector<at::Tensor, std::allocator<at::Tensor> >&&) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#19 0x00007fff8501157b in torch::autograd::Engine::evaluate_function(std::shared_ptr<torch::autograd::GraphTask>&, torch::autograd::Node*, torch::autograd::InputBuffer&, std::shared_ptr<torch::autograd::ReadyQueue> const&) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#20 0x00007fff8501219f in torch::autograd::Engine::thread_main(std::shared_ptr<torch::autograd::GraphTask> const&) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#21 0x00007fff85009979 in torch::autograd::Engine::thread_init(int, std::shared_ptr<torch::autograd::ReadyQueue> const&, bool) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so
#22 0x00007ffff578d293 in torch::autograd::python::PythonEngine::thread_init(int, std::shared_ptr<torch::autograd::ReadyQueue> const&, bool) () from /home/datapsycho/miniconda3/envs/mytorch/lib/python3.8/site-packages/torch/lib/libtorch_python.so
#23 0x00007ffff67a4d84 in ?? () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6
#24 0x00007ffff7f93609 in start_thread (arg=<optimized out>) at pthread_create.c:477
#25 0x00007ffff7eba293 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
(gdb) q

Thanks!
Could you link to or post an executable code snippet? You are currently linking to lectures and I’m not sure which model is used. Also, which GPU are you using?

If you pull the whole repo, the following should reproduce the same result:

$ cd <project root where the chapter2 package and projects directory are>
$ export PYTHONPATH=.
$ gdb --args python projects/b1_simple_linear_reg.py

chapter2 is the package from which I import the model, data generator, and data loader into b1_simple_linear_reg.py, which I use as the main file for training. Let me know if that works; otherwise I will put everything into one file.
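For reference, the layout is roughly as follows (only the files named above are real; the rest is illustrative):

<project root>
├── chapter2/                     # package with the model, data generator, data loader
│   └── models.py
└── projects/
    └── b1_simple_linear_reg.py   # main training script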

I am using a GeForce GTX 1050 Ti Max-Q; here is the system specification:

  Operating System: Linux Mint 20 Ulyana
            Kernel: Linux 5.4.0-70-generic
      Architecture: x86-64

GPU info:

Mon Mar 29 11:35:39 2021       
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 450.51.06    Driver Version: 450.51.06    CUDA Version: 11.0     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  GeForce GTX 105...  On   | 00000000:02:00.0 Off |                  N/A |
| N/A   36C    P8    N/A /  N/A |    753MiB /  4042MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

Hi,
I have also tried the classic CIFAR-10 dataset and it produces the same error. Here is the code snippet:

import torch
import torchvision
import torchvision.transforms as transforms

import matplotlib.pyplot as plt
import numpy as np

import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=1,
                                          shuffle=True, num_workers=1)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=1,
                                         shuffle=False, num_workers=1)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')


def imshow(img):
    img = img / 2 + 0.5  # un-normalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()


# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)  # use built-in next(); the .next() method is gone in newer PyTorch

# show images
imshow(torchvision.utils.make_grid(images))
# print labels
# print(' '.join('%5s' % classes[labels[j]] for j in range(4)))


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


net = Net()

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

device = torch.device("cuda:0")
print(device)
net.to(device)


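# Standard training loop; on CUDA this crashes with SIGSEGV partway
# through, just like the linear regression script.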
for epoch in range(2):

    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):

        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()

        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 2000 == 1999:    # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('Finished Training')

But surprisingly, I was able to train on the MNIST data with Flux.jl (a Julia deep learning framework) using the GPU.
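Since the backtrace above ends inside cublasSgemm, a bare matmul plus backward on the GPU (no model, no data loading) should exercise the same code path. A minimal sanity check, as a sketch:

import torch

# Bare matmul + backward on the GPU; per the backtrace the segfault
# happens inside cublasSgemm during mm in the backward pass.
device = torch.device("cuda:0")
a = torch.randn(512, 512, device=device, requires_grad=True)
b = torch.randn(512, 512, device=device)

for _ in range(1000):
    loss = (a @ b).sum()
    loss.backward()          # backward also dispatches to mm on the GPU
    a.grad = None

torch.cuda.synchronize()
print("GPU matmul loop finished without crashing")

If this also segfaults, the problem would be in the CUDA/cuBLAS setup (e.g. the driver/toolkit combination) rather than in the training code itself.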