RuntimeError: CUDA error: device-side assert triggered, raised while calling model.cuda(); the same code works fine on CPU.


RuntimeError Traceback (most recent call last)
in ()
----> 1 model = cycle(model = "resnet" , path = "Classify.pth" , epochs = 6 , depth = 0 , lr = 1e-3 , freeze = True)

in cycle(model, path, epochs, depth, lr, freeze)
42 criterion = nn.CrossEntropyLoss()
43
--> 44 model.to(device)
45 print(" Error here ? ")
46

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in to(self, *args, **kwargs)
379 return t.to(device, dtype if t.is_floating_point() else None, non_blocking)
380
--> 381 return self._apply(convert)
382
383 def register_backward_hook(self, hook):

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _apply(self, fn)
185 def _apply(self, fn):
186 for module in self.children():
--> 187 module._apply(fn)
188
189 for param in self._parameters.values():

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _apply(self, fn)
191 # Tensors stored in modules are graph leaves, and we don't
192 # want to create copy nodes, so we have to unpack the data.
--> 193 param.data = fn(param.data)
194 if param._grad is not None:
195 param._grad.data = fn(param._grad.data)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in convert(t)
377
378 def convert(t):
--> 379 return t.to(device, dtype if t.is_floating_point() else None, non_blocking)
380
381 return self._apply(convert)

RuntimeError: CUDA error: device-side assert triggered