RuntimeError: CUDA error: device-side assert triggered
The training process works on CPU, but shows this error on GPU. I don't know why — there was no change in the code either. Could anyone please help me with this error? Thanks in advance.
Error while using GPU on Colab: Tesla K80
===================== ERROR ========================
RuntimeError Traceback (most recent call last)
in ()
----> 1 model = cycle(model = "resnet" , path = "weights.pth" , epochs = 4 , depth = 0 , lr = 1e-3 , freeze = True)
in cycle(model, path, epochs, depth, lr, freeze)
40 criterion = nn.CrossEntropyLoss()
41
---> 42 model.to(device)
43
44 train_acc = []
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in to(self, *args, **kwargs)
379 return t.to(device, dtype if t.is_floating_point() else None, non_blocking)
380
--> 381 return self._apply(convert)
382
383 def register_backward_hook(self, hook):
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _apply(self, fn)
185 def _apply(self, fn):
186 for module in self.children():
--> 187 module._apply(fn)
188
189 for param in self._parameters.values():
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _apply(self, fn)
191 # Tensors stored in modules are graph leaves, and we don't
192 # want to create copy nodes, so we have to unpack the data.
--> 193 param.data = fn(param.data)
194 if param._grad is not None:
195 param._grad.data = fn(param._grad.data)
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in convert(t)
377
378 def convert(t):
--> 379 return t.to(device, dtype if t.is_floating_point() else None, non_blocking)
380
381 return self._apply(convert)
RuntimeError: CUDA error: device-side assert triggered