_thnn_conv2d_forward

torch.onnx.export(model,dummy_input,model_name,verbose=True,
input_names=[input_name],output_names=[output_name])


RuntimeError Traceback (most recent call last)
in
1 torch.onnx.export(model,dummy_input,model_name,verbose=True,
----> 2 input_names=[input_name],output_names=[output_name])

~\anaconda3\envs\fastai\lib\site-packages\torch\onnx\__init__.py in export(model, args, f, export_params, verbose, training, input_names, output_names, aten, export_raw_ir, operator_export_type, opset_version, _retain_param_name, do_constant_folding, example_outputs, strip_doc_string, dynamic_axes, keep_initializers_as_inputs)
146 operator_export_type, opset_version, _retain_param_name,
147 do_constant_folding, example_outputs,
–> 148 strip_doc_string, dynamic_axes, keep_initializers_as_inputs)
149
150

~\anaconda3\envs\fastai\lib\site-packages\torch\onnx\utils.py in export(model, args, f, export_params, verbose, training, input_names, output_names, aten, export_raw_ir, operator_export_type, opset_version, _retain_param_name, do_constant_folding, example_outputs, strip_doc_string, dynamic_axes, keep_initializers_as_inputs)
64 _retain_param_name=_retain_param_name, do_constant_folding=do_constant_folding,
65 example_outputs=example_outputs, strip_doc_string=strip_doc_string,
—> 66 dynamic_axes=dynamic_axes, keep_initializers_as_inputs=keep_initializers_as_inputs)
67
68

~\anaconda3\envs\fastai\lib\site-packages\torch\onnx\utils.py in _export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, export_type, example_outputs, propagate, opset_version, _retain_param_name, do_constant_folding, strip_doc_string, dynamic_axes, keep_initializers_as_inputs, fixed_batch_size)
414 example_outputs, propagate,
415 _retain_param_name, do_constant_folding,
–> 416 fixed_batch_size=fixed_batch_size)
417
418 # TODO: Don’t allocate a in-memory string for the protobuf

~\anaconda3\envs\fastai\lib\site-packages\torch\onnx\utils.py in _model_to_graph(model, args, verbose, training, input_names, output_names, operator_export_type, example_outputs, propagate, _retain_param_name, do_constant_folding, _disable_torch_constant_prop, fixed_batch_size)
277 model.graph, tuple(in_vars), False, propagate)
278 else:
–> 279 graph, torch_out = _trace_and_get_graph_from_model(model, args, training)
280 state_dict = _unique_state_dict(model)
281 params = list(state_dict.values())

~\anaconda3\envs\fastai\lib\site-packages\torch\onnx\utils.py in _trace_and_get_graph_from_model(model, args, training)
234 # training mode was.)
235 with set_training(model, training):
–> 236 trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(model, args, _force_outplace=True, _return_inputs_states=True)
237 warn_on_static_input_change(inputs_states)
238

~\anaconda3\envs\fastai\lib\site-packages\torch\jit\__init__.py in _get_trace_graph(f, args, kwargs, _force_outplace, return_inputs, _return_inputs_states)
275 if not isinstance(args, tuple):
276 args = (args,)
–> 277 outs = ONNXTracedModule(f, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
278 return outs
279

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\module.py in call(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
–> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)

~\anaconda3\envs\fastai\lib\site-packages\torch\jit\__init__.py in forward(self, *args)
358 in_vars + module_state,
359 _create_interpreter_name_lookup_fn(),
–> 360 self._force_outplace,
361 )
362

~\anaconda3\envs\fastai\lib\site-packages\torch\jit\__init__.py in wrapper(*args)
345 if self._return_inputs_states:
346 inputs_states.append(_unflatten(args[:len(in_vars)], in_desc))
–> 347 outs.append(self.inner(*trace_inputs))
348 if self._return_inputs_states:
349 inputs_states[0] = (inputs_states[0], trace_inputs)

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\module.py in call(self, *input, **kwargs)
528 input = result
529 if torch._C._get_tracing_state():
–> 530 result = self._slow_forward(*input, **kwargs)
531 else:
532 result = self.forward(*input, **kwargs)

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\module.py in _slow_forward(self, *input, **kwargs)
514 recording_scopes = False
515 try:
–> 516 result = self.forward(*input, **kwargs)
517 finally:
518 if recording_scopes:

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\container.py in forward(self, input)
98 def forward(self, input):
99 for module in self:
–> 100 input = module(input)
101 return input
102

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\module.py in call(self, *input, **kwargs)
528 input = result
529 if torch._C._get_tracing_state():
–> 530 result = self._slow_forward(*input, **kwargs)
531 else:
532 result = self.forward(*input, **kwargs)

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\module.py in _slow_forward(self, *input, **kwargs)
514 recording_scopes = False
515 try:
–> 516 result = self.forward(*input, **kwargs)
517 finally:
518 if recording_scopes:

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\container.py in forward(self, input)
98 def forward(self, input):
99 for module in self:
–> 100 input = module(input)
101 return input
102

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\module.py in call(self, *input, **kwargs)
528 input = result
529 if torch._C._get_tracing_state():
–> 530 result = self._slow_forward(*input, **kwargs)
531 else:
532 result = self.forward(*input, **kwargs)

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\module.py in _slow_forward(self, *input, **kwargs)
514 recording_scopes = False
515 try:
–> 516 result = self.forward(*input, **kwargs)
517 finally:
518 if recording_scopes:

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
343
344 def forward(self, input):
–> 345 return self.conv2d_forward(input, self.weight)
346
347 class Conv3d(_ConvNd):

~\anaconda3\envs\fastai\lib\site-packages\torch\nn\modules\conv.py in conv2d_forward(self, input, weight)
340 _pair(0), self.dilation, self.groups)
341 return F.conv2d(input, weight, self.bias, self.stride,
–> 342 self.padding, self.dilation, self.groups)
343
344 def forward(self, input):

RuntimeError: Expected object of device type cuda but got device type cpu for argument #1 'self' in call to _thnn_conv2d_forward

This is a device mismatch error: the model's parameters are on the GPU (CUDA) while the dummy input you pass to the exporter is still on the CPU (or vice versa). ONNX export traces the model by actually running a forward pass, so the model and its input must live on the same device. Move both to the same device before calling torch.onnx.export:

device = torch.device('cuda:0')
model = model.to(device)
dummy_input = dummy_input.to(device)

then run the export again. Alternatively, since exporting does not need the GPU, you can move the model to the CPU instead (model.cpu()) and export with a CPU dummy input.