I have a script that converts a PyTorch model to ONNX, and it works fine on my Ubuntu laptop. But when I ran it on a Raspberry Pi 4 (Buster, 32-bit) I got the error below. I tried this thread and this thread, but without luck (a rough sketch of what I tried is at the bottom of the post).
pi@raspberrypi:~/thesis $ python3 test_export.py
Traceback (most recent call last):
File "test_export.py", line 34, in <module>
output_names=output_names)
File "/usr/local/lib/python3.7/dist-packages/torch/onnx/__init__.py", line 230, in export
custom_opsets, enable_onnx_checker, use_external_data_format)
File "/usr/local/lib/python3.7/dist-packages/torch/onnx/utils.py", line 91, in export
use_external_data_format=use_external_data_format)
File "/usr/local/lib/python3.7/dist-packages/torch/onnx/utils.py", line 639, in _export
dynamic_axes=dynamic_axes)
File "/usr/local/lib/python3.7/dist-packages/torch/onnx/utils.py", line 411, in _model_to_graph
use_new_jit_passes)
File "/usr/local/lib/python3.7/dist-packages/torch/onnx/utils.py", line 379, in _create_jit_graph
graph, torch_out = _trace_and_get_graph_from_model(model, args)
File "/usr/local/lib/python3.7/dist-packages/torch/onnx/utils.py", line 342, in _trace_and_get_graph_from_model
torch.jit._get_trace_graph(model, args, strict=False, _force_outplace=False, _return_inputs_states=True)
File "/usr/local/lib/python3.7/dist-packages/torch/jit/_trace.py", line 1148, in _get_trace_graph
outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/torch/jit/_trace.py", line 130, in forward
self._force_outplace,
File "/usr/local/lib/python3.7/dist-packages/torch/jit/_trace.py", line 112, in wrapper
tuple(x.clone(memory_format=torch.preserve_format) for x in args)
File "/usr/local/lib/python3.7/dist-packages/torch/jit/_trace.py", line 112, in <genexpr>
tuple(x.clone(memory_format=torch.preserve_format) for x in args)
RuntimeError: Cannot insert a Tensor that requires grad as a constant. Consider making it a parameter or input, or detaching the gradient
Tensor:
(1,1,.,.) =
0.0133 0.0147 -0.0154 -0.0230 -0.0409 -0.0430 -0.0708
0.0041 0.0058 0.0149 0.0206 0.0022 -0.0209 -0.0385
0.0223 0.0236 0.0161 0.0588 0.1028 0.0626 0.0520
0.0232 0.0042 -0.0459 -0.0487 -0.0164 0.0402 0.0658
-0.0009 0.0278 -0.0101 -0.0554 -0.1272 -0.0766 0.0078
0.0036 0.0480 0.0621 0.0844 0.0243 -0.0337 -0.0157
-0.0800 -0.0322 -0.0178 0.0342 0.0354 0.0224 0.0017
...
[ torch.FloatTensor{64,3,7,7} ]
My code:
#!/usr/bin/python3
import os
import torch
import torchvision.models as models
from pathlib import Path

model = models.resnet50(pretrained=True)
# model.eval()

input_names = ["actual_input"]
output_names = ["output"]
onnx_path = "output/resnet50.onnx"
output_path = "output"
model_path = Path(output_path).with_suffix(".pth")
ir_path = model_path.with_suffix(".xml")

with torch.no_grad():
    dummy_input = None
    dummy_input = torch.randn(1, 3, 224, 224)
    dummy_input.requires_grad = False

    # Export PyTorch to ONNX
    torch.onnx.export(model,
                      dummy_input,
                      onnx_path,
                      opset_version=10,
                      verbose=False,
                      input_names=input_names,
                      output_names=output_names)
                      # export_params=True)
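
For reference, the workaround I understood from those threads (and from the error's own hint about detaching the gradient) is roughly the sketch below: put the model in eval mode and detach the gradient on every parameter before calling torch.onnx.export. That is more or less what I tried on the Pi, still without luck.

# Rough sketch of the workaround I tried, following the error's suggestion
# to detach the gradient: switch to eval mode and mark every parameter as
# not requiring grad before tracing/exporting.
import torch
import torchvision.models as models

model = models.resnet50(pretrained=True)
model.eval()
for param in model.parameters():
    param.requires_grad_(False)

with torch.no_grad():
    dummy_input = torch.randn(1, 3, 224, 224)
    torch.onnx.export(model,
                      dummy_input,
                      "output/resnet50.onnx",
                      opset_version=10,
                      input_names=["actual_input"],
                      output_names=["output"])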