Hello all!
I’m trying to convert a PyTorch model (https://github.com/ZTao-z/resnet-ssd#use-a-pre-trained-ssd-network-for-detection) to ONNX.
This is the code I’m trying to use:
from ssd import build_ssd
import os
import sys
import time
import torch
import torch.onnx
import torch.nn as nn
import torch.backends.cudnn as cudnn
import numpy as np
import argparse

# New tensors default to CUDA floats so the model and inputs both live on
# the GPU (the repo's SSD implementation assumes CUDA).
torch.set_default_tensor_type('torch.cuda.FloatTensor')

if __name__ == '__main__':
    # BUG FIX: `args` was used below but never defined, which raises a
    # NameError before the export is even attempted.  Build the parser here.
    parser = argparse.ArgumentParser(
        description='Export a trained SSD300 model to ONNX')
    parser.add_argument('--resume', default=None, type=str,
                        help='path to a checkpoint (state_dict) to load, '
                             'e.g. ssd300_mAP_77.43_v2.pth')
    args = parser.parse_args()

    # build_ssd(phase, size, num_classes): 300x300 input, 21 VOC classes.
    # NOTE(review): the traced-region error in the post comes from the SSD
    # prior boxes, which are computed from constants with no data dependence
    # on the input -- the ONNX tracer rejects such outputs.  Registering the
    # priors as a buffer (or exporting only the data-dependent heads) is the
    # usual workaround; confirm against the model's forward().
    ssd_net = build_ssd('train', 300, 21)
    if args.resume:
        print('Resuming training, loading {}...'.format(args.resume))
        ssd_net.load_weights(args.resume)
    ssd_net = ssd_net.cuda().eval()

    # Gradients are never needed for export, so the dummy input does not
    # request them and all parameters are frozen.
    dummy_input = torch.randn(1, 3, 300, 300, device='cuda')
    for param in ssd_net.parameters():
        param.requires_grad = False

    torch.onnx.export(ssd_net, dummy_input, "onnx_model_name.onnx",
                      verbose=True, export_params=True)
This is the error:
Resuming training, loading ssd300_mAP_77.43_v2.pth...
Loading weights into state dict...
Finished!
Traceback (most recent call last):
File "onnx_conversion.py", line 38, in <module>
torch.onnx.export(ssd_net, dummy_input, "onnx_model_name.onnx", verbose=True, export_params=True)
File "/home/user_name/venv/lib/python3.6/site-packages/torch/onnx/__init__.py", line 208, in export
custom_opsets, enable_onnx_checker, use_external_data_format)
File "/home/user_name/venv/lib/python3.6/site-packages/torch/onnx/utils.py", line 92, in export
use_external_data_format=use_external_data_format)
File "/home/user_name/venv/lib/python3.6/site-packages/torch/onnx/utils.py", line 530, in _export
fixed_batch_size=fixed_batch_size)
File "/home/user_name/venv/lib/python3.6/site-packages/torch/onnx/utils.py", line 366, in _model_to_graph
graph, torch_out = _trace_and_get_graph_from_model(model, args)
File "/home/user_name/venv/lib/python3.6/site-packages/torch/onnx/utils.py", line 319, in _trace_and_get_graph_from_model
torch.jit._get_trace_graph(model, args, strict=False, _force_outplace=False, _return_inputs_states=True)
File "/home/user_name/venv/lib/python3.6/site-packages/torch/jit/__init__.py", line 338, in _get_trace_graph
outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
File "/home/user_name/venv/lib/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/user_name/venv/lib/python3.6/site-packages/torch/jit/__init__.py", line 426, in forward
self._force_outplace,
RuntimeError: output 1 ( 0.0133 0.0133 0.0700 0.0700
0.0133 0.0133 0.1025 0.1025
0.0133 0.0133 0.0990 0.0495
0.0133 0.0133 0.0495 0.0990
0.0400 0.0133 0.0700 0.0700
------------------ 8723 more lines ------------------
0.5000 0.5000 0.8700 0.8700
0.5000 0.5000 0.9558 0.9558
0.5000 0.5000 1.0000 0.6152
0.5000 0.5000 0.6152 1.0000
[ CUDAFloatType{8732,4} ]) of traced region did not have observable data dependence with trace inputs; this probably indicates your program cannot be understood by the tracer.
There is no problem with invoking the PyTorch model or loading the state_dict.
The error only occurs during the conversion to an ONNX model.
Any help is greatly appreciated!
Thanks!