Convert a .pt file of PyTorch YOLOv3 to ONNX


The following code is based on master branch of GitHub - ultralytics/yolov3: YOLOv3 in PyTorch > ONNX > CoreML > TFLite
I am trying to convert pt file of yolov3 to onnx format.

The following line could be correctly executed:
p = torch_model(x)

I have confirmed that both the network and input are put to GPU.

However, the torch.onnx.export() function encountered the following issue:

Traceback (most recent call last):
  File "C:\ProgramData\Anaconda3\lib\site-packages\torch\onnx\utils.py", line 632, in _export
    _model_to_graph(model, args, verbose, input_names,
  File "C:\ProgramData\Anaconda3\lib\site-packages\torch\onnx\utils.py", line 449, in _model_to_graph
    params_dict = torch._C._jit_pass_onnx_constant_fold(graph, params_dict,
RuntimeError: Input, output and indices must be on the current device

from __future__ import print_function
import os
import datetime
import numpy as np

from models.yolo import *
import torch
# import torch.nn as nn
# from models.common import Conv, DWConv
from utils.google_utils import attempt_download

class Ensemble(nn.ModuleList):
    # Ensemble of models
    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        y = []
        for module in self:
            y.append(module(x, augment)[0])
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y =, 1)  # nms ensemble
        y = torch.stack(y).mean(0)  # mean ensemble
        return y, None  # inference, train output

def attempt_load(weights, map_location=None):
    """Load one or more YOLO checkpoints.

    Args:
        weights: a single checkpoint path or a list of paths ([a, b, c] or a).
        map_location: forwarded to torch.load (e.g. a device or 'cpu').

    Returns:
        A single fused/eval FP32 model when one path is given, otherwise an
        Ensemble whose forward averages the member outputs.
    """
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        # Each checkpoint stores the module under the 'model' key.
        model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model

    # Compatibility updates for checkpoints saved with older PyTorch versions.
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # single model: unwrap the ensemble

    # Bug fix: this path was unreachable (it sat after the unconditional
    # return above). For a real ensemble, copy metadata from the last model.
    print('Ensemble created with %s\n' % weights)
    for k in ['names', 'stride']:
        setattr(model, k, getattr(model[-1], k))
    return model  # return ensemble

def convert_model(input_pth, output_onnx):
    """Export a YOLOv3 .pt checkpoint to ONNX.

    Args:
        input_pth: path to the PyTorch checkpoint (.pt file).
        output_onnx: destination path for the exported ONNX model.
    """
    print('cuda is available == {}'.format(torch.cuda.is_available()))
    device = select_device('')
    torch_model = attempt_load(input_pth, map_location=device).half()

    imgsz = 640
    x = torch.zeros((1, 3, imgsz, imgsz), device=device).half()  # init img

    # Dry run: confirms the forward pass works on the selected device.
    torch_model(x)

    # Fix for "RuntimeError: Input, output and indices must be on the current
    # device" raised inside _jit_pass_onnx_constant_fold: constant folding
    # during export can fail when the traced model/input live on the GPU in
    # FP16, so export from CPU in FP32. The exported graph is unaffected.
    torch_model = torch_model.float().cpu()
    x = x.float().cpu()

    torch.onnx.export(torch_model,            # model being run
                      x,                      # model input (tuple for multiple inputs)
                      output_onnx,            # where to save the model
                      opset_version=11,       # ONNX opset version to target
                      input_names=['input'],  # the model's input names
                      output_names=['output'])  # the model's output names

1 Like


Is there any help about this issue?