Need help understanding an error message while converting an nn.Module to ONNX via torch.jit.script

My goal is to implement a few custom image preprocessing transformations and convert them to ONNX operators. I'm fairly new to PyTorch and torchvision. I wrote the script below as a first prototype to try things out, but it failed with this error message:

"Only prim ops are allowed to not have a registered operator but aten::mul doesn't have one either. We don't know if this op has side effects."

Does anyone know what this means and how to resolve it?
Below is the code to reproduce it.

import cv2
import torch
import torchvision.transforms.functional as F


class RescaleWithPadding(torch.nn.Module):
    def __init__(self, height: int, width: int, padding_value: int = 0):
        super().__init__()
        self.height = height
        self.width = width
        self.padding_value = padding_value
        self.max_size = max(height, width)
        self.interpolation = F.InterpolationMode.BILINEAR

    def forward(self, img: torch.Tensor) -> torch.Tensor:
        b, c, image_height, image_width = img.shape
        # Resize so the smaller edge keeps its length, with the longer edge
        # capped at max_size (the padding step isn't implemented yet in this
        # prototype).
        smaller_edge_size = min(image_height, image_width)
        img = F.resize(
            img=img,
            size=[smaller_edge_size],
            interpolation=self.interpolation,
            max_size=self.max_size,
        )
        return img


def test_rescale_with_padding():
    # cv2.imread returns an HWC uint8 BGR array; convert to a float32
    # NCHW tensor before feeding it to the module.
    img = torch.tensor(
        cv2.imread("tests/data/2022-11-10T02-18-31-036Z-1066.jpg"), dtype=torch.float32
    ).permute(2, 0, 1)[None, :]
    # Eager execution works fine; the failure happens during ONNX export
    # of the scripted module.
    result_img = RescaleWithPadding(512, 512)(img)
    module = torch.jit.script(RescaleWithPadding(512, 512))
    torch.onnx.export(
        model=module,
        args=(img,),
        f="transforms.onnx",
        export_params=True,
        verbose=True,
        input_names=["input"],
        output_names=["output"],
    )
    print(f"finished: {result_img.shape}")

Here are the torch and torchvision versions I'm using:

torch                         1.12.1
torchvision                   0.13.1
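
(Those numbers are from the poetry virtualenv. In case it helps, a quick runtime check that the interpreter sees the same builds:)

import torch
import torchvision

print(torch.__version__)        # expect 1.12.1
print(torchvision.__version__)  # expect 0.13.1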

Below is the full stack trace:

__________________________ test_rescale_with_padding ___________________________

    def test_rescale_with_padding():
        img = torch.tensor(
            cv2.imread("tests/data/2022-11-10T02-18-31-036Z-1066.jpg"), dtype=torch.float32
        ).permute(2, 0, 1)[None, :]
        result_img = RescaleWithPadding(512, 512)(img)
        expected_aspect_ratio = img.shape[2] / img.shape[3]
        actual_aspect_ratio = result_img.shape[2] / result_img.shape[3]
        assert actual_aspect_ratio == pytest.approx(expected_aspect_ratio, 0.01)
        module = torch.jit.script(RescaleWithPadding(512, 512))
        # module = RescaleWithPadding(512, 512)
>       torch.onnx.export(
            model=module,
            args=(img,),
            f="transforms.onnx",
            export_params=True,
            verbose=True,
            input_names=["input"],
            output_names=["output"],
        )

tests/test_transforms.py:17: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
../../Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torch/onnx/__init__.py:350: in export
    return utils.export(
../../Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torch/onnx/utils.py:163: in export
    _export(
../../Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torch/onnx/utils.py:1074: in _export
    graph, params_dict, torch_out = _model_to_graph(
../../Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torch/onnx/utils.py:731: in _model_to_graph
    graph = _optimize_graph(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

graph = graph(%img.1 : Float(1, 3, 723, 352, strides=[3, 1, 1056, 3], requires_grad=0, device=cpu)):
  %1 : __torch__.tests.te...tensor.py:569:14
          -> (%img.33)
        block1():
          -> (%img.47)
      -> (%img.46)
  return (%img.10)

operator_export_type = <OperatorExportTypes.ONNX: 0>
_disable_torch_constant_prop = False, fixed_batch_size = False, params_dict = {}
dynamic_axes = {}, input_names = ['input']
module = <torch.ScriptModule object at 0x17480a9f0>

    def _optimize_graph(
        graph: _C.Graph,
        operator_export_type: _C_onnx.OperatorExportTypes,
        _disable_torch_constant_prop: bool = False,
        fixed_batch_size: bool = False,
        params_dict=None,
        dynamic_axes=None,
        input_names=None,
        module=None,
    ):
        # Inline everything
        _C._jit_pass_inline(graph)
    
        # Remove fork/wait nodes
        _C._jit_pass_inline_fork_wait(graph)
        _C._jit_pass_lint(graph)
>       _C._jit_pass_lower_all_tuples(graph)
E       RuntimeError: kind_.is_prim() INTERNAL ASSERT FAILED at "/Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/jit/ir/ir.cpp":1219, please report a bug to PyTorch. Only prim ops are allowed to not have a registered operator but aten::mul doesn't have one either. We don't know if this op has side effects.

../../Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torch/onnx/utils.py:234: RuntimeError
----------------------------- Captured stdout call -----------------------------
Torch IR graph at exception: graph(%img.1 : Float(1, 3, 723, 352, strides=[3, 1, 1056, 3], requires_grad=0, device=cpu)):
  %1 : __torch__.tests.test_transforms.___torch_mangle_4.RescaleWithPadding = prim::CreateObject()
  %2 : int[] = prim::Constant[value=[0, 1, 2, 3, 4]]()
  %3 : int = prim::Constant[value=4]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:548:18
  %4 : int[] = prim::Constant[value=[6, 7]]()
  %5 : int = prim::Constant[value=-2]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:27:30
  %6 : int[] = prim::Constant[value=[1, 2]]()
  %7 : str = prim::Constant[value="Tensor is not a torch image."]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:15:24
  %8 : str = prim::Constant[value="builtins.TypeError"]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:15:14
  %9 : bool = prim::Constant[value=0]()
  %10 : int = prim::Constant[value=0]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:470:70
  %11 : int = prim::Constant[value=2]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:450:32
  %12 : int = prim::Constant[value=1]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:450:29
  %13 : str = prim::Constant[value="builtins.ValueError"]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:444:14
  %14 : str = prim::Constant[value="Size must be an int or a 1 or 2 element tuple/list, not a {} element tuple/list"]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:452:16
  %15 : str = prim::Constant[value="max_size should only be passed if size specifies the length of the smaller edge, i.e. size should be an int or a sequence of length 1 in torchscript mode."]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:456:16
  %16 : str = prim::Constant[value="max_size = {} must be strictly greater than the requested size for the smaller edge size = {}"]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:477:20
  %17 : int = prim::Constant[value=6]() # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:491:69
  %18 : str = prim::Constant[value="bilinear"]()
  %19 : NoneType = prim::Constant()
  %self.max_size : int = prim::Constant[value=512]()
  %21 : int[] = aten::size(%img.1) # <string>:13:9
  %b : int, %c : int, %image_height.1 : int, %image_width.1 : int = prim::ListUnpack(%21)
  %smaller_edge_size.1 : int = prim::min(%image_height.1, %image_width.1) # /Users/asia/work/edge-client/tests/test_transforms.py:41:28
  %27 : int[] = prim::ListConstruct(%smaller_edge_size.1)
  %28 : Tensor = prim::Uninitialized()
  %29 : int? = prim::Uninitialized()
  %30 : int = aten::dim(%img.1) # <string>:3:9
  %31 : bool = aten::ge(%30, %11) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:10:11
  %32 : bool = aten::__not__(%31) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:14:7
   = prim::If(%32) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:14:4
    block0():
       = prim::RaiseException(%7, %8) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:15:8
      -> ()
    block1():
      -> ()
  %33 : int = aten::len(%27) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:450:11
  %34 : bool = aten::__contains__(%6, %33) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:450:11
  %35 : bool = aten::__not__(%34) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:450:11
   = prim::If(%35) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:450:8
    block0():
      %36 : str = aten::format(%14, %33) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:452:16
       = prim::RaiseException(%36, %13) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:451:12
      -> ()
    block1():
      -> ()
  %37 : bool = aten::ne(%33, %12) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:454:36
  %max_size.59 : int? = prim::If(%37) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:454:8
    block0():
       = prim::RaiseException(%15, %13) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:455:12
      -> (%29)
    block1():
      -> (%self.max_size)
   = prim::If(%32) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:14:4
    block0():
       = prim::RaiseException(%7, %8) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:15:8
      -> ()
    block1():
      -> ()
  %39 : int[] = aten::slice(%21, %5, %19, %12) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:27:20
  %height.1 : int, %width.1 : int = prim::ListUnpack(%39)
  %42 : bool = aten::eq(%33, %12) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:468:32
  %43 : bool, %44 : Tensor, %new_h : int, %new_w : int = prim::If(%42) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:468:4
    block0():
      %47 : bool = aten::le(%width.1, %height.1) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:469:32
      %99 : Tensor, %100 : Tensor = prim::If(%47) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:469:22
        block0():
          %49 : (int, int) = prim::TupleConstruct(%width.1, %height.1)
          -> (%width.1, %height.1)
        block1():
          %50 : (int, int) = prim::TupleConstruct(%height.1, %width.1)
          -> (%height.1, %width.1)
      %98 : (int, int) = prim::TupleConstruct(%99, %100) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:469:22
      %short.1 : int, %long.1 : int = prim::TupleUnpack(%98)
      %requested_new_short.1 : int = aten::__getitem__(%27, %10) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:470:65
      %54 : int = aten::mul(%requested_new_short.1, %100) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:472:55
      %55 : float = aten::div(%54, %99) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:472:55
      %new_long.1 : int = aten::Int(%55) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:472:51
      %57 : bool = aten::__isnot__(%max_size.59, %19) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:474:11
      %new_short : int, %new_long : int = prim::If(%57) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:474:8
        block0():
          %max_size.27 : int = prim::unchecked_cast(%max_size.59)
          %61 : bool = aten::le(%max_size.27, %requested_new_short.1) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:475:15
           = prim::If(%61) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:475:12
            block0():
              %62 : str = aten::format(%16, %max_size.27, %27) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:477:20
               = prim::RaiseException(%62, %13) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:476:16
              -> ()
            block1():
              -> ()
          %63 : bool = aten::gt(%new_long.1, %max_size.27) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:480:15
          %new_short.27 : int, %new_long.29 : int = prim::If(%63) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:480:12
            block0():
              %66 : int = aten::mul(%max_size.27, %requested_new_short.1) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:481:42
              %67 : float = aten::div(%66, %new_long.1) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:481:42
              %new_short.3 : int = aten::Int(%67) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:481:38
              -> (%new_short.3, %max_size.27)
            block1():
              -> (%requested_new_short.1, %new_long.1)
          -> (%new_short.27, %new_long.29)
        block1():
          -> (%requested_new_short.1, %new_long.1)
      %102 : Tensor, %103 : Tensor = prim::If(%47) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:483:23
        block0():
          %70 : (int, int) = prim::TupleConstruct(%new_short, %new_long)
          -> (%new_short, %new_long)
        block1():
          %71 : (int, int) = prim::TupleConstruct(%new_long, %new_short)
          -> (%new_long, %new_short)
      %101 : (int, int) = prim::TupleConstruct(%102, %103) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:483:23
      %new_w.1 : int, %new_h.1 : int = prim::TupleUnpack(%101)
      %74 : int[] = prim::ListConstruct(%width.1, %height.1)
      %75 : int[] = prim::ListConstruct(%102, %103)
      %76 : bool = aten::eq(%74, %75) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:485:11
      %77 : Tensor = prim::If(%76) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:485:8
        block0():
          -> (%img.1)
        block1():
          -> (%28)
      -> (%76, %77, %103, %102)
    block1():
      %new_w.5 : int = aten::__getitem__(%27, %12) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:489:23
      %new_h.5 : int = aten::__getitem__(%27, %10) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:489:32
      -> (%9, %28, %new_h.5, %new_w.5)
  %img.10 : Tensor = prim::If(%43)
    block0():
      -> (%44)
    block1():
      %81 : bool = aten::lt(%30, %3) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:548:7
      %img.37 : Tensor = prim::If(%81) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:548:4
        block0():
          %img.7 : Tensor = aten::unsqueeze(%img.1, %10) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:549:14
          -> (%img.7)
        block1():
          -> (%img.1)
      %out_dtype.1 : int = prim::dtype(%img.37)
      %85 : bool = aten::__contains__(%4, %out_dtype.1) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:554:7
      %86 : bool = aten::__not__(%85) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:554:7
      %img.42 : Tensor = prim::If(%86) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:554:4
        block0():
          %img.23 : Tensor = aten::to(%img.37, %17, %9, %9, %19) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:557:14
          -> (%img.23)
        block1():
          -> (%img.37)
      %89 : int[] = prim::ListConstruct(%new_h, %new_w)
      %img.15 : Tensor = aten::__interpolate(%img.42, %89, %19, %18, %9, %19, %9) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:496:10
      %img.47 : Tensor = prim::If(%81) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:562:4
        block0():
          %img.5 : Tensor = aten::squeeze(%img.15, %10) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:563:14
          -> (%img.5)
        block1():
          -> (%img.15)
      %img.46 : Tensor = prim::If(%86) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:565:4
        block0():
          %94 : bool = aten::__contains__(%2, %out_dtype.1) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:566:11
          %img.49 : Tensor = prim::If(%94) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:566:8
            block0():
              %img.19 : Tensor = aten::round(%img.47) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:568:18
              -> (%img.19)
            block1():
              -> (%img.47)
          %img.33 : Tensor = aten::to(%img.49, %out_dtype.1, %9, %9, %19) # /Users/asia/Library/Caches/pypoetry/virtualenvs/edge-client-NU7TENB2-py3.10/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:569:14
          -> (%img.33)
        block1():
          -> (%img.47)
      -> (%img.46)
  return (%img.10)

=========================== short test summary info ============================
FAILED tests/test_transforms.py::test_rescale_with_padding - RuntimeError: ki...
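
From the IR dump above, the node the assert seems to trip on is %54 : int = aten::mul(%requested_new_short.1, %100), i.e. the integer multiply inside torchvision's resize size computation. To understand it, I reconstructed that computation from the IR; this is only a sketch of what I think the scripted F.resize(size=[s], max_size=m) does, not the actual torchvision source:

def expected_resize_output(h: int, w: int, size: int, max_size: int):
    # Orient so `short` is the smaller edge (matches the aten::le branch in the IR).
    short, long = (w, h) if w <= h else (h, w)
    new_short = size
    new_long = int(size * long / short)  # the int aten::mul / aten::div the error points at
    if new_long > max_size:
        # Shrink both edges so the longer one is capped at max_size.
        new_short, new_long = int(max_size * new_short / new_long), max_size
    new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
    return new_h, new_w

For the Float(1, 3, 723, 352) input in the trace, this gives (new_h, new_w) = (512, 249), which is the aspect-preserving resize I expect. So the eager math looks fine, and the failure seems specific to how the scripted integer arithmetic is lowered during export.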

Thanks!

Any pointers or documentation to help me better understand the context would be appreciated.
I also posted a question in this GitHub issue: [ONNX] Only prim ops are allowed to not have a registered operator but aten::mul doesn't have one either · Issue #80300 · pytorch/pytorch · GitHub
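
In the meantime, here is an untested workaround sketch I'm considering: skip torch.jit.script entirely and let torch.onnx.export trace the eager module. Tracing evaluates the Python control flow and the integer size arithmetic at export time, so the prim::If / int aten::mul nodes that scripting preserves shouldn't end up in the graph, at the cost of baking the example input's shape into the model unless dynamic_axes is also passed. (The output filename here is just illustrative.)

torch.onnx.export(
    model=RescaleWithPadding(512, 512),  # plain nn.Module: export() traces it
    args=(img,),
    f="transforms_traced.onnx",
    export_params=True,
    input_names=["input"],
    output_names=["output"],
)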