I am trying to pass a mapping to a module and export it to onnx, which I attempt in the following code:
import torch
import numpy
import io
import typing
import onnx
import onnxruntime
class Something(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, batch: typing.Dict[str, torch.Tensor]):
y = self.layer(batch["x"])
return {"y": y}
def main():
something = Something()
with io.BytesIO() as onnx_bytes:
torch.onnx.export(something, ({"x": torch.ones(10, 10)}, {}), onnx_bytes)
onnx_model = onnx.load_model_from_string(onnx_bytes.getvalue())
model_proto = onnx_model.SerializeToString()
session = onnxruntime.InferenceSession(model_proto)
inputs = {session.get_inputs()[0].name: {"x": numpy.ones((10, 10))}}
print(session.run(None, inputs)
However I get an exception:
Traceback (most recent call last):
File "/media/main.py", line 53, in <module>
main()
File "/media/main.py", line 32, in main
print(session.run(None, inputs))
File "/media/venv/lib/python3.8/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 188, in run
return self._sess.run(output_names, input_feed, run_options)
onnxruntime.capi.onnxruntime_pybind11_state.Fail: <class 'TypeError'>: only size-1 arrays can be converted to Python scalars
Is the desired behaviour achivable somehow?