Export to onnx format failed

I wanted to graphically visualise a very complex network using Netron. I can’t export the architecture to onnx format. This is my architecture.

>>>fc_arch
DGMArch(
  (fc_start): FCLayer(
    (linear): WeightNormLinear(in_features=2, out_features=1, bias=True)
  )
  (dgm_layers): ModuleList(
    (0): ModuleDict(
      (z): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (g): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (r): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (h): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
    )
    (1): ModuleDict(
      (z): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (g): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (r): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (h): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
    )
    (2): ModuleDict(
      (z): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (g): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (r): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (h): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
    )
    (3): ModuleDict(
      (z): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (g): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (r): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (h): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
    )
    (4): ModuleDict(
      (z): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (g): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (r): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
      (h): DGMLayer(
        (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
        (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
      )
    )
  )
  (fc_end): FCLayer(
    (linear): Linear(in_features=1, out_features=1, bias=True)
  )
)

The architecture contains 3 components.

>>> fc_arch._modules.keys()
odict_keys(['fc_start', 'dgm_layers', 'fc_end'])

I can easily export fc_start.

>>> fc_arch.fc_start
FCLayer(
  (linear): WeightNormLinear(in_features=2, out_features=1, bias=True)
)

using this command.

torch.onnx.export(fc_arch._modules['fc_start'], dummy_input, 'fc_start.onnx', input_names=['X','Y'], output_names=['u'])

But the problem starts when I try to export dgm_layers. These are the DGM layers.

>>> fc_arch.dgm_layers
ModuleList(
  (0): ModuleDict(
    (z): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (g): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (r): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (h): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
  )
  (1): ModuleDict(
    (z): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (g): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (r): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (h): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
  )
  (2): ModuleDict(
    (z): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (g): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (r): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (h): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
  )
  (3): ModuleDict(
    (z): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (g): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (r): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (h): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
  )
  (4): ModuleDict(
    (z): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (g): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (r): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
    (h): DGMLayer(
      (linear_1): WeightNormLinear(in_features=2, out_features=1, bias=False)
      (linear_2): WeightNormLinear(in_features=1, out_features=1, bias=False)
    )
  )
)

Here, the type of dgm_layers is:

>>> type(fc_arch.dgm_layers)
torch.nn.modules.container.ModuleList

When I try to export, it gives me the following error:

>>> torch.onnx.export(fc_arch.dgm_layers, dummy_input, 'dgm_layers.onnx', input_names=['X','Y'], output_names=['u'])
---------------------------------------------------------------------------
NotImplementedError                       Traceback (most recent call last)
/tmp/ipykernel_47794/1172123868.py in <cell line: 1>()
----> 1 torch.onnx.export(fc_arch.dgm_layers, dummy_input, 'dgm_layers.onnx', input_names=['X','Y'], output_names=['u'])

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/__init__.py in export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, custom_opsets, export_modules_as_functions)
    303 
    304     from torch.onnx import utils
--> 305     return utils.export(model, args, f, export_params, verbose, training,
    306                         input_names, output_names, operator_export_type, opset_version,
    307                         do_constant_folding, dynamic_axes,

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/utils.py in export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, custom_opsets, export_modules_as_functions)
    116             operator_export_type = OperatorExportTypes.ONNX
    117 
--> 118     _export(model, args, f, export_params, verbose, training, input_names, output_names,
    119             operator_export_type=operator_export_type, opset_version=opset_version,
    120             do_constant_folding=do_constant_folding, dynamic_axes=dynamic_axes,

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/utils.py in _export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, fixed_batch_size, custom_opsets, add_node_names, onnx_shape_inference, export_modules_as_functions)
    717 
    718             graph, params_dict, torch_out = \
--> 719                 _model_to_graph(model, args, verbose, input_names,
    720                                 output_names, operator_export_type,
    721                                 val_do_constant_folding,

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/utils.py in _model_to_graph(model, args, verbose, input_names, output_names, operator_export_type, do_constant_folding, _disable_torch_constant_prop, fixed_batch_size, training, dynamic_axes)
    497         args = (args, )
    498 
--> 499     graph, params, torch_out, module = _create_jit_graph(model, args)
    500 
    501     params_dict = _get_named_param_dict(graph, params)

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/utils.py in _create_jit_graph(model, args)
    438         return graph, params, torch_out, None
    439     else:
--> 440         graph, torch_out = _trace_and_get_graph_from_model(model, args)
    441         torch._C._jit_pass_onnx_lint(graph)
    442         state_dict = _unique_state_dict(model)

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/utils.py in _trace_and_get_graph_from_model(model, args)
    389 
    390     trace_graph, torch_out, inputs_states = \
--> 391         torch.jit._get_trace_graph(model, args, strict=False, _force_outplace=False, _return_inputs_states=True)
    392     warn_on_static_input_change(inputs_states)
    393 

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/jit/_trace.py in _get_trace_graph(f, args, kwargs, strict, _force_outplace, return_inputs, _return_inputs_states)
   1164     if not isinstance(args, tuple):
   1165         args = (args,)
-> 1166     outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
   1167     return outs

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/jit/_trace.py in forward(self, *args)
    125                 return tuple(out_vars)
    126 
--> 127         graph, out = torch._C._create_graph_by_tracing(
    128             wrapper,
    129             in_vars + module_state,

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/jit/_trace.py in wrapper(*args)
    116             if self._return_inputs_states:
    117                 inputs_states.append(_unflatten(in_args, in_desc))
--> 118             outs.append(self.inner(*trace_inputs))
    119             if self._return_inputs_states:
    120                 inputs_states[0] = (inputs_states[0], trace_inputs)

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/nn/modules/module.py in _slow_forward(self, *input, **kwargs)
   1096                 recording_scopes = False
   1097         try:
-> 1098             result = self.forward(*input, **kwargs)
   1099         finally:
   1100             if recording_scopes:

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/nn/modules/module.py in _forward_unimplemented(self, *input)
    199         registered hooks while the latter silently ignores them.
    200     """
--> 201     raise NotImplementedError
    202 
    203 

NotImplementedError: 
I also tried exporting the submodules with the following commands:

torch.onnx.export(fc_arch._modules['fc_0'], dummy_input, '0.onnx', input_names=['X','Y'], output_names=['u'])
torch.onnx.export(fc_arch.fc_layers._modules, dummy_input, 'fc_layers.onnx', input_names=['X','Y'], output_names=['u']) # just fully connected layer

The second command raises the following error:
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
/tmp/ipykernel_42623/2301621278.py in <cell line: 1>()
----> 1 torch.onnx.export(fc_arch.fc_layers._modules, dummy_input, 'fc_layers.onnx', input_names=['X','Y'], output_names=['u']) # just fully connected layer

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/__init__.py in export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, custom_opsets, export_modules_as_functions)
    303 
    304     from torch.onnx import utils
--> 305     return utils.export(model, args, f, export_params, verbose, training,
    306                         input_names, output_names, operator_export_type, opset_version,
    307                         do_constant_folding, dynamic_axes,

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/utils.py in export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, custom_opsets, export_modules_as_functions)
    116             operator_export_type = OperatorExportTypes.ONNX
    117 
--> 118     _export(model, args, f, export_params, verbose, training, input_names, output_names,
    119             operator_export_type=operator_export_type, opset_version=opset_version,
    120             do_constant_folding=do_constant_folding, dynamic_axes=dynamic_axes,

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/utils.py in _export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, fixed_batch_size, custom_opsets, add_node_names, onnx_shape_inference, export_modules_as_functions)
    699         _set_opset_version(opset_version)
    700         _set_operator_export_type(operator_export_type)
--> 701         with exporter_context(model, training):
    702             val_keep_init_as_ip = _decide_keep_init_as_input(keep_initializers_as_inputs,
    703                                                              operator_export_type,

/scratch/s.1915438/modulus/lib/python3.9/contextlib.py in __enter__(self)
    117         del self.args, self.kwds, self.func
    118         try:
--> 119             return next(self.gen)
    120         except StopIteration:
    121             raise RuntimeError("generator didn't yield") from None

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/utils.py in exporter_context(model, mode)
    100 @contextlib.contextmanager
    101 def exporter_context(model, mode):
--> 102     with select_model_mode_for_export(model, mode) as mode_ctx, \
    103             disable_apex_o2_state_dict_hook(model) as apex_ctx:
    104         yield (mode_ctx, apex_ctx)

/scratch/s.1915438/modulus/lib/python3.9/contextlib.py in __enter__(self)
    117         del self.args, self.kwds, self.func
    118         try:
--> 119             return next(self.gen)
    120         except StopIteration:
    121             raise RuntimeError("generator didn't yield") from None

/scratch/s.1915438/env/modulus/lib/python3.9/site-packages/torch/onnx/utils.py in select_model_mode_for_export(model, mode)
     39 def select_model_mode_for_export(model, mode):
     40     if not isinstance(model, torch.jit.ScriptFunction):
---> 41         is_originally_training = model.training
     42 
     43         if mode is None:

AttributeError: 'collections.OrderedDict' object has no attribute 'training'

You won’t be able to export a ModuleList or ModuleDict directly, because these classes do not define a forward method and thus cannot be executed on their own. They are just containers that store other modules. In your custom model implementation you can treat these objects as a list or dict, respectively, and call the contained layers from your model’s forward method — then export the full model (or any submodule that does define forward) instead of the container.

1 Like