How can I save a model after it has gone through convert_pt2e?

# torch.__version__
# '2.2.0+cu121'

# Repro: statically quantize a small Linear with the PT2E X86-inductor flow,
# then attempt to serialize the converted model with torch.save().
#
# NOTE(review): the snippet as posted was missing `import torch` and the
# imports of prepare_pt2e / convert_pt2e — added here so it actually runs.
import torch
import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer

m = torch.nn.Linear(5, 5)
example_args = (torch.randn(1, 5),)
# Trace the eval-mode module into a pre-autograd ATen graph (PT2E entry point
# in torch 2.2; private API, later replaced by torch.export).
pre_aten_dialect = torch._export.capture_pre_autograd_graph(m.eval(), example_args)
quantizer = X86InductorQuantizer()
quantizer.set_global(xiq.get_default_x86_inductor_quantization_config())
pt2e_dialect = prepare_pt2e(pre_aten_dialect, quantizer)
# Calibration pass with representative data (required for static quantization).
pt2e_dialect(example_args[0])
pt2e_dialect = convert_pt2e(pt2e_dialect)
try:
    # torch.save pickles the GraphModule; this is where the reported failure
    # occurs, so catch a specific exception and show the real message instead
    # of a bare `except:` that hides it (and would also swallow KeyboardInterrupt).
    torch.save(pt2e_dialect, "model.pt")
except Exception as e:
    print(f"error: {e!r}")
model = torch.compile(pt2e_dialect)
torch.save(model, "model.pt")

I got

 ...RecursionError: maximum recursion depth exceeded while calling a Python object

With static quantization, this means the model cannot be serialized after
conversion, so it has to be re-traced and re-calibrated on every run.

Yeah, it's covered here: (prototype) PyTorch 2 Export Post Training Quantization — PyTorch Tutorials 2.2.0+cu121 documentation