Inferring with a quantized YOLOv6 model, an error occurred: 'Conv2d' object has no attribute '_modules'

I am quantizing YOLOv6 using the eager-mode PTSQ API. I have quantized the CNN and obtained a quantized result, but when I run inference on an image with the quantized .pt file, an error occurs:

AttributeError: 'Conv2d' object has no attribute '_modules'
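
For reference, the eager-mode PTSQ recipe from the PyTorch quantization docs that I am following is roughly the following (model, modules_to_fuse, and calibration_batch are placeholders):

model.eval()
model.qconfig = torch.quantization.get_default_qconfig('qnnpack')
torch.quantization.fuse_modules(model, modules_to_fuse, inplace=True)
prepared = torch.quantization.prepare(model)
prepared(calibration_batch)  # run representative data through the observers
quantized = torch.quantization.convert(prepared)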

My quantization script:

import torch
import torch.nn as nn
from torch.ao.quantization import QConfigMapping
import torch.quantization.quantize_fx as quantize_fx

from torch.nn.utils.fusion import fuse_conv_bn_eval
import copy
# from torch.quantization.fuser_method_mappings import get_default_fuser_method_mapping


def get_module_by_name(model, name):
    """
    获取给定模型中名称为name的模块对象
    """
    module_names = name.split('.')
    module = model
    for module_name in module_names:
        module = module._modules[module_name]
    return module
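
# Note: on torch >= 1.9 this helper is equivalent to the built-in
# nn.Module.get_submodule, e.g. (hypothetical YOLOv6 module name):
#   conv = model.get_submodule('backbone.stem.block.conv')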


def get_layers_to_fuse(model):
    """Collect [conv, bn, activation] name groups in the format expected by
    torch.quantization.fuse_modules."""
    all_layers = [name for name, _ in model.named_modules()]
    module_to_fuse = []
    for name, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            prefix = name.split('.')[:-1]
            bn = '.'.join(prefix + ['bn'])
            act = '.'.join(prefix + ['act'])
            relu = '.'.join(prefix + ['relu'])
            silu = '.'.join(prefix + ['silu'])
            if bn in all_layers:
                if act in all_layers:
                    module_to_fuse.append([name, bn, act])
                elif relu in all_layers:
                    module_to_fuse.append([name, bn, relu])
                elif silu in all_layers:
                    module_to_fuse.append([name, bn, silu])
                else:
                    module_to_fuse.append([name, bn])
    return module_to_fuse
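
# Example of the returned structure (module names are hypothetical):
#   [['backbone.stem.block.conv', 'backbone.stem.block.bn'],
#    ['backbone.ERBlock_2.0.block.conv', 'backbone.ERBlock_2.0.block.bn',
#     'backbone.ERBlock_2.0.block.relu'], ...]
# i.e. lists of dotted module names, which is what fuse_modules consumes.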



def get_layers_to_fuse_without_silu(model):
    """Same as get_layers_to_fuse, but when the sibling activation is an
    nn.SiLU (which eager-mode fusion cannot fuse), fuse conv + bn only."""
    all_layers = [name for name, _ in model.named_modules()]
    module_to_fuse = []
    for name, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            prefix = name.split('.')[:-1]
            bn = '.'.join(prefix + ['bn'])
            act = '.'.join(prefix + ['act'])
            relu = '.'.join(prefix + ['relu'])
            if bn in all_layers:
                if act in all_layers:
                    act_module = get_module_by_name(model, act)
                    if isinstance(act_module, nn.SiLU):
                        # SiLU cannot be fused in eager mode; fuse conv + bn only
                        module_to_fuse.append([name, bn])
                    else:
                        module_to_fuse.append([name, bn, act])
                elif relu in all_layers:
                    module_to_fuse.append([name, bn, relu])
                else:
                    module_to_fuse.append([name, bn])
    return module_to_fuse
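
# Eager-mode fusion only ships fuser methods for a fixed set of patterns
# (roughly conv+bn, conv+bn+relu, conv+relu, linear+relu, bn+relu);
# (Conv2d, BatchNorm2d, SiLU) is not among them, hence the SiLU is left out
# above. A small self-contained sketch that shows the triple being rejected:
def _check_conv_bn_silu_unsupported():
    m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.SiLU()).eval()
    try:
        torch.quantization.fuse_modules(m, [['0', '1', '2']], inplace=True)
    except Exception as e:
        print('conv+bn+silu fusion is unsupported:', e)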

# Fusion rule for conv + bn + silu (currently unused; the lines that would
# wire it in are commented out in ptsq_api below). A sketch: fold the
# BatchNorm into the Conv2d with torch.nn.utils.fusion.fuse_conv_bn_eval
# (eval mode only) and keep the SiLU as a separate module.
def fuse_conv_bn_silu(conv, bn, silu):
    fused_conv = fuse_conv_bn_eval(conv, bn)
    return nn.Sequential(fused_conv, silu)
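
# A sketch of how such a custom rule could be wired in, assuming a torch
# version whose eager-mode fuse_modules accepts fuse_custom_config_dict:
#   fuse_custom_config_dict = {
#       'additional_fuser_method_mapping': {
#           (nn.Conv2d, nn.BatchNorm2d, nn.SiLU): fuse_conv_bn_silu,
#       },
#   }
#   torch.quantization.fuse_modules(model, modules_to_fuse, inplace=True,
#                                   fuse_custom_config_dict=fuse_custom_config_dict)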

def ptsq_api(pt_file, name):
    pt_loaded = torch.load(pt_file)
    model_fp32 = pt_loaded['model'].eval()
    # my_fuser_method_mapping = get_default_fuser_method_mapping()
    # my_fuser_method_mapping[(nn.Conv2d, nn.BatchNorm2d, nn.SiLU)] = fuse_conv_bn_silu
    model_fp32.qconfig = torch.quantization.get_default_qconfig('qnnpack')
    model_list_to_fuse = get_layers_to_fuse_without_silu(model_fp32)
    # print('model_list_to_fuse = ', model_list_to_fuse)
    # torch.quantization.fuse_modules(model_fp32, model_list_to_fuse, fuser_method=my_fuser_method_mapping, inplace=True)
    torch.quantization.fuse_modules(model_fp32, model_list_to_fuse, inplace=True)
    model_fp32_prepared = torch.quantization.prepare(model_fp32)
    # calibrate: a random batch here; representative images would be better
    img_batch = torch.randn(1, 3, 640, 640)
    model_fp32_prepared(img_batch)
    model_int8 = torch.quantization.convert(model_fp32_prepared)
    save(model_int8, name)

def save(model, name):
    ckpt = {'model': model}
    torch.save(ckpt, name)
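
# Sketch of a state_dict-based alternative: saving only the weights avoids
# pickling the whole module object, at the cost of rebuilding the
# fused/prepared/converted model structure before load_state_dict at load time.
def save_state_dict(model, name):
    torch.save({'model_state_dict': model.state_dict()}, name)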

def fxptq_api(pt_file, name):
    pt_loaded = torch.load(pt_file)
    model_pt = pt_loaded['model']

    model_to_quantize = copy.deepcopy(model_pt)
    qconfig_mapping = QConfigMapping().set_global(torch.quantization.get_default_qconfig('qnnpack'))
    model_to_quantize.eval()
    # prepare (prepare_fx expects example_inputs as a tuple of example args)
    example_inputs = (torch.rand(1, 3, 640, 640) * 2 - 1,)
    model_prepared = quantize_fx.prepare_fx(model_to_quantize, qconfig_mapping, example_inputs)
    # calibrate (not shown)
    # quantize
    model_quantized = quantize_fx.convert_fx(model_prepared)
    save(model_quantized, name)
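    # For deployment, the FX-produced GraphModule can also be serialized via
    # TorchScript (a sketch, untested on this model):
    #   scripted = torch.jit.script(model_quantized)
    #   torch.jit.save(scripted, name + '.torchscript')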

pt_file = '/home/cgf/Projects/YOLO/v6/YOLOv6/yolov6n_qarepvgg.pt'
name = 'q_yolov6_qarepvgg.pt'
ptsq_api(pt_file,name)
# fxptq_api(pt_file,name)


My inference command:

python tools/infer.py --weights q_yolov6_qarepvgg.pt --source 77.jpg --device cpu

The traceback:

(YOLOv6) cgf@ubuntu2204:~/Projects/YOLO/v6/YOLOv6$ python tools/infer.py --weights q_yolov6_qarepvgg.pt --source 77.jpg --device cpu
Namespace(weights='q_yolov6_qarepvgg.pt', source='77.jpg', webcam=False, webcam_addr='0', yaml='data/coco.yaml', img_size=[640, 640], conf_thres=0.4, iou_thres=0.45, max_det=1000, device='cpu', save_txt=False, not_save_img=False, save_dir=None, view_img=False, classes=None, agnostic_nms=False, project='runs/inference', name='exp', hide_labels=False, hide_conf=False, half=False)
Save directory already existed
Loading checkpoint from q_yolov6_qarepvgg.pt
Traceback (most recent call last):
  File "/home/cgf/Projects/YOLO/v6/YOLOv6/tools/infer.py", line 120, in <module>
    main(args)
  File "/home/cgf/Projects/YOLO/v6/YOLOv6/tools/infer.py", line 115, in main
    run(**vars(args))
  File "/home/cgf/Projects/YOLO/v6/env_config/conda/envs/YOLOv6/lib/python3.10/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File "/home/cgf/Projects/YOLO/v6/YOLOv6/tools/infer.py", line 107, in run
    inferer = Inferer(source, webcam, webcam_addr, weights, device, yaml, img_size, half)
  File "/home/cgf/Projects/YOLO/v6/YOLOv6/yolov6/core/inferer.py", line 68, in __init__
    self.model = DetectBackend(weights, device=self.device)
  File "/home/cgf/Projects/YOLO/v6/YOLOv6/yolov6/layers/common.py", line 481, in __init__
    model = load_checkpoint(weights, map_location=device)
  File "/home/cgf/Projects/YOLO/v6/YOLOv6/yolov6/utils/checkpoint.py", line 26, in load_checkpoint
    model = ckpt['ema' if ckpt.get('ema') else 'model'].float()
  File "/home/cgf/Projects/YOLO/v6/env_config/conda/envs/YOLOv6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 823, in float
    return self._apply(lambda t: t.float() if t.is_floating_point() else t)
  File "/home/cgf/Projects/YOLO/v6/YOLOv6/yolov6/models/yolo.py", line 43, in _apply
    self = super()._apply(fn)
  File "/home/cgf/Projects/YOLO/v6/env_config/conda/envs/YOLOv6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 641, in _apply
    module._apply(fn)
  File "/home/cgf/Projects/YOLO/v6/env_config/conda/envs/YOLOv6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 641, in _apply
    module._apply(fn)
  File "/home/cgf/Projects/YOLO/v6/env_config/conda/envs/YOLOv6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 641, in _apply
    module._apply(fn)
  [Previous line repeated 1 more time]
  File "/home/cgf/Projects/YOLO/v6/env_config/conda/envs/YOLOv6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 640, in _apply
    for module in self.children():
  File "/home/cgf/Projects/YOLO/v6/env_config/conda/envs/YOLOv6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1796, in children
    for name, module in self.named_children():
  File "/home/cgf/Projects/YOLO/v6/env_config/conda/envs/YOLOv6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1815, in named_children
    for name, module in self._modules.items():
  File "/home/cgf/Projects/YOLO/v6/env_config/conda/envs/YOLOv6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1269, in __getattr__
    raise AttributeError("'{}' object has no attribute '{}'".format(
AttributeError: 'Conv2d' object has no attribute '_modules'

What should I do to solve this problem, and are there any problems in my quantization script?

I think this section in the docs would be helpful here: Quantization — PyTorch master documentation