About Quantization using torch.ao.quantization

I am implementing post-training static quantization in PyTorch 2.1.0, but I never end up with a quantized model. Below are my quantization steps:
##########
import torch
from torch.ao.quantization import QuantStub, DeQuantStub, prepare, convert

from models.experimental import attempt_load  # YOLOv5-style helpers; adjust to your repo layout
from utils.general import check_img_size


def static_quantize(weight_path, qconfig_mapping):
    weights, imgsz = opt.weights, opt.img_size  # opt comes from the script's argparse

    # Load the pruned FP32 model (quantized inference runs on CPU)
    device = torch.device("cpu")
    pruned_model = attempt_load(weights, map_location=device)

    stride = int(pruned_model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size
    pruned_model.eval()
    if weight_path != '1234':  # load the pruned weights unless the path is the default "1234"
        pruned_model.load_state_dict(torch.load(weight_path), strict=False)

    # Add QuantStub / DeQuantStub at the model's input and output
    pruned_model = QuantStub()(pruned_model)
    pruned_model = DeQuantStub()(pruned_model)

    # Insert observers, then convert to a quantized model
    pruned_model = prepare(pruned_model, qconfig_mapping)
    quantized_model = convert(pruned_model, inplace=False)

    # Save the quantized model's weights
    torch.save(quantized_model.state_dict(), "static_quantized_original.pt")
    print("Static Quantization Success!!!!")

    return quantized_model

############
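For reference, this is my understanding of the standard eager-mode recipe, sketched on a toy model rather than my actual network (the StubWrapper class and the random calibration inputs are just illustrative; I believe torch.ao.quantization also ships a built-in QuantWrapper for this):
############
import torch
import torch.nn as nn
from torch.ao.quantization import QuantStub, DeQuantStub, get_default_qconfig, prepare, convert


class StubWrapper(nn.Module):
    # Run the stubs *inside* forward(), around the wrapped model
    def __init__(self, model):
        super().__init__()
        self.quant = QuantStub()
        self.model = model
        self.dequant = DeQuantStub()

    def forward(self, x):
        return self.dequant(self.model(self.quant(x)))


toy = StubWrapper(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()))
toy.eval()
toy.qconfig = get_default_qconfig("fbgemm")  # eager mode reads .qconfig on the module
prepared = prepare(toy)                      # inserts observers

# Calibration: run representative inputs through the prepared model
with torch.no_grad():
    for _ in range(10):
        prepared(torch.randn(1, 3, 32, 32))

quantized = convert(prepared)    # swaps FP32 modules for their quantized versions
print(type(quantized.model[0]))  # expecting a quantized Conv2d here
############
My static_quantize() above is my attempt to apply this to the pruned YOLO model, so maybe the stub/qconfig handling there is where it goes wrong.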
I use the function below to check whether the layers were actually quantized:
############
import torch.nn as nn
import torch.ao.nn.quantized as nnq


def check_quantization(model, output_file):
    num_modules_to_print = 5
    with open(output_file, 'w') as f:
        num_printed_modules = 0
        for name, module in model.named_modules():
            if isinstance(module, nn.Conv2d):
                f.write(f"Module {name} is a Conv2d layer.\n")
                f.write(f"Quantized: {isinstance(module, nnq.Conv2d)}\n\n")
            elif isinstance(module, nn.Linear):
                f.write(f"Module {name} is a Linear layer.\n")
                f.write(f"Quantized: {isinstance(module, nnq.Linear)}\n\n")
            # Add more checks for other quantized layer types if needed

            # Print a few example layers for inspection
            if num_printed_modules < num_modules_to_print:
                f.write(f"+++++++++Module {name}:\n")
                f.write(f"+++++++++{module}\n\n")
                num_printed_modules += 1
##############
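For completeness, this is how I call the two functions (the qconfig mapping is just the default one; the file names are placeholders):
##############
from torch.ao.quantization import get_default_qconfig_mapping

qconfig_mapping = get_default_qconfig_mapping("fbgemm")      # default static-quantization settings
quantized_model = static_quantize("1234", qconfig_mapping)   # "1234" = keep the weights already loaded
check_quantization(quantized_model, "quantization_report.txt")
##############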
I always get:

Module model.5.conv is a Conv2d layer.
Quantized: False

Please tell me how to solve this problem. Thank you!