Post-training static quantization

When I do post-training static quantization on BERT with the following code:

import time

import torch
from torch.utils.data import DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm

quantized_model = model_class.from_pretrained(
    args.model_name_or_path,
    from_tf=bool(".ckpt" in args.model_name_or_path),
    config=config,
    cache_dir=args.cache_dir if args.cache_dir else None,
)
quantized_model.eval()
quantized_model.qconfig = torch.quantization.default_qconfig
print('quantized_model.qconfig', quantized_model.qconfig)

# For x86 CPUs, the 'fbgemm' qconfig quantizes weights on a per-channel basis:
# quantized_model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
# print('quantized_model.qconfig', quantized_model.qconfig)

torch.quantization.prepare(quantized_model, inplace=True)
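# prepare() attaches observers to the supported submodules; they record
# activation ranges during the calibration pass below.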

train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train')
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
# train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_sampler = SequentialSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,
                              collate_fn=collate_fn)
with torch.no_grad():
    for step, batch in enumerate(tqdm(train_dataloader, desc='post-training static quantization')):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        outputs = quantized_model(**inputs)  # forward pass so the observers can record activation statistics
        loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
        break
print('convert quantized model:')
# torch.quantization.convert(quantized_model, mapping={nn.Linear: nnq.Linear}, inplace=True)
torch.quantization.convert(quantized_model, inplace=True)
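# convert() swaps the observed modules (Linear, LayerNorm, ...) for their
# quantized counterparts, using the statistics collected during calibration.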
print('static quantized model')
print(quantized_model)

print_model_size(quantized_model)
t1 = time.time()
result = evaluate(args, quantized_model, tokenizer, prefix=prefix)
t2 = time.time()

the following error occurs:
Could not run 'quantized::layer_norm' with arguments from the 'CPU' backend. 'quantized::layer_norm' is only available for these backends: [QuantizedCPU]
How can I solve this problem?
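For reference, here is a minimal self-contained sketch that I believe reproduces the same error outside of BERT (the tiny module and tensor shapes are just illustrative). After convert(), nn.LayerNorm is replaced by its quantized counterpart, but it still receives an ordinary fp32 CPU tensor, because nothing (e.g. a QuantStub) quantizes the activations first:

import torch
import torch.nn as nn

class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.norm = nn.LayerNorm(4)

    def forward(self, x):
        # x stays a regular fp32 tensor; nothing converts it to a quantized tensor
        return self.norm(x)

m = TinyModel().eval()
m.qconfig = torch.quantization.default_qconfig
torch.quantization.prepare(m, inplace=True)
m(torch.randn(2, 4))  # calibration pass
torch.quantization.convert(m, inplace=True)
m(torch.randn(2, 4))  # I expect this to raise the same 'quantized::layer_norm' / CPU backend error

Is the intended fix to wrap the model in QuantStub/DeQuantStub (quantizing the activations before they reach LayerNorm), or is eager-mode static quantization simply not expected to work on BERT this way?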