Hi,
I am using the code below to do quantization:
###############
import copy

import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization import quantize_fx

# attempt_load, check_img_size, LoadImages, opt and calibration come from
# the project's YOLO-style utilities

def saveModel(model):
    torch.save(model.state_dict(), 'static_quantized_weights.pt')

def static_quantize(weight_path, qconfig_mapping):
    weights, imgsz, cali_dataset = opt.weights, opt.img_size, opt.datasets
    # Load model on CPU (static quantization targets CPU backends)
    device = torch.device("cpu")
    ckpt = torch.load(weights[0])
    model = attempt_load(weights, map_location=device)
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size
    dataset_processing = LoadImages(cali_dataset, img_size=imgsz, stride=stride)
    model_to_quantize = copy.deepcopy(model)
    example_input = torch.randn(1, 3, 1024, 1024)
    qconfig_mapping = get_default_qconfig_mapping("fbgemm")  # better for Intel CPUs
    model_to_quantize.eval()
    # prepare: insert observers (example_inputs must be a tuple)
    model_prepared = quantize_fx.prepare_fx(model_to_quantize, qconfig_mapping, (example_input,))
    calibration(model_prepared, dataset_processing)
    # quantize: swap observed modules for quantized ones
    model_quantized = quantize_fx.convert_fx(model_prepared)
    saveModel(model_quantized)
    # note: torch.load here returns the saved state_dict, not a Module
    loaded_model = torch.load('static_quantized_weights.pt', map_location='cpu')
    print("Static Quantization Success!!!!")
##########################
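For context, `calibration` just runs the calibration images through the prepared model so the observers can record activation ranges. A rough sketch of it, assuming LoadImages yields `(path, img, im0s, vid_cap)` tuples as in YOLOv5/YOLOv7 (the exact preprocessing may differ):
###############
# Sketch of the calibration helper -- assumed implementation, shown only
# so the flow above is complete:
def calibration(model_prepared, dataset):
    model_prepared.eval()
    with torch.no_grad():
        for path, img, im0s, vid_cap in dataset:
            img = torch.from_numpy(img).float() / 255.0  # uint8 -> float in [0, 1]
            if img.ndimension() == 3:
                img = img.unsqueeze(0)  # add batch dimension
            model_prepared(img)  # forward pass lets observers collect stats
###############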
and I am using the code below to load the quantized model:
###################
ckpt = torch.load(weights, map_location=device)  # load checkpoint
model_ckpt = attempt_load(weights, map_location=device)  # the weights come from ckpt['model']
exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
quantized_weights = torch.load(opt.quantization_weights, map_location='cpu')
example_input = torch.randn(1, 3, 1024, 1024)
qconfig_mapping = get_default_qconfig_mapping("fbgemm")
model_ckpt.to(device)
anchor_grid_modules2 = find_anchor_grid_modules(model_ckpt)
grid_file_path2 = "Original_anchor_grid_modules.txt"
write_anchor_grid_modules_to_file(anchor_grid_modules2, grid_file_path2)

with open('parameter_groups_Originalmodel.txt', 'w') as f:
    for name, param in model_ckpt.named_parameters():  # OK, has values
        f.write(f'{name}\n')

with open('module_names.txt', 'w') as f:
    # write each module name to file
    for name, module in model_ckpt.named_modules():
        f.write(f'{name}\n')

# prepare: insert observers (example_inputs must be a tuple)
model_prepared = quantize_fx.prepare_fx(model_ckpt, qconfig_mapping, (example_input,))
# calibrate (not shown)
# quantize: swap observed modules for quantized ones
model = quantize_fx.convert_fx(model_prepared)
model.load_state_dict(quantized_weights, strict=False)
model.to(device)

with open('model_state_dict_keys.txt', 'w') as f:
    for key in model.state_dict().keys():
        f.write(f'{key}\n')

with open('quantized_model_state_dict.txt', 'w') as f:  # OK, has values
    count = 0
    for key, value in model.state_dict().items():
        f.write(f'{key}: {value}\n')
        count += 1
        if count >= 30:
            break

with open('parameter_groups_Qmodel.txt', 'w') as f:  # empty -- no values
    for idx, param in enumerate(model.parameters()):
        f.write(f'Parameter {idx}: {param.shape}\n')

with open('Quan_module_names.txt', 'w') as f:  # OK, has values
    # write each module name to file
    for name, module in model.named_modules():
        f.write(f'{name}\n')
###############
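Since `strict=False` silently skips any keys that do not match, a quick sanity check (a sketch, using the variables from the code above) is to look at what `load_state_dict` reports back:
###################
# load_state_dict() returns the keys it could not match; with strict=False
# this is the only signal that some quantized weights were not loaded:
incompatible = model.load_state_dict(quantized_weights, strict=False)
print('missing keys:', incompatible.missing_keys)
print('unexpected keys:', incompatible.unexpected_keys)
###################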
Just like the above shows, `model_ckpt` returns values from `.named_parameters()`, `.named_modules()`, and `.state_dict()`;
but the quantized `model` (after loading the quantized weights) returns nothing from `model.named_parameters()`.
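A minimal check of the difference, using `model_ckpt` and `model` from the code above:
###################
# FP32 checkpoint model: named_parameters() yields entries
print(sum(1 for _ in model_ckpt.named_parameters()))  # > 0
# converted quantized model: named_parameters() yields nothing
print(sum(1 for _ in model.named_parameters()))       # 0 -- this is my problem
###################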
I am not sure what happened when loading my quantized model.
Do you have any idea, please?