Optimize_for_mobile doubles the size of a statically quantized DeepLabV3 image segmentation model

import copy

import torch
from torch.ao.quantization import get_default_qconfig_mapping, quantize_fx
from torch.utils.mobile_optimizer import optimize_for_mobile
from torchvision.models.segmentation import deeplabv3_resnet50


model = deeplabv3_resnet50(pretrained=True)
model.eval()

# apply static quantization (FX graph mode, qnnpack backend)
model_to_quantize_qnnpack = copy.deepcopy(model)
qconfig_mapping = get_default_qconfig_mapping("qnnpack")
model_to_quantize_qnnpack.eval()
# example input for tracing; the shape here is a placeholder, in my run I pass a real preprocessed batch
input_batch = (torch.randn(1, 3, 520, 520),)
# prepare
model_prepared_qnnpack = quantize_fx.prepare_fx(model_to_quantize_qnnpack, qconfig_mapping, input_batch)
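
# calibrate is a small helper along these lines (a sketch; I assume val_loader yields (image, target) batches)
def calibrate(prepared_model, data_loader, num_batches=32):
    prepared_model.eval()
    with torch.no_grad():
        for i, (image, _) in enumerate(data_loader):
            prepared_model(image)  # forward pass only, so the observers record activation ranges
            if i + 1 >= num_batches:
                break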
# calibrate 
calibrate(model_prepared_qnnpack, val_loader)
# quantize
model_quantized_qnnpack = quantize_fx.convert_fx(model_prepared_qnnpack)

scripted_module = torch.jit.script(model_quantized_qnnpack)
# Export lite interpreter version model (compatible with lite interpreter)
scripted_module._save_for_lite_interpreter("model_output/deeplabv3_quant_static_qnnpack_scripted.ptl")

optimized_scripted_module = optimize_for_mobile(scripted_module)
# inference with the optimized lite interpreter model is about 60% faster than with the non-optimized lite interpreter model, which in turn is about 6% faster than the non-optimized full JIT model
optimized_scripted_module._save_for_lite_interpreter("model_output/deeplabv3_quant_static_qnnpack_optimized.ptl")

original model: 160 MB
statically quantized model: 40 MB
after optimize_for_mobile: 80 MB
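
For reference, this is roughly how I measured the sizes on disk (the path for the original scripted model is my own naming, the other two match the files saved above):

import os

paths = [
    "model_output/deeplabv3_scripted.pt",                        # original (non-quantized) scripted model, saved separately
    "model_output/deeplabv3_quant_static_qnnpack_scripted.ptl",  # quantized, lite interpreter
    "model_output/deeplabv3_quant_static_qnnpack_optimized.ptl", # quantized + optimize_for_mobile
]
for p in paths:
    print(p, round(os.path.getsize(p) / 1e6), "MB")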

For some reason optimize_for_mobile doubles the size of the quantized model. Is this expected behavior or not?
Could someone help me with an explanation?