Conversion of quantized PyTorch transformer model to TorchScript throws "RuntimeError: method cannot be used as a value"

Below is the code for converting the PARSeq model (a transformer-based OCR model) to TorchScript. If I comment out the quantization part, the model converts to TorchScript successfully, but with quantization enabled the conversion fails with the following error:

RuntimeError:
method cannot be used as a value:
  File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/activation.py", line 1100
                self.in_proj_weight,
                self.in_proj_bias,
                self.out_proj.weight,
                ~~~~~~~~~~~~~~~~~~~~ <--- HERE
                self.out_proj.bias,
            )

import torch
from PIL import Image
from strhub.data.module import SceneTextDataModule
from strhub.models.utils import load_from_checkpoint, parse_model_args
from torch.quantization import QuantStub, DeQuantStub, float_qparams_weight_only_qconfig, default_qconfig
from torch.utils.mobile_optimizer import optimize_for_mobile

# Load model and image transforms
parseq = torch.hub.load('baudm/parseq', 'parseq', pretrained=True).eval()
img_transform = SceneTextDataModule.get_transform(parseq.hparams.img_size)
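The quantization and scripting steps are not shown above. Roughly, the script follows the standard eager-mode post-training static quantization recipe and then calls torch.jit.script; the snippet below is a simplified sketch of that flow continuing from the code above (the qconfig assignments, the calibration image path, and the variable names are illustrative placeholders, not my exact code):

# Simplified sketch of the quantization + scripting flow (placeholder names/paths).
parseq.qconfig = default_qconfig
# Embedding layers need the weight-only float-qparams qconfig.
parseq.text_embed.embedding.qconfig = float_qparams_weight_only_qconfig
torch.quantization.prepare(parseq, inplace=True)

# Calibration pass with one sample image (path is a placeholder).
img = img_transform(Image.open('sample.jpg').convert('RGB')).unsqueeze(0)
with torch.no_grad():
    parseq(img)

torch.quantization.convert(parseq, inplace=True)

# Scripting the converted model is where the RuntimeError above is raised on 1.12.1.
scripted = torch.jit.script(parseq)
optimized = optimize_for_mobile(scripted)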

Below is the forked repo I am using:

Torch version: 1.12.1

Below is the model architecture:
PARSeq(
  (encoder): Encoder(
    (patch_embed): PatchEmbed(
      (proj): QuantizedConv2d(3, 192, kernel_size=(4, 8), stride=(4, 8), scale=1.0, zero_point=0)
      (norm): Identity()
    )
    (pos_drop): QuantizedDropout(p=0.0, inplace=False)
    (norm_pre): Identity()
    (blocks): Sequential(
      (0): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (1): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (2): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (3): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (4): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (5): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (6): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (7): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (8): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (9): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (10): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
      (11): Block(
        (norm1): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (attn): Attention(
          (qkv): QuantizedLinear(in_features=192, out_features=576, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (attn_drop): QuantizedDropout(p=0.0, inplace=False)
          (proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (proj_drop): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls1): Identity()
        (drop_path1): Identity()
        (norm2): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
        (mlp): Mlp(
          (fc1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (act): GELU(approximate=none)
          (drop1): QuantizedDropout(p=0.0, inplace=False)
          (fc2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
          (drop2): QuantizedDropout(p=0.0, inplace=False)
        )
        (ls2): Identity()
        (drop_path2): Identity()
      )
    )
    (norm): QuantizedLayerNorm((192,), eps=1e-06, elementwise_affine=True)
    (fc_norm): Identity()
    (head): Identity()
  )
  (decoder): Decoder(
    (layers): ModuleList(
      (0): DecoderLayer(
        (self_attn): MultiheadAttention(
          (out_proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
        )
        (cross_attn): MultiheadAttention(
          (out_proj): QuantizedLinear(in_features=192, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
        )
        (linear1): QuantizedLinear(in_features=192, out_features=768, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
        (dropout): QuantizedDropout(p=0.1, inplace=False)
        (linear2): QuantizedLinear(in_features=768, out_features=192, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
        (norm1): QuantizedLayerNorm((192,), eps=1e-05, elementwise_affine=True)
        (norm2): QuantizedLayerNorm((192,), eps=1e-05, elementwise_affine=True)
        (norm_q): QuantizedLayerNorm((192,), eps=1e-05, elementwise_affine=True)
        (norm_c): QuantizedLayerNorm((192,), eps=1e-05, elementwise_affine=True)
        (dropout1): QuantizedDropout(p=0.1, inplace=False)
        (dropout2): QuantizedDropout(p=0.1, inplace=False)
        (dropout3): QuantizedDropout(p=0.1, inplace=False)
      )
    )
    (norm): QuantizedLayerNorm((192,), eps=1e-05, elementwise_affine=True)
  )
  (head): QuantizedLinear(in_features=192, out_features=95, scale=1.0, zero_point=0, qscheme=torch.per_tensor_affine)
  (text_embed): TokenEmbedding(
    (embedding): QuantizedEmbedding(num_embeddings=97, embedding_dim=192, dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams)
  )
  (dropout): QuantizedDropout(p=0.1, inplace=False)
)
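From the traceback, the failure is at self.out_proj.weight inside nn.MultiheadAttention.forward, and the architecture dump above shows that out_proj has been swapped to a QuantizedLinear during conversion. On quantized Linear modules, weight is an accessor method rather than a Parameter attribute, which appears to be what TorchScript is objecting to with "method cannot be used as a value". A minimal illustration of the difference (separate from my conversion script):

import torch
import torch.nn as nn

float_lin = nn.Linear(192, 192)
quant_lin = torch.nn.quantized.Linear(192, 192)  # quantized counterpart, default scale/zero_point

print(type(float_lin.weight))    # <class 'torch.nn.parameter.Parameter'> -- a plain attribute
print(type(quant_lin.weight))    # <class 'method'> -- must be called as quant_lin.weight()
print(quant_lin.weight().shape)  # torch.Size([192, 192])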

Update: I was able to convert the quantized model successfully with PyTorch 2.0.
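For reference, assuming the same script-and-optimize steps as in the sketch above, the final export on 2.0 looks roughly like this (the output filename is a placeholder):

scripted = torch.jit.script(parseq)                           # no longer raises on 2.0
optimized = optimize_for_mobile(scripted)
optimized._save_for_lite_interpreter('parseq_quantized.ptl')  # placeholder filename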