# torchao/quantization/quant_api.py
def quantize_(
model: torch.nn.Module,
config: AOBaseConfig,
filter_fn: Optional[Callable[[torch.nn.Module, str], bool]] = _is_linear,
device: Optional[torch.types.Device] = None,
):
Currently, the default filter_fn is _is_linear, which only matches nn.Linear modules. Can we supply a filter_fn that matches nn.Conv modules so that convolution layers are quantized as well?