"RuntimeError: BlobWriter not loaded" error when exporting a PyTorch model to CoreML. How to fix it?

I get a “RuntimeError: BlobWriter not loaded” error when exporting a PyTorch token-classification model to Core ML. How can I fix it?

The error occurs with both Python 3.10 and 3.11, and with both torch 2.2.0 and 2.3.1, on Windows 10.

Export script:

# -*- coding: utf-8 -*-
"""Core ML Export
pip install transformers torch coremltools nltk
"""
from transformers import AutoModelForTokenClassification, AutoTokenizer
import torch
import torch.nn as nn
import nltk
import coremltools as ct

nltk.download('punkt')

# Load the model and tokenizer
model_path = 'model'  # local directory containing the fine-tuned model and tokenizer
model = AutoModelForTokenClassification.from_pretrained(model_path, local_files_only=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True)

# Modify the model's forward method to return a tuple
class ModifiedModel(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.device = model.device  # Add the device attribute

    def forward(self, input_ids, attention_mask, token_type_ids=None):
        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        return outputs.logits


modified_model = ModifiedModel(model)
modified_model.eval()  # eval mode before tracing (the converter warns otherwise)

# Export to Core ML
def convert_to_coreml(model, tokenizer):
    # Define a dummy input for tracing
    dummy_input = tokenizer("A French fan", return_tensors="pt")
    dummy_input = {k: v.to(model.device) for k, v in dummy_input.items()}

    # Trace the model with the dummy input. Only pass token_type_ids when the
    # tokenizer actually produced them: torch.jit.trace cannot take a None input.
    example_inputs = [dummy_input['input_ids'], dummy_input['attention_mask']]
    if 'token_type_ids' in dummy_input:
        example_inputs.append(dummy_input['token_type_ids'])
    traced_model = torch.jit.trace(model, tuple(example_inputs))

    # Convert to Core ML
    inputs = [
        ct.TensorType(name="input_ids", shape=dummy_input['input_ids'].shape),
        ct.TensorType(name="attention_mask", shape=dummy_input['attention_mask'].shape)
    ]
    if 'token_type_ids' in dummy_input:
        inputs.append(ct.TensorType(name="token_type_ids", shape=dummy_input['token_type_ids'].shape))

    mlmodel = ct.convert(traced_model, inputs=inputs)

    # Save the Core ML model
    mlmodel.save("model.mlmodel")
    print("Model exported to Core ML successfully")

convert_to_coreml(modified_model, tokenizer)
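
For what it's worth, the wrapper itself runs fine in plain PyTorch (and the trace succeeds, as the log below shows), so the failure is confined to the Core ML conversion step. A minimal sanity check, reusing the script's dummy sentence:

# Sanity check: the wrapper returns logits as expected in plain PyTorch
enc = tokenizer("A French fan", return_tensors="pt")
with torch.no_grad():
    logits = modified_model(enc['input_ids'], enc['attention_mask'],
                            enc.get('token_type_ids'))
print(logits.shape)  # (batch_size, seq_len, num_labels)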

Error stack:

C:\Users\dernoncourt\anaconda3\envs\coreml\python.exe C:\Users\dernoncourt\PycharmProjects\coding\export_model_to_coreml6_fopr_SE_q.py 
Failed to load _MLModelProxy: No module named 'coremltools.libcoremlpython'
Fail to import BlobReader from libmilstoragepython. No module named 'coremltools.libmilstoragepython'
Fail to import BlobWriter from libmilstoragepython. No module named 'coremltools.libmilstoragepython'
[nltk_data] Downloading package punkt to
[nltk_data]     C:\Users\dernoncourt\AppData\Roaming\nltk_data...
[nltk_data]   Package punkt is already up-to-date!
C:\Users\dernoncourt\anaconda3\envs\coreml\lib\site-packages\transformers\modeling_utils.py:4565: FutureWarning: `_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead
  warnings.warn(
When both 'convert_to' and 'minimum_deployment_target' not specified, 'convert_to' is set to "mlprogram" and 'minimum_deployment_target' is set to ct.target.iOS15 (which is same as ct.target.macOS12). Note: the model will not run on systems older than iOS15/macOS12/watchOS8/tvOS15. In order to make your model run on older system, please set the 'minimum_deployment_target' to iOS14/iOS13. Details please see the link: https://apple.github.io/coremltools/docs-guides/source/target-conversion-formats.html
Model is not in eval mode. Consider calling '.eval()' on your model prior to conversion
Converting PyTorch Frontend ==> MIL Ops:   0%|          | 0/127 [00:00<?, ? ops/s]Core ML embedding (gather) layer does not support any inputs besides the weights and indices. Those given will be ignored.
Converting PyTorch Frontend ==> MIL Ops:  99%|█████████▉| 126/127 [00:00<00:00, 2043.73 ops/s]
Running MIL frontend_pytorch pipeline: 100%|██████████| 5/5 [00:00<00:00, 212.62 passes/s]
Running MIL default pipeline:  37%|███▋      | 29/78 [00:00<00:00, 289.75 passes/s]C:\Users\dernoncourt\anaconda3\envs\coreml\lib\site-packages\coremltools\converters\mil\mil\ops\defs\iOS15\elementwise_unary.py:894: RuntimeWarning: overflow encountered in cast
  return input_var.val.astype(dtype=string_to_nptype(dtype_val))
Running MIL default pipeline: 100%|██████████| 78/78 [00:00<00:00, 137.56 passes/s]
Running MIL backend_mlprogram pipeline: 100%|██████████| 12/12 [00:00<00:00, 315.01 passes/s]
Traceback (most recent call last):
  File "C:\Users\dernoncourt\PycharmProjects\coding\export_model_to_coreml6_fopr_SE_q.py", line 58, in <module>
    convert_to_coreml(modified_model, tokenizer)
  File "C:\Users\dernoncourt\PycharmProjects\coding\export_model_to_coreml6_fopr_SE_q.py", line 51, in convert_to_coreml
    mlmodel = ct.convert(traced_model, inputs=inputs)
  File "C:\Users\dernoncourt\anaconda3\envs\coreml\lib\site-packages\coremltools\converters\_converters_entry.py", line 581, in convert
    mlmodel = mil_convert(
  File "C:\Users\dernoncourt\anaconda3\envs\coreml\lib\site-packages\coremltools\converters\mil\converter.py", line 188, in mil_convert
    return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs)
  File "C:\Users\dernoncourt\anaconda3\envs\coreml\lib\site-packages\coremltools\converters\mil\converter.py", line 212, in _mil_convert
    proto, mil_program = mil_convert_to_proto(
  File "C:\Users\dernoncourt\anaconda3\envs\coreml\lib\site-packages\coremltools\converters\mil\converter.py", line 307, in mil_convert_to_proto
    out = backend_converter(prog, **kwargs)
  File "C:\Users\dernoncourt\anaconda3\envs\coreml\lib\site-packages\coremltools\converters\mil\converter.py", line 130, in __call__
    return backend_load(*args, **kwargs)
  File "C:\Users\dernoncourt\anaconda3\envs\coreml\lib\site-packages\coremltools\converters\mil\backend\mil\load.py", line 902, in load
    mil_proto = mil_proto_exporter.export(specification_version)
  File "C:\Users\dernoncourt\anaconda3\envs\coreml\lib\site-packages\coremltools\converters\mil\backend\mil\load.py", line 400, in export
    raise RuntimeError("BlobWriter not loaded")
RuntimeError: BlobWriter not loaded

Process finished with exit code 1

From a quick search, it seems the error is caused by coremltools, which does not appear to support Windows: note the "No module named 'coremltools.libmilstoragepython'" warnings at the top of your log. Since the issue is not PyTorch-specific, you might want to ask in the coremltools repository.
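
You can confirm that the compiled extensions are missing with a quick import check; the module names below are taken straight from the warnings in your log:

import importlib

# coremltools swallows these failed imports at import time and only raises
# "BlobWriter not loaded" later, when the mlprogram backend needs BlobWriter.
for mod in ("coremltools.libcoremlpython", "coremltools.libmilstoragepython"):
    try:
        importlib.import_module(mod)
        print(f"{mod}: OK")
    except ImportError as err:
        print(f"{mod}: missing ({err})")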

Thanks, you’re right. The code works fine on Ubuntu 20.04 (tested with Python 3.10 and torch 2.3.1). One just needs to change:

mlmodel.save("model.mlmodel")

to

mlmodel.save("model.mlpackage")

Note that on Ubuntu, mlmodel.save("model.mlmodel") produced a different error (the conversion itself succeeded; only the save step failed):

  File "/home/dernonco/code/export_model_to_coreml6_fopr_SE_q.py", line 58, in <module>
    convert_to_coreml(modified_model, tokenizer)
  File "/home/dernonco/code/export_model_to_coreml6_fopr_SE_q.py", line 54, in convert_to_coreml
    mlmodel.save("model.mlmodel")
  File "/opt/conda/envs/py310/lib/python3.10/site-packages/coremltools/models/model.py", line 515, in save
    raise Exception(
Exception: For an ML Program, extension must be .mlpackage (not .mlmodel). Please see https://coremltools.readme.io/docs/unified-conversion-api#target-conversion-formats to see the difference between neuralnetwork and mlprogram model types.
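
If a legacy .mlmodel file is specifically needed, that exception message points at another option: request the older neuralnetwork format at conversion time instead of the default mlprogram. A sketch (I have not tested this with the model above):

# Convert to the legacy NeuralNetwork format, which can be saved as .mlmodel;
# mlprogram models (the default) must be saved as .mlpackage instead.
mlmodel = ct.convert(traced_model, inputs=inputs, convert_to="neuralnetwork")
mlmodel.save("model.mlmodel")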

As for Windows, I guess coremltools doesn’t fully work there, or needs some special treatment.