Hi,
I’m trying to run a model with MPS on an Apple M4 chipset, but the code included below fails with the following error.
File "/Users/ssmeets/Projects/fastai/fastbook/test.py", line 32, in <module>
output = model(x)
^^^^^^^^
File "/Users/ssmeets/Projects/fastai/fastbook/venv312/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/ssmeets/Projects/fastai/fastbook/venv312/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/ssmeets/Projects/fastai/fastbook/fastai/fastai/text/models/awdlstm.py", line 130, in forward
output, new_h = rnn(output, self.hidden[l])
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/ssmeets/Projects/fastai/fastbook/venv312/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/ssmeets/Projects/fastai/fastbook/venv312/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/ssmeets/Projects/fastai/fastbook/fastai/fastai/text/models/awdlstm.py", line 64, in forward
return self.module(*args)
^^^^^^^^^^^^^^^^^^
File "/Users/ssmeets/Projects/fastai/fastbook/venv312/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/ssmeets/Projects/fastai/fastbook/venv312/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/ssmeets/Projects/fastai/fastbook/venv312/lib/python3.12/site-packages/torch/nn/modules/rnn.py", line 1124, in forward
result = _VF.lstm(
^^^^^^^^^
RuntimeError: Placeholder storage has not been allocated on MPS device!
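As far as I can tell, this error usually means that some tensor taking part in the op is still on the CPU while the kernel runs on MPS. A quick check like the one below (plain PyTorch, nothing fastai-specific) can be dropped in right before the failing forward in the script that follows. Note that a tensor stored as a plain attribute, such as a cached hidden state, is neither a parameter nor a buffer, so it would not show up here:
# Quick device check: everything registered on the module should report mps:0.
# A tensor stored as a plain attribute (e.g. a cached hidden state) is not
# moved by .to(device) and will not appear in either set.
print("parameter devices:", {p.device for p in model.parameters()})
print("buffer devices:", {b.device for b in model.buffers()})
print("input device:", x.device)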
This is my code. The issue does not appear when I run it on the CPU. Should this be reported as a bug?
import torch
from fastai.text.models import AWD_LSTM
# Choose the device (this will pick MPS on Apple Silicon if available)
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
print(f"Using device: {device}")
# Create an instance of AWD_LSTM.
# Parameters:
# vocab_sz: Size of the vocabulary.
# emb_sz: Embedding size.
# n_hid: Hidden dimension.
# n_layers: Number of LSTM layers.
# pad_token: Index for the padding token.
vocab_sz = 1000
emb_sz = 50
n_hid = 50
n_layers = 1
pad_token = 0
model = AWD_LSTM(vocab_sz, emb_sz, n_hid, n_layers, pad_token=pad_token).to(device)
print(model)
# Create dummy input data: shape (batch_size, sequence_length)
batch_size = 4
seq_len = 20
# Random integers between 0 and vocab_sz-1 simulate token IDs.
x = torch.randint(0, vocab_sz, (batch_size, seq_len), device=device)
# Forward pass: model expects input shape [batch, seq_len]
output = model(x)
print("Output shape:", output.shape)
# Compute a dummy loss (mean of the output)
loss = output.mean()
print("Loss:", loss.item())
# Perform a backward pass.
loss.backward()
print("Backward pass completed.")
Thanks,
Sjoerd