Please let me know if this is expected behaviour.
import torch
import torch.nn as nn

class ModelB(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.layer = nn.Linear(1, 1)

    def forward(self, x):
        x = self.layer(x)
        return x

class ModelA(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.model = ModelB()     # registered as a submodule
        self.models = [ModelB()]  # plain Python list of modules

    def forward(self, x):
        x = self.model(x)
        for model in self.models:
            x = model(x)
        return x

X = torch.rand(10, 1).to('mps')
model_a = ModelA().to('mps')
out = model_a(X)
print(out)
The error is raised on this line: x = model(x), even though it is almost identical to the line that works: x = self.model(x). The error message is:

RuntimeError: Placeholder storage has not been allocated on MPS device!
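If it helps, here is how I checked where the parameters actually end up after calling .to('mps') (a small diagnostic sketch; my guess is that the module held in the plain list never leaves the CPU):

# Compare the device of the registered submodule's weight with that of
# the module stored in the plain Python list.
model_a = ModelA().to('mps')
print(model_a.model.layer.weight.device)      # expected: mps:0
print(model_a.models[0].layer.weight.device)  # expected: cpu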
When I instead use self.models = nn.ModuleList([ModelB()]), it works correctly again (the fixed variant is sketched below).
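For reference, here is the variant of ModelA that works for me, with only the list changed (a minimal sketch, everything else as above):

class ModelA(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.model = ModelB()
        # nn.ModuleList registers its contents as submodules, so
        # .to('mps') moves their parameters together with the rest
        # of the model.
        self.models = nn.ModuleList([ModelB()])

    def forward(self, x):
        x = self.model(x)
        for model in self.models:
            x = model(x)
        return x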