Why doesn't model.to(device) put the tensors of a custom layer on the same device?

Could you post a small code snippet reproducing this error?
I tried to reproduce your error, but it seems to work fine:

import torch
import torch.nn as nn

class MyModule(nn.Module):
    def __init__(self):
        super(MyModule, self).__init__()
        self.fc1 = nn.Linear(1, 1)
        
    def forward(self, x):
        x = self.fc1(x)
        return x

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(10, 1)
        self.module1 = MyModule()  # registered submodule, so .to() moves it as well
    
    def forward(self, x):
        x = self.fc1(x)
        x = self.module1(x)
        return x   

model = Net()
model = model.to('cuda:0')
print(model.module1.fc1.weight.type())
> torch.cuda.FloatTensor
print(model.fc1.weight.type())
> torch.cuda.FloatTensor
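
Since submodules, parameters, and buffers are all moved recursively by model.to(), my guess is that your custom layer creates a plain tensor as an attribute (neither an nn.Parameter nor a registered buffer), which .to() won't touch. Here's a minimal sketch of that situation; the module and attribute names are just made up for illustration:

class MyModuleWithTensor(nn.Module):
    def __init__(self):
        super(MyModuleWithTensor, self).__init__()
        self.fc1 = nn.Linear(1, 1)
        # plain tensor attribute: NOT moved by model.to(device)
        self.scale = torch.ones(1)
        # registered buffer: moved together with the parameters
        self.register_buffer('offset', torch.zeros(1))

    def forward(self, x):
        # calling this on the GPU would raise a device mismatch error,
        # because self.scale stays on the CPU
        return self.fc1(x) * self.scale + self.offset

model = MyModuleWithTensor().to('cuda:0')
print(model.scale.device)
> cpu
print(model.offset.device)
> cuda:0

If that's the case, wrapping the tensor in nn.Parameter or registering it via register_buffer should fix it. Otherwise, please post your code so we can have a look.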