Pytorch_lightning: tensors on wrong device

I am trying to use pytorch_lightning with multiple GPUs, but I get the following error:

RuntimeError: All input tensors must be on the same device. Received cuda:0 and cuda:3

How can I fix this? Below is an MWE:

import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import pytorch_lightning as pl

class DataModule(pl.LightningDataModule):

    def __init__(self):
        super().__init__()
    
    def setup(self, stage):
        # setup() runs on every GPU; build a dummy identity dataset of
        # 128 random single-channel 64x64 images
        n = 128
        x = torch.randn(n, 1, 64, 64)
        data = list(zip(x, x))
        self.test  = DataLoader(data, batch_size=32)
        self.train = DataLoader(data, batch_size=32)
        self.val   = DataLoader(data, batch_size=32)
        
    def train_dataloader(self):
        return self.train

    def val_dataloader(self):
        return self.val

    def test_dataloader(self):
        return self.test
    
class Net(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.net = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(1,1))
    
    def forward(self, x):
        return self.net(x)
    
    def validation_step(self, batch, batch_idx):
        loss = self.compute_batch_loss(batch, batch_idx)
        self.log('val_loss', loss)
        return loss
    
    def compute_batch_loss(self, batch, batch_idx):
        x, y = batch
        y_hat = self.net(x)
        loss = F.mse_loss(y_hat, y)
        return loss

    def training_step(self, batch, batch_idx):
        loss = self.compute_batch_loss(batch, batch_idx)
        self.log('train_loss', loss)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        return optimizer

dm = DataModule()
model = Net()
trainer = pl.Trainer(gpus=4, 
                     distributed_backend="dp",
                     max_epochs=1,
                    )
trainer.fit(model, dm)

Your code works fine on my machine and outputs:

Epoch 0: 100%|██████████████████████████████████████████████████| 8/8 [00:00<00:00, 111.96it/s, loss=2.734, v_num=0]

Thanks a lot for digging into this! Did you also use 4 GPUs? Could you rerun it with, say, 10 epochs? The behavior is somewhat random for me and does not always trigger after the first epoch.
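
For reference, that would only change the Trainer arguments in the snippet above; everything else stays the same:

trainer = pl.Trainer(gpus=4,
                     distributed_backend="dp",
                     max_epochs=10,
                    )
trainer.fit(model, dm)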

I used the code you provided on a node with 8 GPUs.
I can try to rerun it later.


Update: I can reproduce the issue using 100 epochs and get:

  File "tmp.py", line 64, in <module>
    trainer.fit(model, dm)
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 444, in fit
    results = self.accelerator_backend.train()
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/accelerators/dp_accelerator.py", line 106, in train
    results = self.train_or_test()
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 74, in train_or_test
    results = self.trainer.train()
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 493, in train
    self.train_loop.run_training_epoch()
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 589, in run_training_epoch
    self.trainer.run_evaluation(test_mode=False)
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 609, in run_evaluation
    eval_loop_results = self.evaluation_loop.log_epoch_metrics(deprecated_eval_results, epoch_logs, test_mode)
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/trainer/evaluation_loop.py", line 210, in log_epoch_metrics
    eval_loop_results = self.trainer.logger_connector.on_evaluation_epoch_end(
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/logger_connector.py", line 113, in on_evaluation_epoch_end
    self._log_on_evaluation_epoch_end_metrics(epoch_logs)
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/logger_connector.py", line 181, in _log_on_evaluation_epoch_end_metrics
    reduced_epoch_metrics = dl_metrics[0].__class__.reduce_on_epoch_end(dl_metrics)
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/core/step_result.py", line 464, in reduce_on_epoch_end
    recursive_stack(result)
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/core/step_result.py", line 603, in recursive_stack
    result[k] = collate_tensors(v)
  File "/opt/conda/envs/tmp/lib/python3.8/site-packages/pytorch_lightning/core/step_result.py", line 625, in collate_tensors
    return torch.stack(items)
RuntimeError: All input tensors must be on the same device. Received cuda:1 and cuda:3
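
For context, the failing collate_tensors call boils down to torch.stack over tensors that live on different GPUs, which PyTorch rejects. A minimal standalone reproduction of just that error (device indices are arbitrary; this assumes at least two visible GPUs):

import torch

a = torch.randn(1, device="cuda:0")
b = torch.randn(1, device="cuda:1")

# fails with the same RuntimeError as above: the inputs live on different devices
torch.stack([a, b])

# succeeds once everything is moved to a common device first
torch.stack([a, b.to(a.device)])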

I'm not familiar enough with PyTorch Lightning internals and would suggest creating an issue with this code snippet in their repository.
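
In the meantime, one thing that might be worth trying (purely a sketch, not verified against this Lightning version) is to defer the logging to validation_step_end, the hook Lightning provides for dp-style backends, so that the per-GPU outputs are reduced on a single device before they are logged:

class Net(pl.LightningModule):
    # ... __init__, forward, compute_batch_loss, training_step and
    # configure_optimizers unchanged from the snippet above ...

    def validation_step(self, batch, batch_idx):
        # return the per-GPU loss instead of logging it here
        return self.compute_batch_loss(batch, batch_idx)

    def validation_step_end(self, losses):
        # losses holds the validation_step outputs gathered from all GPUs under dp
        loss = losses.mean()
        self.log('val_loss', loss)
        return loss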

CC @williamFalcon for visibility.


Thanks a lot for reproducing!
