Torchaudio: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!

Here is my code:

import sys
import torch
import torchaudio

def train(net,dataloader,loss_func,optimizer,device):
    
    # put in training mode
    
    net.train()
    
    # to compute training accuracy
    
    num_true_pred = 0
    
    # to compute epoch training loss
    
    total_loss = 0
    
    for i,(signals,labels) in enumerate(dataloader):
        
        # track progress
        
        sys.stdout.write('\rProgress: {:.2f}%'.format(100 * i / len(dataloader)))
        sys.stdout.flush()
        
        # load onto GPU
        signals = signals.to(device).unsqueeze(dim=1)
        labels = labels.to(device).type_as(signals) # needed for BCE loss
        
        # compute log Mel spectrogram
        
        mel_spec = torchaudio.transforms.MelSpectrogram(sample_rate = 16000,
                                                        n_fft = 1024,
                                                        n_mels = 256,
                                                        hop_length = 63).to(device)
        to_dB = torchaudio.transforms.AmplitudeToDB().to(device)
        images = to_dB(mel_spec(signals))
        
        # zero the accumulated parameter gradients
        
        optimizer.zero_grad()
        
        # outputs of net for batch input
        
        outputs = net(images).squeeze()
        
        # compute (sum-reduced) loss for the batch
        
        loss = loss_func(outputs,labels)
        
        # compute loss gradients with respect to parameters
        
        loss.backward()
        
        # update parameters according to optimizer
        
        optimizer.step()
        
        # record running statistics
        
        # since sigmoid(0) = 0.5, negative outputs correspond to class 0
        # and positive outputs correspond to class 1
        
        class_preds = outputs > 0 
        num_true_pred = num_true_pred + torch.sum(class_preds == labels)
        
        # loss is not mean-reduced
        
        total_loss = total_loss + loss
    
    train_loss = total_loss.item() / len(dataloader.dataset)
    
    train_acc = num_true_pred.item() / len(dataloader.dataset)
    
    return net,train_loss,train_acc

I keep getting the following error:

RuntimeError                              Traceback (most recent call last)

<ipython-input-20-b175e934d6f9> in <module>()
     18                                      loss_func,
     19                                      optimizer,
---> 20                                      device)
     21 
     22     print('Training Loss: {:.4f}'.format(train_loss))

6 frames

/content/drive/My Drive/Colab Notebooks/disc_baseline/net_train.py in train(net, dataloader, loss_func, optimizer, device)
     34                                                         n_mels = 256,
     35                                                         hop_length = 63).to(device)
---> 36         to_dB = torchaudio.transforms.AmplitudeToDB().to(device)
     37         images = to_dB(mel_spec(signals))
     38 

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    720             result = self._slow_forward(*input, **kwargs)
    721         else:
--> 722             result = self.forward(*input, **kwargs)
    723         for hook in itertools.chain(
    724                 _global_forward_hooks.values(),

/usr/local/lib/python3.6/dist-packages/torchaudio/transforms.py in forward(self, waveform)
    424             Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time).
    425         """
--> 426         specgram = self.spectrogram(waveform)
    427         mel_specgram = self.mel_scale(specgram)
    428         return mel_specgram

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    720             result = self._slow_forward(*input, **kwargs)
    721         else:
--> 722             result = self.forward(*input, **kwargs)
    723         for hook in itertools.chain(
    724                 _global_forward_hooks.values(),

/usr/local/lib/python3.6/dist-packages/torchaudio/transforms.py in forward(self, waveform)
     82         """
     83         return F.spectrogram(waveform, self.pad, self.window, self.n_fft, self.hop_length,
---> 84                              self.win_length, self.power, self.normalized)
     85 
     86 

/usr/local/lib/python3.6/dist-packages/torchaudio/functional.py in spectrogram(waveform, pad, window, n_fft, hop_length, win_length, power, normalized)
    160     # default values are consistent with librosa.core.spectrum._spectrogram
    161     spec_f = torch.stft(
--> 162         waveform, n_fft, hop_length, win_length, window, True, "reflect", False, True
    163     )
    164 

/usr/local/lib/python3.6/dist-packages/torch/functional.py in stft(input, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided)
    463         input = F.pad(input.view(extended_shape), (pad, pad), pad_mode)
    464         input = input.view(input.shape[-signal_dim:])
--> 465     return _VF.stft(input, n_fft, hop_length, win_length, window, normalized, onesided)
    466 
    467 

RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!

I am using Google Colab, and the device variable is device(type='cuda'). I have already moved torchaudio.transforms.MelSpectrogram and torchaudio.transforms.AmplitudeToDB to the GPU with .to(device), but I am not sure what else to do to fix this.
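
For reference, here is a quick device check I could add inside the training loop (a rough sketch using the names from my code above; net, signals, and mel_spec come from the loop body):

print('signals:', signals.device)                # expect cuda:0
print('net:', next(net.parameters()).device)     # model parameters
for name, buf in mel_spec.named_buffers():       # STFT window / Mel filterbank buffers
    print(name, buf.device)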

Any suggestions?

I cannot reproduce this. Can you provide a minimal example? Here is what I tried on my end:

import torchaudio

device = "cuda"

waveform, sample_rate = torchaudio.load("sinewave.wav") # from audio/test/torchaudio_unittest/assets
waveform = waveform.to(device)

mel_spec = torchaudio.transforms.MelSpectrogram(sample_rate = sample_rate, n_fft = 1024, n_mels = 256, hop_length = 63).to(device)
to_dB = torchaudio.transforms.AmplitudeToDB().to(device)

images = to_dB(mel_spec(waveform))

Hey @vincentqb, thanks for the quick reply. Before I create the minimal example, is it necessary to move both torchaudio.transforms.MelSpectrogram and torchaudio.transforms.AmplitudeToDB to the GPU using the .to method even though waveform is already on the GPU? Can’t I just do this instead:

mel_spec = torchaudio.transforms.MelSpectrogram(sample_rate = sample_rate, n_fft = 1024, n_mels = 256, hop_length = 63)
to_dB = torchaudio.transforms.AmplitudeToDB()

images = to_dB(mel_spec(waveform))

No, you can't skip it: MelSpectrogram holds buffers such as the Mel filterbank and the STFT window function, and those are created on the CPU. If the waveform is on the GPU while those buffers are not, you get exactly this device-mismatch error, so you have to move mel_spec to the CUDA device as well.
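
For example (a rough sketch; the exact buffer names can vary between torchaudio versions), you can watch the buffers move when you call .to:

import torchaudio

mel_spec = torchaudio.transforms.MelSpectrogram(sample_rate = 16000, n_fft = 1024, n_mels = 256, hop_length = 63)

# buffers such as the STFT window and the Mel filterbank are created on the CPU
for name, buf in mel_spec.named_buffers():
    print(name, buf.device)   # e.g. spectrogram.window cpu, mel_scale.fb cpu

mel_spec = mel_spec.to("cuda")

for name, buf in mel_spec.named_buffers():
    print(name, buf.device)   # now cuda:0, matching a waveform that is on the GPU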