RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! (when checking argument for argument weight in method wrapper_cudnn_batch_norm)

```python
import torch
import torch.nn as nn


class DenseLayer(nn.Module):
    def __init__(self, in_channels, growth_rate):
        super(DenseLayer, self).__init__()
        self.norm = nn.BatchNorm2d(in_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(in_channels, growth_rate, kernel_size=3, stride=1, padding=1, bias=True)
        self.drop = nn.Dropout2d(p=0.2)

    def forward(self, x):
        # BN -> ReLU -> Conv -> Dropout; output has growth_rate channels
        x = self.norm(x)
        x = self.relu(x)
        x = self.conv(x)
        x = self.drop(x)
        return x

class DenseBlock(nn.Module):
    def __init__(self, in_channels, growth_rate, n_layers, upsample=False):
        super(DenseBlock, self).__init__()
        self.upsample = upsample
        self.layers = nn.ModuleList([DenseLayer(in_channels + i * growth_rate, growth_rate) for i in range(n_layers)])

    def forward(self, x):
        if self.upsample:
            new_features = []
            for layer in self.layers:
                out = layer(x)
                x = torch.cat([x, out], 1)
                new_features.append(out)
            return torch.cat(new_features, 1)
        else:
            for layer in self.layers:
                out = layer(x)
                x = torch.cat([x, out], 1)  # 1 = channel axis
            return x

class Bottleneck(nn.Module):
    def __init__(self, in_channels, growth_rate, n_layers):
        super(Bottleneck, self).__init__()
        self.bottleneck = DenseBlock(in_channels, growth_rate, n_layers, upsample=True)

    def forward(self, x):
        return self.bottleneck(x)
```

Above is the code I am currently using. When I make a call to the bottleneck module, I get the error `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! (when checking argument for argument weight in method wrapper_cudnn_batch_norm)`.
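For context, a call shaped like the following reproduces it (the channel counts and tensor shape here are made up for illustration): the `Bottleneck` parameters stay on the CPU while the input tensor is moved to the GPU, and `BatchNorm2d` is the first op to see the mismatch.

```python
import torch

# module is constructed on the CPU and never moved
bottleneck = Bottleneck(in_channels=64, growth_rate=16, n_layers=4)

x = torch.randn(1, 64, 32, 32).to('cuda')  # input lives on the GPU
out = bottleneck(x)  # RuntimeError: Expected all tensors to be on the same device ...
```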

The error is raised if the input and the parameters are on different devices, which can happen if you forget to push either one of them to the GPU.
Make sure to call `model.to('cuda')` as well as `input = input.to('cuda')` before executing the code, as in the sketch below.
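A minimal sketch, assuming your model is the `Bottleneck` above (the shapes and the `device` fallback are just illustrative):

```python
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = Bottleneck(in_channels=64, growth_rate=16, n_layers=4)
model = model.to(device)  # moves every registered parameter and buffer

input = torch.randn(1, 64, 32, 32)
input = input.to(device)  # tensors are not moved in place; reassign the result

output = model(input)  # parameters and input now share a device
```

Note that `nn.Module.to()` modifies the module in place (and returns it), while `Tensor.to()` returns a new tensor, so the reassignment on the input is required.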

PS: your current code is a bit hard to read so please format it by wrapping it into three backticks ``` :wink:

Thanks for the response. I just realized that I forgot to push my decoder to the GPU as well.
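That is a common trap: `model.to(device)` only moves submodules that are registered on the module, i.e. assigned as attributes or held in containers like `nn.ModuleList`. A decoder instantiated separately (or kept in a plain Python list) has to be moved explicitly. A hypothetical sketch, with `encoder`/`decoder` as placeholder names rather than code from this thread:

```python
import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# two independently constructed modules: each needs its own .to(device)
encoder = nn.Conv2d(3, 64, kernel_size=3, padding=1)
decoder = nn.Conv2d(64, 3, kernel_size=3, padding=1)

encoder = encoder.to(device)
decoder = decoder.to(device)  # easy to forget when the modules are separate objects

x = torch.randn(1, 3, 32, 32, device=device)
y = decoder(encoder(x))  # both stages now run on the same device
```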