Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0!

I keep getting this error (shown below), but when I print the location of the tensors, everything is on CUDA. Kindly assist.

map_reg_code = self.mapping(reg_code)
return F.linear(input, self.weight, self.bias)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat1 in method wrapper_addmm)
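
For what it's worth, here is a standalone snippet that reproduces the same kind of RuntimeError (illustrative only, not my actual code; it assumes a CUDA build of PyTorch):

import torch
import torch.nn.functional as F

linear = torch.nn.Linear(4, 2)           # parameters live on the CPU by default
x = torch.randn(1, 4, device='cuda:0')   # input lives on the GPU
F.linear(x, linear.weight, linear.bias)  # raises the device-mismatch RuntimeError

My model is below: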
import torch
import torch.nn as nn

class MLP_pretrained_model(torch.nn.Module):
    def __init__(self, pretrained_model_lvl3):
        super(MLP_pretrained_model, self).__init__()
        self.pretrained_model_lvl3 = pretrained_model_lvl3
        self.hidden = 64
        
        self.input_dims = 491776
        self.output_dims = 2  # predicting the dice and jacobian

        mapping_input = 64
        self.mapping_output = 256

        self.max_pool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.fc1 = torch.nn.Linear(self.input_dims, self.hidden)  # added
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(self.hidden, self.hidden)
        self.fc3 = torch.nn.Linear(self.hidden, self.output_dims)

        self.mapping = nn.Sequential(
            # nn.Linear(1, mapping_input),
            nn.Linear(3, mapping_input),
            nn.ReLU(),
            nn.Linear(mapping_input, mapping_input),
            nn.ReLU(),
            nn.Linear(mapping_input, mapping_input),
            nn.ReLU(),
            nn.Linear(mapping_input, self.mapping_output),
        )
    
    def forward(self, x, y, reg_code, pretrained):

        if pretrained:
            e0 = self.pretrained_model_lvl3(x, y, reg_code, pretrained)
            e0 = self.max_pool(e0)
            encoded = e0.flatten(1, -1)
            map_reg_code = self.mapping(reg_code)
            encoded_reg_code = torch.cat([encoded, map_reg_code], dim=1)
            # fc = nn.Linear(encoded_reg_code.shape[1], self.hidden)
            encoded_reg_code = self.fc1(encoded_reg_code)
            encoded_reg_code = self.relu(encoded_reg_code)
            encoded_reg_code = self.fc2(encoded_reg_code)
            encoded_reg_code = self.relu(encoded_reg_code)
            encoded_reg_code = self.fc3(encoded_reg_code)

            return encoded_reg_code
        else:
            return self.pretrained_model_lvl3(x, y, reg_code, pretrained)
            

Check where both the model and reg_code are allocated, outside the forward function. Note that an nn.Module has no .device attribute, so look at the device of one of its parameters instead:

print(next(your_model.parameters()).device)
print(reg_code.device)
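
If everything you print shows cuda:0 but the error persists, a stray parameter may still be on the CPU. A quick diagnostic sketch (this loop is my addition, not from your code) that lists any parameters left on the CPU by name:

for name, p in your_model.named_parameters():
    if p.device.type == 'cpu':
        print(name, p.device)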

If the model and reg_code are on different devices, move whichever one is on the CPU:

device = torch.device('cuda:0')
if reg_code.device == torch.device('cpu'):
    reg_code = reg_code.to(device)
elif next(your_model.parameters()).device == torch.device('cpu'):
    your_model = your_model.to(device)
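
In practice it is usually simplest to move the model and all of its inputs once, before the forward call. A sketch, assuming the model class and the x, y, reg_code tensors from your snippet:

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = MLP_pretrained_model(pretrained_model_lvl3).to(device)  # .to() moves all registered parameters and buffers
x, y, reg_code = x.to(device), y.to(device), reg_code.to(device)
out = model(x, y, reg_code, pretrained=True)

Note that model.to(device) only moves registered parameters and buffers: a layer constructed inside forward (like the commented-out fc = nn.Linear(...) in your snippet, if it were re-enabled) would stay on the CPU and raise exactly this error.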

Thanks for your help!