Expected object of device type cuda but got device type cpu

The relevant code is below:

class Critic(nn.Module):
    def __init__(self):
        super(Critic, self).__init__()
        self.main = nn.Sequential(
            nn.Linear((data_train.shape[1] - 1), 128).to(device),
            nn.ReLU(),
            nn.Linear(128, 128),
            nn.ReLU(),
            nn.Linear(128, 128),
            nn.ReLU(),
            nn.Linear(128, 128),
            nn.ReLU(),
            nn.Linear(128, 128),
            nn.ReLU(),
            nn.Linear(128, utils_rich.y_count)
        )

    def forward(self, input):
        output = self.main(input)
        return output

def init_weights(m):
    if type(m) == nn.Linear:
        torch.nn.init.xavier_uniform(m.weight)
        m.bias.data.fill_(0.01)

device = torch.device("cuda:3")
netG = Generator().to(device)
netC = Critic().to(device)
netC.apply(init_weights)

That's the model and initialization code.

The training loop looks like this:

for epoch in tqdm(range(TOTAL_ITERATIONS), position=0, leave=True):
    for i, data in enumerate(all_dataloader):
        train_full, w_full, train_x_1, w_x_1, train_x_2, w_x_2 = data
        train_full.to(device)
        for j in range(critic_policy(epoch)):
            #Critic train
            netC.zero_grad()
            output = netC(train_full)
........

and the error message looks like this:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-51-d80103d7bf6d> in <module>
      6             #Critic train
      7             netC.zero_grad()
----> 8             output = netC(train_full)
      9             print(output.shape)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

<ipython-input-46-50038b4dca0f> in forward(self, input)
     41 
     42     def forward(self, input):
---> 43         output = self.main(input)
     44         return output
     45 

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py in forward(self, input)
     90     def forward(self, input):
     91         for module in self._modules.values():
---> 92             input = module(input)
     93         return input
     94 

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/linear.py in forward(self, input)
     85 
     86     def forward(self, input):
---> 87         return F.linear(input, self.weight, self.bias)
     88 
     89     def extra_repr(self):

/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
   1368     if input.dim() == 2 and bias is not None:
   1369         # fused op is marginally faster
-> 1370         ret = torch.addmm(bias, input, weight.t())
   1371     else:
   1372         output = input.matmul(weight.t())

RuntimeError: Expected object of device type cuda but got device type cpu for argument #2 'mat1' in call to _th_addmm

I have tried all the solutions for this error posted on this site (using nn.Sequential and transferring the model and all the data to CUDA), but I still get the error.

Hi,

The .to() operation on a Tensor is not in-place: it returns a new tensor on the target device and leaves the original one on the CPU (unlike Module.to(), which moves the module's parameters in place). So you need to replace train_full.to(device) with train_full = train_full.to(device).
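
For reference, here is a minimal sketch of the corrected loop, keeping your variable names; any other tensors from the dataloader (w_full, train_x_1, ...) would need the same reassignment if they are used on the GPU later:

for epoch in tqdm(range(TOTAL_ITERATIONS), position=0, leave=True):
    for i, data in enumerate(all_dataloader):
        train_full, w_full, train_x_1, w_x_1, train_x_2, w_x_2 = data
        # Tensor.to() returns a copy on the target device, so reassign the result
        train_full = train_full.to(device)
        w_full = w_full.to(device)  # likewise for the other tensors, if needed
        for j in range(critic_policy(epoch)):
            #Critic train
            netC.zero_grad()
            output = netC(train_full)
            # ... rest of the critic/generator updates unchanged

You can check that it worked by printing train_full.device before and after the call.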
