I’m running a resnet50 model and get an error stating the following model expected a CPU at the test run.
model = models.resnet50(pretrained = False)
model.Linear = nn.Sequential(nn.Linear(in_features = 2048, out_features = 2),
nn.ReLU(),
nn.Sigmoid())
model.cuda()
criterion = nn.BCEWithLogitsLoss().cuda()
optimizer = torch.optim.Adam(model.Linear.parameters(), lr=0.001)
import time
start_time = time.time()epochs = 1
max_trn_batch = 6361
max_tst_batch = 1559train_losses =
test_losses =train_correct =
test_correct =train_policy_list =
test_policy_list =train_prediction =
test_prediction =for i in range(epochs):
trn_corr = 0 tst_corr = 0 # Run the training batches with torch.no_grad(): for b, (image, label, policy) in enumerate(train_loader): image.cuda() label.cuda() # Limit the number of batches if b == max_trn_batch: break b+=1 # Apply the model y_pred = model(image) predicted = torch.max(y_pred.data, 1)[1] batch_corr = (predicted == label).sum() trn_corr += batch_corr loss = criterion(y_pred, label) train_policy = policy.data.cpu().numpy() for i in train_policy: train_policy_list.append(i) predictions = y_pred.data.cpu().numpy() for prediction in predictions: train_output.append(prediction) # Tally the number of correct predictions # Update parameters #optimizer.zero_grad() #oss.backward() #optimizer.step() # Print interim results accuracy = (trn_corr/(b*10)) print(f'train, epoch: {i} batch: {b}, Accuracy: {accuracy}, loss: {loss.item():10.8f}') train_losses.append(loss) train_correct.append(trn_corr) # Run the testing batches with torch.no_grad(): for b, (image, label, policy) in enumerate(test_loader): # Limit the number of batches image.cuda() label.cuda() if b == max_tst_batch: break # Apply the model y_val = model(image) test_policy = policy.data.cpu().numpy() for i in test_policy: test_policy_list.append(i) predictions_test = y_val.data.cpu().numpy() for prediction in predictions_test: test_output.append(prediction) # Tally the number of correct predictions predicted = torch.max(y_val.data, 1)[1] tst_corr += (predicted == label).sum() print(f'test, epoch: {i:2} batch:{b:4} loss:{loss.item():10.8f}') loss = criterion(y_val, label) test_losses.append(loss) test_correct.append(tst_corr)
print(f’\nDuration: {time.time() - start_time:.0f} seconds’) # print the time elapsed
I set everything to the GPU so I’m not sure why it’s expecting CPU. Error below.
RuntimeError Traceback (most recent call last)
in
35
36 # Apply the model
—> 37 y_pred = model(image)
38
39 predicted = torch.max(y_pred.data, 1)[1]C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in call(self, *input, **kwargs)
545 result = self._slow_forward(*input, **kwargs)
546 else:
→ 547 result = self.forward(*input, **kwargs)
548 for hook in self._forward_hooks.values():
549 hook_result = hook(self, input, result)C:\ProgramData\Anaconda3\lib\site-packages\torchvision\models\resnet.py in forward(self, x)
194
195 def forward(self, x):
→ 196 x = self.conv1(x)
197 x = self.bn1(x)
198 x = self.relu(x)C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in call(self, *input, **kwargs)
545 result = self._slow_forward(*input, **kwargs)
546 else:
→ 547 result = self.forward(*input, **kwargs)
548 for hook in self._forward_hooks.values():
549 hook_result = hook(self, input, result)C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
341
342 def forward(self, input):
→ 343 return self.conv2d_forward(input, self.weight)
344
345 class Conv3d(_ConvNd):C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\conv.py in conv2d_forward(self, input, weight)
338 _pair(0), self.dilation, self.groups)
339 return F.conv2d(input, weight, self.bias, self.stride,
→ 340 self.padding, self.dilation, self.groups)
341
342 def forward(self, input):RuntimeError: Expected object of backend CPU but got backend CUDA for argument #2 ‘weight’