Code error: can anyone explain this to me? I am learning PyTorch now.

```
TypeError                                 Traceback (most recent call last)
in
      1 model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
----> 2                        num_epochs=30)

in train_model(model, criterion, optimizer, scheduler, num_epochs)
     21             # Iterate over data.
     22             for inputs, labels in dataloaders[phase]:
---> 23                 inputs = inputs.to(device)
     24                 labels = labels.to(device)
     25

TypeError: to() received an invalid combination of arguments - got (type), but expected one of:
 * (torch.device device, torch.dtype dtype, bool non_blocking, bool copy, *, torch.memory_format memory_format)
 * (torch.dtype dtype, bool non_blocking, bool copy, *, torch.memory_format memory_format)
 * (Tensor tensor, bool non_blocking, bool copy, *, torch.memory_format memory_format)
```

Try to print what `device` is set to, as it currently seems to be a type, while a `torch.device` instance or a string is expected.
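For example, something along these lines (a minimal sketch; `device` here is the variable defined before the training loop in the transfer learning tutorial):

```python
import torch

# Define the device once and inspect it before passing it to .to()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)        # e.g. cuda:0 or cpu
print(type(device))  # should be <class 'torch.device'>, not <class 'type'>
```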


I have done that, but I get the same error again. I don't understand why this comes up every time for me.

```
TypeError                                 Traceback (most recent call last)
in
----> 1 model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=50)

in train_model(model, criterion, optimizer, scheduler, num_epochs)
     21             # Iterate over data.
     22             for inputs, labels in dataloaders[phase]:
---> 23                 inputs = inputs.to(torch.device)
     24                 labels = labels.to(torch.device)
     25

TypeError: to() received an invalid combination of arguments - got (type), but expected one of:
 * (torch.device device, torch.dtype dtype, bool non_blocking, bool copy, *, torch.memory_format memory_format)
 * (torch.dtype dtype, bool non_blocking, bool copy, *, torch.memory_format memory_format)
 * (Tensor tensor, bool non_blocking, bool copy, *, torch.memory_format memory_format)
```

I am using the PyTorch computer vision transfer learning code on 4000 images to identify the photographer. Can anyone help me get this working?

In case you are stuck, please post a minimal and executable code snippet which would reproduce the issue.


```python
from __future__ import print_function, division

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy

cudnn.benchmark = True
plt.ion()

data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'test': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

data_dir = 'drive/MyDrive/data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=32,
                                              shuffle=True, num_workers=2)
               for x in ['train', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'test']}
class_names = image_datasets['train'].classes


def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated


# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))

# Make a grid from batch
out = torchvision.utils.make_grid(inputs)

imshow(out, title=[class_names[x] for x in classes])


def train_model(model, criterion, optimizer, scheduler, num_epochs=50):
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print(f'Epoch {epoch}/{num_epochs - 1}')
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(torch.device)
                labels = labels.to(torch.device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

            # deep copy the model
            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print(f'Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s')
    print(f'Best test Acc: {best_acc:4f}')

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model


def visualize_model(model, num_images=10):
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()

    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['test']):
            inputs = inputs.to(torch.device)
            labels = labels.to(torch.device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images // 2, 2, images_so_far)
                ax.axis('off')
                ax.set_title(f'predicted: {class_names[preds[j]]}')
                imshow(inputs.cpu().data[j])

                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)


model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features

# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, len(class_names), 48)

criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)

# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=50)
```

This is almost all of my training code. If anything needs to be changed or added, please correct it and I will try.

Thank you.

Your code is unfortunately neither executable nor properly formatted.
However, based on the posted code, `inputs.to(torch.device)` is wrong since `torch.device` is just a type.
Use `.to('cuda')` or `.to(torch.device('cuda'))` instead.
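A minimal sketch of the corrected device handling (the dummy tensors below only stand in for one `inputs, labels` batch from your DataLoader):

```python
import torch

# Create a device *instance*; torch.device by itself is just the class/type,
# which is exactly what the error message complains about ("got (type)")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Dummy batch standing in for `inputs, labels` from the DataLoader
inputs = torch.randn(32, 3, 224, 224)
labels = torch.randint(0, 48, (32,))

# Pass the device instance (or a plain string such as 'cuda') to .to()
inputs = inputs.to(device)
labels = labels.to(device)

# The model has to be moved to the same device as well, e.g.:
# model_ft = model_ft.to(device)
```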

Hello,
I am using the PyTorch computer vision transfer learning code to train my dataset of 4000 images. The dataset has 48 classes, and my test accuracy is around 40%. Are there any suggestions for improving it? I have split the data 80/20 for training and validation.