Can someone explain how to fix this?

This is the error:

        # Iterate over data.
---> 22 for i, data in enumerate(trainloader, 0):
     23     inputs, labels = data
     24     inputs, labels = inputs.cuda(), labels.cuda()

This is the code:
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training phase and a validation phase.
        model.train()  # Set model to training mode

        running_loss = 0.0
        running_acc = 0

        # Iterate over data.
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            inputs, labels = inputs.cuda(), labels.cuda()

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # statistics
            running_loss += loss.item() * inputs.size(0)
            out = torch.argmax(outputs.detach(), dim=1)
            assert out.shape == labels.shape
            running_acc += (labels == out).sum().item()
        print(f"Train loss {epoch+1}: {running_loss/len(trainset)}, Train Acc: {running_acc*100/len(trainset)}%")

        # Validation phase
        correct = 0
        model.eval()  # Set model to evaluate mode
        with torch.no_grad():
            for inputs, labels in valloader:
                out = model(inputs.cuda()).cpu()
                out = torch.argmax(out, dim=1)
                acc = (out == labels).sum().item()
                correct += acc
        print(f"Val accuracy: {correct*100/len(valset)}%")
        if correct > best_acc:
            best_acc = correct
            best_model_wts = copy.deepcopy(model.state_dict())
        scheduler.step()

~\AppData\Local\Temp/ipykernel_13884/1642471687.py in train_model(model, criterion, optimizer, scheduler, num_epochs)
      8 running_loss = 0.0
      9 running_acc = 0
---> 10 for i, data in enumerate(trainloader, 0):
     11     inputs, labels = data
     12     inputs, labels = inputs.cuda(), labels.cuda()

NameError: name 'trainloader' is not defined

That is what it shows, and I don't know how to solve it. I have been trying.

This is because you have not defined trainloader. The trainloader is the object that yields batches of the data you want to train on.
I will give you an example of how to build a trainloader (CIFAR10):

transform_train = transforms.Compose(
    [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ]
)
transform_test = transforms.Compose(
    [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ]
)

trainset = torchvision.datasets.CIFAR10(
    root="../data", train=True, download=True, transform=transform_train
)
# train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
train_loader = torch.utils.data.DataLoader(
    trainset,
    batch_size=args.batches,
    shuffle=True,
    num_workers=12,
    drop_last=True,
    # sampler=train_sampler,
    pin_memory=True,
)

testset = torchvision.datasets.CIFAR10(
    root="../data", train=False, download=True, transform=transform_test
)
val_loader = torch.utils.data.DataLoader(
    testset,
    batch_size=args.batches,
    shuffle=False,
    num_workers=12,
    drop_last=True,
    pin_memory=True,
)

There are trainset and train_loader: trainset is the list of training samples (unmodified), while train_loader is the iterable that yields batches from the dataset and can also apply augmentation and transforms.
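To make the connection to your error concrete, here is a minimal sketch (the data root, the batch size, and the plain ToTensor transform are placeholders, not your settings) showing that the name trainloader has to exist before train_model runs, and that each iteration of the loader yields one batch of inputs and labels:

import torch
import torchvision
import torchvision.transforms as transforms

# Placeholder transform and batch size; swap in your own.
transform = transforms.ToTensor()

trainset = torchvision.datasets.CIFAR10(
    root="../data", train=True, download=True, transform=transform
)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True)

# Each iteration yields one batch: a tensor of images and a tensor of labels.
inputs, labels = next(iter(trainloader))
print(inputs.shape, labels.shape)  # torch.Size([128, 3, 32, 32]) torch.Size([128])

# One way to avoid the NameError altogether is to pass the loader in explicitly
# (a hypothetical signature, not the tutorial's):
# def train_model(model, criterion, optimizer, scheduler, trainloader, num_epochs=25):
#     ...

If you keep trainloader as a global, the only requirement is that the cell that defines it runs before the cell that calls train_model.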

from __future__ import print_function, division

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import time
import os
import copy

cudnn.benchmark = True
plt.ion() # interactive mode

data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

data_dir = r'C:\my data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=6,
                                              shuffle=True, num_workers=6)
               for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

train_data = datasets.CIFAR10(
    root=r"C:\my data",
    train=True,
    download=True,
    transform=data_transforms['train'])

train_data, val_data = torch.utils.data.random_split(
    train_data, [int(len(train_data) * 0.8), int(len(train_data) * 0.2)])

test_data = datasets.CIFAR10(
    root=r"C:\my data",
    train=False,
    download=True,
    transform=data_transforms['val'])

classes = test_data.classes
dic_classes = {}
for i in range(len(classes)):
    dic_classes[i] = classes[i]

print(dic_classes)

trainloader = torch.utils.data.DataLoader(
    train_data, batch_size=6, shuffle=True)
valloader = torch.utils.data.DataLoader(
    val_data, batch_size=6, shuffle=True)
testloader = torch.utils.data.DataLoader(
    test_data, batch_size=6, shuffle=False)

def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated

# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))

# Make a grid from batch
out = torchvision.utils.make_grid(inputs)

imshow(out, title=[class_names[x] for x in classes])

def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        model.train(True)
        running_loss = 0.0
        running_acc = 0
        for i, data in enumerate(trainloader):
            inputs, labels = data
            inputs, labels = inputs.cuda(), labels.cuda()

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item() * inputs.size(0)
            out = torch.argmax(outputs.detach(), dim=1)
            assert out.shape == labels.shape
            running_acc += (labels == out).sum().item()
        print(f"Train loss {epoch+1}: {running_loss/len(train_data)}, Train Acc: {running_acc*100/len(train_data)}%")

        correct = 0
        model.train(False)
        with torch.no_grad():
            for inputs, labels in valloader:
                out = model(inputs.cuda()).cpu()
                out = torch.argmax(out, dim=1)
                acc = (out == labels).sum().item()
                correct += acc
        print(f"Val accuracy: {correct*100/len(val_data)}%")
        if correct > best_acc:
            best_acc = correct
            best_model_wts = copy.deepcopy(model.state_dict())
        scheduler.step()

    model.load_state_dict(best_model_wts)
    return model

def visualize_model(model, num_images=6):
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()

    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images//2, 2, images_so_far)
                ax.axis('off')
                ax.set_title(f'predicted: {class_names[preds[j]]}')
                imshow(inputs.cpu().data[j])

                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)

model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features

# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, 2)

model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)

# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                       num_epochs=25)
The error is this:


NameError                                 Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_17208/2182967272.py in <module>
----> 1 model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
      2                        num_epochs=25)

~\AppData\Local\Temp/ipykernel_17208/3799990550.py in train_model(model, criterion, optimizer, scheduler, num_epochs)
      8 running_loss = 0.0
      9 running_acc = 0
---> 10 for i, data in enumerate(trainloader):
     11     inputs, labels = data
     12     inputs, labels = inputs.cuda(), labels.cuda()

NameError: name 'trainloader' is not defined

Can you check this code and tell me what to change? I have tried it a number of ways and I keep getting errors. If you would rather rewrite it, that is also fine. Please help me with this.

Thank you.