The accuracy of the test results of the trained model is zero.

Hi i am a beginner and i trained an image classification model with resnet18, but when i test the trained model with test files, none of the results are correct, i am writing the code here so you can help

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import copy
import tqdm

data_dir = ‘./dataset’

data_transforms = {
‘train’: transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),

'validation': transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),

'test': transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),

}

image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in [‘train’, ‘validation’, ‘test’]}

dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=0)
for x in [‘train’, ‘validation’, ‘test’]}

dataset_sizes = {x: len(image_datasets[x]) for x in [‘train’, ‘saglama’, ‘test’]}
class_names = image_datasets[‘train’].classes

device = torch.device(“cuda:0” if torch.cuda.is_available() else “cpu”)

Get a batch of training data

inputs, classes = next(iter(dataloaders[‘train’]))

Make a grid from batch

out = torchvision.utils.make_grid(inputs)

def train_model(model, criterion, optimizer, scheduler, num_epochs=10):
since = time.time()

best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0

for epoch in range(num_epochs):
    print('Epoch {}/{}'.format(epoch, num_epochs - 1))
    print('-' * 10)

    # Each epoch has a training and validation phase
    for phase in ['train', 'validation']:
        if phase == 'train':
            model.train()  # Set model to training mode
        else:
            model.eval()  # Set model to evaluate mode

        running_loss = 0.0
        running_corrects = 0

        # Iterate over data.
        for inputs, labels in tqdm.tqdm(dataloaders[phase]):
            inputs = inputs.to(device)
            labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward
            # track history if only in train
            with torch.set_grad_enabled(phase == 'train'):
                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)

                # backward + optimize only if in training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()

            # statistics
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
        if phase == 'train':
            scheduler.step()

        epoch_loss = running_loss / dataset_sizes[phase]
        epoch_acc = running_corrects.double() / dataset_sizes[phase]

        print('{} Loss: {:.4f} Acc: {:.4f}'.format(
            phase, epoch_loss, epoch_acc))

        # deep copy the model
        if phase == 'validation' and epoch_acc > best_acc:
            best_acc = epoch_acc
            best_model_wts = copy.deepcopy(model.state_dict())

    print()

time_elapsed = time.time() - since
print('Egitim Tamamlandı {:.0f}m {:.0f}s'.format(
    time_elapsed // 60, time_elapsed % 60))
print('En Doğru Sağlama  : {:4f}'.format(best_acc))

# load best model weights
model.load_state_dict(best_model_wts)
return model

model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features

Here the size of each output sample is set to 2.

Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).

model_ft.fc = nn.Linear(num_ftrs, len(class_names))

model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()

Observe that all parameters are being optimized

optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)

Decay LR by a factor of 0.1 every 7 epochs

exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=300)

torch.save(model_ft, “model.pt”)

class_labels = [“Powdery_mildew”, “Healthy”, “YellowLeaf_Curl_Virus”, “Bacterial_Spot”]

total = 0 # total değişkenini tanımla ve sıfıra ayarla
correct = 0

since we’re not training, we don’t need to calculate the gradients for our outputs

with torch.no_grad():
for data in tqdm.tqdm(dataloaders[“test”]):
images, labels = data
# calculate outputs by running images through the network
outputs = model_ft(images)
# the class with the highest energy is what we choose as prediction
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()

    # Print the predicted labels
    for i in range(len(predicted)):
        print('Predicted class: {}'.format(class_labels[predicted[i]]))

print(‘Accuracy of the network on the %d test images: %d %%’ % (
dataset_sizes[“test”], 100 * correct / total))