Save label names when saving the model

Hello,
I'm new to image classification.
I'm starting with eleven image classes, and I would like to know how to save the labels once training is finished.
When I predict an image with this model, I need to get the label name back, and I don't want to hard-code this in the prediction code:

    classes = ('Cat', 'Dog', 'Bird', 'Airplane', 'Boat', 'Car', 'moto', 'bicycle', 'wheel',  'pen', 'apple')
    

Thank you for your help

import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np

import os 
os.environ['KMP_DUPLICATE_LIB_OK']='True'

if __name__ == "__main__":
    path_training_folder="Image" 
    
    
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    
    transform = transforms.Compose(
        [transforms.Resize(255),
         transforms.CenterCrop(255),         
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    
    dataset = torchvision.datasets.ImageFolder(root=path_training_folder,transform=transform)
    
    # Creating data indices for training and validation splits:
    validation_split = 0.3
    shuffle_dataset = True
    random_seed = 217
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    if shuffle_dataset :
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]
    for i in range(7):      # each pass doubles the list, so this multiplies the training indices by 2^7 = 128
        train_indices = train_indices + train_indices
    
    print("train indices number="+str(len(train_indices)))
    print("valid indices number="+str(len(val_indices)))
    
    # Creating PT data samplers and loaders:
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)
    
    trainloader = torch.utils.data.DataLoader(dataset, batch_size=4,
                                              shuffle=False, num_workers=2, sampler=train_sampler)
     
    testloader = torch.utils.data.DataLoader(dataset, batch_size=4,
                                             shuffle=False, num_workers=2, sampler=valid_sampler)
    
    classes = ('Cat', 'Dog', 'Bird', 'Airplane', 'Boat', 'Car', 'moto', 'bicycle', 'wheel',  'pen', 'apple')
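    # Note: ImageFolder already derives the class names from the sub-folder names and stores
    # them (in sorted order) in dataset.classes, with the index mapping in dataset.class_to_idx.
    # The labels returned by the loaders follow that sorted order, which may not match the
    # order of the hand-written tuple above.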
    
    def imshow(img):
        img = img / 2 + 0.5     # unnormalize
        npimg = img.numpy()
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        plt.show()
        
    # get some random training images
    dataiter = iter(trainloader)
    images, labels = next(dataiter)

    # print labels
    print('Check: ', ' - '.join('%5s' % classes[labels[j]] for j in range(4)))
       

    # show images
    imshow(torchvision.utils.make_grid(images))

    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(3, 4, 5)  ####
            self.pool = nn.MaxPool2d(2, 2)
            self.conv2 = nn.Conv2d(4, 16, 5) ####
            self.fc1 = nn.Linear(57600 , 1024) ####
            self.fc2 = nn.Linear(1024, 700)  ####
            self.fc3 = nn.Linear(700, 11)  #### 11 nb of classes

        def forward(self, x):
            x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
            x = F.max_pool2d(F.relu(self.conv2(x)), 2)
            x = x.view(-1, self.num_flat_features(x))
            #x = x.view(-1, 16 * 61 * 61)
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x

        def num_flat_features(self, x):
            size = x.size()[1:]  # all dimensions except the batch dimension
            num_features = 1
            for s in size:
                num_features *= s
            return num_features

    net = Net()
    net.to(device)

    # Training
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    epochs = 2
    steps = 0
    running_loss = 0
    print_every = 10
    train_losses, test_losses = [], []
    enum_max = 0

    for epoch in range(epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        enum = 0
        enum_max = 0
        for i, data in enumerate(trainloader, 0):
            enum = enum+1
            steps += 1
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data[0].to(device), data[1].to(device)

            # zero the parameter gradients
            optimizer.zero_grad()           
            
            # forward + backward + optimize
            logps = net(inputs)
            loss = criterion(logps, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if steps % print_every == 0:
              test_loss = 0
              accuracy = 0
              net.eval()
              with torch.no_grad():                
                for inputs, labels in testloader:
                    inputs, labels = inputs.to(device), labels.to(device)
                    logps = net(inputs)
                    batch_loss = criterion(logps, labels)
                    test_loss += batch_loss.item()
                    ps = torch.softmax(logps, dim=1)  # the net outputs raw logits, so use softmax for probabilities
                    top_p, top_class = ps.topk(1, dim=1)
                    equals = top_class == labels.view(*top_class.shape)
                    accuracy += torch.mean(equals.type(torch.FloatTensor)).item() 
              train_losses.append(running_loss/print_every)
              test_losses.append(test_loss/len(testloader))                    
              print(f"Epoch {epoch+1}/{epochs}.. "
                    f"Train loss: {running_loss/print_every:.3f}.. "
                    f"Test loss: {test_loss/len(testloader):.3f}.. "
                    f"Test accuracy: {accuracy/len(testloader):.3f}")
              
              if steps % 100 == 0:
                plt.plot(train_losses, label='Training loss')
                plt.plot(test_losses, label='Validation loss')
                plt.legend(frameon=False)
                plt.show()
              if (round((running_loss/print_every),3) == 0.000 and enum_max == 0):
                print(enum)
              running_loss = 0
              net.train()
    PATH = 'trainv2.pth'
    torch.save(net.state_dict(), PATH)
    print('Finished Training')

    # get some random test images
    dataiter = iter(testloader)
    images, labels = next(dataiter)

    # print images and label
    imshow(torchvision.utils.make_grid(images))
    print('GroundTruth: ', ' - '.join('%5s' % classes[labels[j]] for j in range(4)))

You can use torch.save to save a dict object, which could contain the model.state_dict(), your class-name mapping, and any additional objects.
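For example, a minimal sketch along those lines (the checkpoint file name and the image_tensor variable in the prediction part are placeholders; net, Net, dataset, and device come from the training script above):

    # Save a checkpoint that bundles the trained weights with the class names.
    # dataset.classes is the label list ImageFolder built from the sub-folder names.
    checkpoint = {
        'model_state_dict': net.state_dict(),
        'classes': dataset.classes,
    }
    torch.save(checkpoint, 'trainv2_with_classes.pth')

    # Later, in the prediction script:
    checkpoint = torch.load('trainv2_with_classes.pth', map_location=device)
    net = Net()
    net.load_state_dict(checkpoint['model_state_dict'])
    net.to(device)
    net.eval()
    classes = checkpoint['classes']  # no hard-coded label names needed

    with torch.no_grad():
        logits = net(image_tensor.unsqueeze(0).to(device))  # image_tensor: one transformed image
        predicted_label = classes[logits.argmax(dim=1).item()]

Because ImageFolder derives the labels from the folder names in sorted order, saving dataset.classes keeps the index-to-name mapping consistent between training and prediction.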
