Cannot Predict using Pretrained Model

I trained a model using AlexNet:

class OurAlex(nn.Module):
    def __init__(self, num_classes=8):
        super(OurAlex, self).__init__()
        self.alexnet = torchvision.models.alexnet(pretrained=True)
        for param in self.alexnet.parameters():
            param.requires_grad = False

        # Add an avgpool layer here
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))

        # Replace the classifier layer
        # to customise it according to our output
        self.alexnet.classifier = nn.Sequential(
            nn.Linear(256 * 7 * 7, 1024),
            nn.Linear(1024, 256),
            nn.Linear(256, num_classes),
        )
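
    def forward(self, x):
        # forward pass: AlexNet features -> adaptive avgpool -> flatten -> custom classifier
        x = self.alexnet.features(x)
        x = self.avgpool(x)
        x = x.view(-1, 256 * 7 * 7)
        x = self.alexnet.classifier(x)
        return x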

This model was trained on the Dogs vs. Cats dataset.
I saved the model and optimizer state dicts using:

        state = {
            "epoch": epoch,
            "state_dict": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        }
        torch.save(state, 'model.pth')

PROBLEM –
When I tried to load the saved model to run predictions and check its accuracy in the validation loop:

    checkpoint = torch.load("model.pth")
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])

it gave an error:

model.load_state_dict(checkpoint['state_dict'])
TypeError: 'OurAlex' object is not subscriptable

I haven't encountered an error like this before!

Please help me out with this one.
Thank you

EDIT (adding the train loop) –

model = OurAlex(num_classes=2).to(device)  # keep the model on the same device as the batches
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
EPOCHS = 1

TRAIN = False
losses = []


def training_loop(model, optimizer, epochs):
    for epoch in range(epochs):
        try:
            for img, lab in tqdm(Loader):
                img = img.to(device)
                lab = lab.to(device)
                optimizer.zero_grad()  # clear gradients from the previous step
                predictions = model(img)
                loss = criterion(predictions, lab)
                loss.backward()  # backpropagate to compute gradients
                optimizer.step()  # update the (unfrozen) classifier parameters
                losses.append(loss.item())
                print(f"loss:  {loss.item():.4f}")
        except Exception as e:
            print(str(e))
        state = {
            "epoch": epoch,
            "state_dict": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        }
        torch.save(state, filepath)  # filepath is assumed to be defined elsewhere (e.g. the checkpoint path)


if TRAIN:
    training_loop(model, optimizer, EPOCHS)

This code works under Google Colab. What could be the problem in your case?

model = OurAlex()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2) 
epoch = 0
path = 'model.pth'

state = {
    "epoch": epoch,
    "state_dict": model.state_dict(),
    "optimizer": optimizer.state_dict(),
}
torch.save(state, path)

###############

checkpoint = torch.load(path)
model1 = OurAlex()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=1e-2) 

model1.load_state_dict(checkpoint['state_dict'])
optimizer1.load_state_dict(checkpoint['optimizer'])

Well, I think there is a problem with the model in particular,

but can you still give the training loop a look?

Let me add the train loop in too.

EDIT -
Maybe there is some problem in there.

ALSO –
I cannot interpret this error:

TypeError: 'OurAlex' object is not subscriptable

If you have a (Google Colab) notebook, share it.
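
Also, that traceback says the object being indexed with ['state_dict'] is an OurAlex instance, so torch.load seems to be returning a whole pickled model rather than the dict you saved. A quick check you could run (just a sketch; point it at whichever checkpoint file you actually load):

    checkpoint = torch.load("model.pth")
    print(type(checkpoint))  # a checkpoint saved as a dict should print <class 'dict'>
    if isinstance(checkpoint, dict):
        print(checkpoint.keys())  # expect 'epoch', 'state_dict', 'optimizer'
    else:
        # an OurAlex object here means the file holds a full pickled model
        # (saved with something like torch.save(model, path)), so indexing it
        # with ['state_dict'] raises exactly this TypeError
        print("loaded a", type(checkpoint).__name__, "object, not a dict")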

I am sorry, I cannot do that; I am not that familiar with Colab.
Thanks for your time, bro.
But I seriously don't know what to do about this.

If you want, here is the full code

and the error:

TypeError: 'OurAlex' object is not subscriptable

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from torchvision import transforms, datasets, models
import torchvision
from tqdm import tqdm
import os

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# prepare data
convert = transforms.Compose(
    [
        transforms.Resize((128, 128)),
        transforms.RandomHorizontalFlip(0.2),
        transforms.ToTensor(),
    ]
)

# dataloader

data = datasets.ImageFolder(root="PetImages/", transform=convert)
Loader = DataLoader(data, batch_size=64, shuffle=True)


class OurAlex(nn.Module):
    def __init__(self, num_classes=8):
        super(OurAlex, self).__init__()
        self.alexnet = torchvision.models.alexnet(pretrained=True)
        for param in self.alexnet.parameters():
            param.requires_grad = False

        # Add an avgpool layer here
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))

        # Replace the classifier layer
        # to customise it according to our output
        self.alexnet.classifier = nn.Sequential(
            nn.Linear(256 * 7 * 7, 1024),
            nn.Linear(1024, 256),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):
        x = self.alexnet.features(x)
        x = self.avgpool(x)
        x = x.view(-1, 256 * 7 * 7)
        x = self.alexnet.classifier(x)
        return x


model = OurAlex(num_classes=2).to(device)  # keep the model on the same device as the batches
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
EPOCHS = 1

TRAIN = False
losses = []


def training_loop(model, optimizer, epochs):
    for epoch in range(epochs):
        try:
            for img, lab in tqdm(Loader):
                img = img.to(device)
                lab = lab.to(device)
                optimizer.zero_grad()  # clear gradients from the previous step
                predictions = model(img)
                loss = criterion(predictions, lab)
                loss.backward()  # backpropagate to compute gradients
                optimizer.step()  # update the (unfrozen) classifier parameters
                losses.append(loss.item())
                print(f"loss:  {loss.item():.4f}")
        except Exception as e:
            print(str(e))
        state = {
            "epoch": epoch,
            "state_dict": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        }
        torch.save(state, filepath)  # filepath is assumed to be defined elsewhere (e.g. the checkpoint path)


if TRAIN:
    training_loop(model, optimizer, EPOCHS)

TEST = True


def test():
    test = datasets.ImageFolder(root="PetTest/", transform=convert)
    testLoader = DataLoader(test, batch_size=16, shuffle=True)
    checkpoint = torch.load("catsvdogs.pth")
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    for params in model.parameters():
        params.requires_grad = False  # freeze parameters for inference
    print(model)
# I was just testing checkpoint loading here;
# the accuracy loop is yet to be completed


if TEST:
    test()
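
For reference, once the checkpoint loads correctly, this is roughly how I plan to finish the accuracy check inside test() (just a sketch reusing testLoader, model, and device from above):

    model.eval()  # switch to evaluation mode
    correct, total = 0, 0
    with torch.no_grad():  # no gradients needed for validation
        for img, lab in testLoader:
            img, lab = img.to(device), lab.to(device)
            out = model(img)
            pred = out.argmax(dim=1)  # class with the highest logit
            correct += (pred == lab).sum().item()
            total += lab.size(0)
    print(f"accuracy: {correct / total:.4f}")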