Train error 0 but test error is huge

This is my code. I am building a sign language classifier with 29 classes.

import torch
from torch import nn, optim
from torchvision import datasets, models, transforms

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

train_dir = '/content/asl/train/dataset'
test_dir = '/content/asl/test'
train_transforms = transforms.Compose([
    transforms.Resize((120, 120)),
    transforms.ToTensor(),
])
test_transforms = transforms.Compose([
    transforms.Resize((120, 120)),
    transforms.ToTensor(),
])

train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)


trainloader = torch.utils.data.DataLoader(train_data, batch_size=60)
testloader = torch.utils.data.DataLoader(test_data, batch_size=60)

classes = testloader.dataset.classes

model = models.resnet50(pretrained=True, progress=True)
print(model)

# Freeze the pretrained backbone so only the new head is trained
for param in model.parameters():
  param.requires_grad = False


# Re-define the last layer, i.e. `(fc): Linear(in_features=2048, out_features=1000, bias=True)`
model.fc = nn.Sequential(
    nn.Linear(2048, 512),   # linear transformation y = xA^T + b
    nn.ReLU(),              # element-wise non-linearity
    nn.Dropout(0.2),        # randomly zeroes some elements of the input tensor
    nn.Linear(512, 29),     # one output per class
    nn.LogSoftmax(dim=1)    # log probabilities over the class dimension
)

# Define the optimizer and the loss
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.fc.parameters(), lr=0.003)
model.to(device)

epochs = 15
steps = 0
running_loss = 0
print_every = 10
train_losses, test_losses = [], []

for epoch in range(epochs):
  for inputs, labels in trainloader:
    steps += 1
    inputs, labels = inputs.to(device), labels.to(device)
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    running_loss = loss.item()

    if steps % print_every == 0:
      test_loss = 0
      accuracy = 0
      model.eval()
      with torch.no_grad():
        for inputs, labels in testloader:
          inputs, labels = inputs.to(device), labels.to(device)
          outputs = model(inputs)
          batch_loss = criterion(outputs, labels)
          test_loss += batch_loss.item()
          ps = torch.exp(outputs)
          top_p, top_class = ps.topk(1, dim=1)
          equals = top_class == labels.view(*top_class.shape)
          accuracy = torch.mean(equals.type(torch.FloatTensor)).item()
      train_losses.append(running_loss/(len(trainloader)))
      test_losses.append(test_loss/len(testloader))
      print(f"Epoch {epoch+1}/{epochs}.. "
            f"Train loss: {running_loss/print_every:.3f}.. "
            f"Test loss: {test_loss/len(testloader):.3f}.. "
            f"Test accuracy: {accuracy/len(testloader):.3f}")
      running_loss = 0
      model.train()

torch.save(model, 'maskmodel.pk') #mask model

But when I trained the model I got the output below, which can't be right, and I couldn't figure out the reason:

Epoch 1/15.. Train loss: 0.000.. Test loss: 444.425.. Test accuracy: 0.000
Epoch 1/15.. Train loss: 0.000.. Test loss: 477.106.. Test accuracy: 0.000
Epoch 1/15.. Train loss: 0.000.. Test loss: 461.048.. Test accuracy: 0.000
Epoch 1/15.. Train loss: 0.000.. Test loss: 472.640.. Test accuracy: 0.000
Epoch 1/15.. Train loss: 0.000.. Test loss: 517.326.. Test accuracy: 0.000
Epoch 1/15.. Train loss: 0.000.. Test loss: 82.534.. Test accuracy: 0.000
...

It goes on like this. The same code worked for me for 2- and 3-class classification.

I would recommend validating the results manually and making sure the training and test accuracy are calculated correctly. When you see zero losses or zero accuracy, the calculation may have overflowed or may simply be rounding to zero. Both problems are visible in your loop: `accuracy` is overwritten on every test batch (`accuracy = ...` instead of `accuracy += ...`), and the surviving last-batch value is then divided by `len(testloader)` again when printing, which drives it toward zero. Similarly, `running_loss` is assigned rather than accumulated, so the reported train loss is only the last batch's loss divided by `print_every`.
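
A minimal sketch of the evaluation loop with the accumulation fixed, keeping your variable names (the only changes are the `+=` and dividing exactly once, when reporting):

```python
test_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
    for inputs, labels in testloader:
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = model(inputs)
        test_loss += criterion(outputs, labels).item()   # accumulate per-batch loss
        ps = torch.exp(outputs)                          # probabilities (model ends in LogSoftmax)
        top_p, top_class = ps.topk(1, dim=1)
        equals = top_class == labels.view(*top_class.shape)
        accuracy += equals.float().mean().item()         # accumulate: += instead of =
model.train()

# divide by the number of batches exactly once, here
print(f"Test loss: {test_loss/len(testloader):.3f}.. "
      f"Test accuracy: {accuracy/len(testloader):.3f}")
```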

Also, nn.CrossEntropyLoss expects raw logits as the model output, while your model produces log probabilities.
Either remove the nn.LogSoftmax and keep nn.CrossEntropyLoss, or keep the nn.LogSoftmax and use nn.NLLLoss instead of nn.CrossEntropyLoss.
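
Concretely, either of these pairings is consistent; a minimal sketch keeping your layer sizes:

```python
# Option 1: output raw logits and keep nn.CrossEntropyLoss
model.fc = nn.Sequential(
    nn.Linear(2048, 512),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(512, 29),        # raw logits, no LogSoftmax
)
criterion = nn.CrossEntropyLoss()  # applies log_softmax + NLL internally

# Option 2: keep nn.LogSoftmax and switch to nn.NLLLoss
model.fc = nn.Sequential(
    nn.Linear(2048, 512),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(512, 29),
    nn.LogSoftmax(dim=1),      # log probabilities over the 29 classes
)
criterion = nn.NLLLoss()           # expects log probabilities
```

Note that with Option 1, `torch.exp(outputs)` no longer yields probabilities; use `torch.softmax(outputs, dim=1)` for `ps`, or call `outputs.topk(1, dim=1)` directly, since softmax does not change which class scores highest.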