Model fails to converge upon image resize

I don't understand the strange behavior I see when changing the image size.
I am using the ants and bees dataset from the transfer learning tutorial:

https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html

# Simple Convnet
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
import torchvision
from torchvision import transforms
from PIL import Image


print(torch.__version__)
# 1.1

img_transforms = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.5, 0.5, 0.5])
])
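
As an aside, the mean values above are the ImageNet means, but the std of 0.5 is not the ImageNet std (0.229, 0.224, 0.225), so the inputs end up in a non-standard range. Here is a minimal sketch I use to eyeball the per-channel statistics after the transform (the image path is just a placeholder for any training image):

# Sanity check: per-channel stats of one transformed image.
img = Image.open("main_dir/train/ants/some_image.jpg").convert("RGB")  # placeholder path
x = img_transforms(img)
print(x.shape)                    # torch.Size([3, 128, 128])
print(x.view(3, -1).mean(dim=1))  # per-channel mean after normalization
print(x.view(3, -1).std(dim=1))   # per-channel std after normalization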


train_data_path = "main_dir/train/"
train_data = torchvision.datasets.ImageFolder(root=train_data_path,
                                              transform=img_transforms)

classes = len(train_data.classes)
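
For reference, a quick look at what ImageFolder picked up (my own sanity sketch, not from the tutorial):

print(train_data.classes)       # expected: ['ants', 'bees']
print(train_data.class_to_idx)  # expected: {'ants': 0, 'bees': 1}
print(len(train_data))          # number of training images found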


val_data_path = "main_dir/val/"
val_data = torchvision.datasets.ImageFolder(root=val_data_path,
                                            transform=img_transforms)


test_data_path = "main_dir/test/"
test_data = torchvision.datasets.ImageFolder(root=test_data_path,
                                             transform=img_transforms)

batch_size = 20
train_data_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
val_data_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=True)
test_data_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)


data_loaders = (train_data_loader, val_data_loader)
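
A quick sanity check on the loaders (my own sketch; the shape in the comment assumes the 128x128 resize above):

inputs, targets = next(iter(train_data_loader))
print(inputs.shape)        # e.g. torch.Size([20, 3, 128, 128])
print(targets.bincount())  # rough class balance within one batch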


# Create Model

class CNNNet(nn.Module):
    # AlexNet-style layout: five conv layers, adaptive pooling, MLP classifier

    def __init__(self, num_classes):
        super(CNNNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Linear(4096, num_classes)
        )
    
    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
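
To see how the resize interacts with this architecture, here is a small shape probe I wrote (not from the tutorial); note that AdaptiveAvgPool2d((6, 6)) forces the classifier input to 256 * 6 * 6 regardless of the image size:

# Probe: feature-map sizes for different input resolutions.
net = CNNNet(num_classes=2)
for size in (64, 128, 256, 512):
    x = torch.randn(1, 3, size, size)
    feats = net.features(x)    # spatial size grows with the input
    pooled = net.avgpool(feats)  # always (1, 256, 6, 6)
    print(size, tuple(feats.shape), tuple(pooled.shape))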


# set up training

def train(model, optimizer, loss_fn, data_loaders, epochs=20):

    # move the model to the GPU if one is available
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    model.to(device)

    train_loader, val_loader = data_loaders
    
    for epoch in range(epochs):
        training_loss = 0.0
        valid_loss = 0.0
        model.train()
        for batch in train_loader:
            optimizer.zero_grad()
            inputs, targets = batch
            inputs = inputs.to(device)
            targets = targets.to(device)
            output = model(inputs)
            loss = loss_fn(output, targets)
            loss.backward()
            optimizer.step()
            training_loss += loss.item() * inputs.size(0)  # item() converts the 0-dim loss tensor to a float
        training_loss /= len(train_loader.dataset)
        
        model.eval()
        num_correct = 0 
        num_examples = 0
        for batch in val_loader:
            inputs, targets = batch
            inputs = inputs.to(device)
            output = model(inputs)
            targets = targets.to(device)
            loss = loss_fn(output,targets) 
            valid_loss += loss.item() * inputs.size(0)
            correct = torch.eq(torch.max(F.softmax(output, dim=1), dim=1)[1], targets).view(-1)
            num_correct += torch.sum(correct).item()
            num_examples += correct.shape[0]
        valid_loss /= len(val_loader.dataset)

        print('Epoch: {}, Training Loss: {:.2f}, Validation Loss: {:.2f}, accuracy = {:.2f}'.format(
            epoch, training_loss, valid_loss, num_correct / num_examples))




model = CNNNet(classes)

optimizer = optim.Adam(model.parameters(), lr=0.001)
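
To figure out whether the gradients die at the larger sizes, this is a small diagnostic I can call right after loss.backward() (my own sketch, not part of the posted run):

# Diagnostic sketch: overall gradient norm of the model.
def grad_norm(model):
    total = 0.0
    for p in model.parameters():
        if p.grad is not None:
            total += p.grad.norm().item() ** 2
    return total ** 0.5

# usage inside the training loop, right after loss.backward():
#     print(grad_norm(model))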


train(model, optimizer, torch.nn.CrossEntropyLoss(), data_loaders, epochs=200)

If the image size is 64:

transforms.Resize((64,64))

I get the following training output; as you can see, the training loss decreases as expected:

Epoch: 25, Training Loss: 0.73, Validation Loss: 0.66, accuracy = 0.57
Epoch: 26, Training Loss: 0.63, Validation Loss: 0.67, accuracy = 0.61
Epoch: 27, Training Loss: 0.58, Validation Loss: 0.61, accuracy = 0.69
Epoch: 28, Training Loss: 0.53, Validation Loss: 0.69, accuracy = 0.69
Epoch: 29, Training Loss: 0.49, Validation Loss: 0.62, accuracy = 0.68
Epoch: 30, Training Loss: 0.50, Validation Loss: 1.12, accuracy = 0.52
Epoch: 31, Training Loss: 0.52, Validation Loss: 0.74, accuracy = 0.63
Epoch: 32, Training Loss: 0.44, Validation Loss: 0.95, accuracy = 0.67
Epoch: 33, Training Loss: 0.43, Validation Loss: 0.81, accuracy = 0.60
Epoch: 34, Training Loss: 0.48, Validation Loss: 0.80, accuracy = 0.60
Epoch: 35, Training Loss: 0.40, Validation Loss: 0.89, accuracy = 0.69
Epoch: 36, Training Loss: 0.37, Validation Loss: 0.79, accuracy = 0.65
Epoch: 37, Training Loss: 0.33, Validation Loss: 0.92, accuracy = 0.61
Epoch: 38, Training Loss: 0.37, Validation Loss: 1.26, accuracy = 0.59
Epoch: 39, Training Loss: 0.35, Validation Loss: 0.92, accuracy = 0.58
Epoch: 40, Training Loss: 0.32, Validation Loss: 1.08, accuracy = 0.62
Epoch: 41, Training Loss: 0.22, Validation Loss: 1.41, accuracy = 0.61
Epoch: 42, Training Loss: 0.16, Validation Loss: 3.20, accuracy = 0.66
Epoch: 43, Training Loss: 0.63, Validation Loss: 0.69, accuracy = 0.51
Epoch: 44, Training Loss: 0.59, Validation Loss: 0.67, accuracy = 0.59
Epoch: 45, Training Loss: 0.49, Validation Loss: 0.92, accuracy = 0.66
Epoch: 46, Training Loss: 0.38, Validation Loss: 0.82, accuracy = 0.65
Epoch: 47, Training Loss: 0.27, Validation Loss: 1.06, accuracy = 0.65
Epoch: 48, Training Loss: 0.20, Validation Loss: 1.33, accuracy = 0.58
Epoch: 49, Training Loss: 0.16, Validation Loss: 1.85, accuracy = 0.58
Epoch: 50, Training Loss: 0.36, Validation Loss: 0.98, accuracy = 0.60
Epoch: 51, Training Loss: 0.43, Validation Loss: 0.90, accuracy = 0.63
Epoch: 52, Training Loss: 0.36, Validation Loss: 0.91, accuracy = 0.64
Epoch: 53, Training Loss: 0.23, Validation Loss: 1.71, accuracy = 0.64
Epoch: 54, Training Loss: 0.14, Validation Loss: 1.69, accuracy = 0.65
Epoch: 55, Training Loss: 0.11, Validation Loss: 2.53, accuracy = 0.60

However, when I increase the resize to 256 or 512, the model fails to converge: the losses stay pinned at about 0.69 (ln 2, i.e. chance level for two classes) indefinitely.

transforms.Resize((256,256))

Epoch: 0, Training Loss: 0.86, Validation Loss: 0.69, accuracy = 0.54
Epoch: 1, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 2, Training Loss: 0.70, Validation Loss: 0.69, accuracy = 0.46
Epoch: 3, Training Loss: 0.69, Validation Loss: 0.71, accuracy = 0.46
Epoch: 4, Training Loss: 0.70, Validation Loss: 0.69, accuracy = 0.54
Epoch: 5, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 6, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 7, Training Loss: 0.68, Validation Loss: 0.68, accuracy = 0.62
Epoch: 8, Training Loss: 0.67, Validation Loss: 0.67, accuracy = 0.62
Epoch: 9, Training Loss: 0.65, Validation Loss: 0.67, accuracy = 0.59
Epoch: 10, Training Loss: 0.63, Validation Loss: 1.27, accuracy = 0.46
Epoch: 11, Training Loss: 0.73, Validation Loss: 0.69, accuracy = 0.54
Epoch: 12, Training Loss: 0.70, Validation Loss: 0.69, accuracy = 0.54
Epoch: 13, Training Loss: 0.72, Validation Loss: 0.69, accuracy = 0.54
Epoch: 14, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 15, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 16, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 17, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 18, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 19, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 20, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 21, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 22, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 23, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 24, Training Loss: 0.72, Validation Loss: 0.70, accuracy = 0.46
Epoch: 25, Training Loss: 0.69, Validation Loss: 0.70, accuracy = 0.46
Epoch: 26, Training Loss: 0.70, Validation Loss: 0.69, accuracy = 0.54
Epoch: 27, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 28, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 29, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 30, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 31, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 32, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 33, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 34, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 35, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 36, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 37, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 38, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 39, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 40, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 41, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 42, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 43, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 44, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 45, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 46, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 47, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 48, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 49, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 50, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 51, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 52, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 53, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 54, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 55, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 56, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 57, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 58, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 59, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 60, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 61, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 62, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 63, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 64, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 65, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 66, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 67, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 68, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 69, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 70, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 71, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 72, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 73, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 74, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 75, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 76, Training Loss: 0.69, Validation Loss: 0.70, accuracy = 0.46
Epoch: 77, Training Loss: 0.70, Validation Loss: 0.70, accuracy = 0.46
Epoch: 78, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 79, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 80, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 81, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 82, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 83, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 84, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 85, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 86, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 87, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 88, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 89, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 90, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 91, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 92, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 93, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 94, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 95, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 96, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 97, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 98, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 99, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 100, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 101, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46

The same happens with 512:

transforms.Resize((512,512))

Epoch: 97, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 98, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 99, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 100, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 101, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 102, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 103, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 104, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 105, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 106, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 107, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 108, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 109, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 110, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 111, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 112, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 113, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 114, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 115, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 116, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 117, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 118, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 119, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 120, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 121, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 122, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 123, Training Loss: 0.69, Validation Loss: 0.70, accuracy = 0.46
Epoch: 124, Training Loss: 0.69, Validation Loss: 0.70, accuracy = 0.46
Epoch: 125, Training Loss: 0.69, Validation Loss: 0.70, accuracy = 0.46
Epoch: 126, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 127, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 128, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 129, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 130, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 131, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 132, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 133, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 134, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 135, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 136, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 137, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 138, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 139, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 140, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 141, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 142, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 143, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 144, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 145, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 146, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 147, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 148, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 149, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 150, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 151, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 152, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 153, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 154, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 155, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 156, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 157, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 158, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 159, Training Loss: 0.69, Validation Loss: 0.70, accuracy = 0.46
Epoch: 160, Training Loss: 0.69, Validation Loss: 0.70, accuracy = 0.46
Epoch: 161, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 162, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 163, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 164, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 165, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 166, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 167, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 168, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 169, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 170, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 171, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 172, Training Loss: 0.69, Validation Loss: 0.70, accuracy = 0.46
Epoch: 173, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 174, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 175, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 176, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 177, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 178, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 179, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.54
Epoch: 180, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 181, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 182, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 183, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 184, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 185, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 186, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 187, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 188, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 189, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 190, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 191, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 192, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 193, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 194, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 195, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 196, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 197, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 198, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46
Epoch: 199, Training Loss: 0.69, Validation Loss: 0.69, accuracy = 0.46

My training loss simply isn't decreasing at the larger image sizes.
Is this normal behavior?