Loss not Converging for CNN Model

Image Transformation and Batching

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

transform = transforms.Compose([
                                transforms.Resize((100,100)),
                                transforms.ToTensor(),
                                transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
                                ])

data_set = datasets.ImageFolder(root="/content/drive/My Drive/models/pokemon/dataset",transform=transform)

train_loader = DataLoader(data_set,batch_size=10,shuffle=True,num_workers=6)
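As a quick sanity check (a minimal sketch, assuming the `data_set` created above), the number of classes that `ImageFolder` discovered can be printed and compared against the 149 outputs of the final layer:

# Number of class folders found by ImageFolder; this should equal
# the out_features of the last fully connected layer (149 below).
print(len(data_set.classes))
print(data_set.class_to_idx)  # folder-name -> label-index mapping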

Below is my model

class pokimonClassifier(nn.Module):

  def __init__(self):
    super().__init__()
    self.conv1 = nn.Conv2d(3,6,3,1)
    self.conv2 = nn.Conv2d(6,18,3,1)
    self.fc1 = nn.Linear(23*23*18,520)
    self.fc2 = nn.Linear(520,400)
    self.fc3 = nn.Linear(400,320)
    self.fc4 = nn.Linear(320,149)

  def forward(self,x):
    x = F.relu(self.conv1(x))
    x = F.max_pool2d(x,2,2)
    x = F.relu(self.conv2(x))
    x = F.max_pool2d(x,2,2)
    x = x.view(-1,23*23*18)
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    x = F.relu(self.fc3(x))
    x = F.log_softmax(self.fc4(x), dim=1)
    return x
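For reference, the flatten size `23*23*18` used in `fc1` and in `view` can be verified with a quick shape check (a minimal sketch, assuming the class above and a 100x100 input):

# Spatial sizes: 100 -> conv1 (3x3, stride 1) -> 98 -> max_pool(2) -> 49
#                49  -> conv2 (3x3, stride 1) -> 47 -> max_pool(2) -> 23
m = pokimonClassifier()
with torch.no_grad():
    t = torch.randn(1, 3, 100, 100)
    t = F.max_pool2d(F.relu(m.conv1(t)), 2, 2)
    t = F.max_pool2d(F.relu(m.conv2(t)), 2, 2)
print(t.shape)  # torch.Size([1, 18, 23, 23]) -> 18*23*23 features after flattening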

Creating an instance of the model, moving it to the GPU, and setting the criterion and optimizer.
I first set lr = 0.001 and later changed it to 0.0001.

model = pokimonClassifier()
model.to('cuda')
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

Training loop

train_loss = []
train_correct = []

for e in range(epochs):
  train_crt = 0
  for b,(train_x,train_y) in enumerate(train_loader):
    b+=1
    train_x, train_y = train_x.to('cuda'), train_y.to('cuda')

    # train model
    y_preds = model(train_x)
    loss = criterion(y_preds,train_y)

    # analyse model
    predicted = torch.max(y_preds,1)[1]
    correct = (predicted == train_y).sum()
    train_crt += correct

    # print loss and accuracy
    if b%50 == 0:
        print(f'Epoch {e} batch {b} loss: {loss.item()}')

    # update weights and biases
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

  train_loss.append(loss)
  train_correct.append(train_crt)

My loss value stays between 3 and 4 and it isn't converging towards 0.
I am super new to deep learning and I don't know much about it.

The dataset I am using is here: https://www.kaggle.com/thedagger/pokemon-generation-one

Any help will be much appreciated.
Thank you

The problem may be the learning rate or the optimizer you are using. The optimizer is fine, but I think the learning rate is too small. Try a learning rate between 5e-3 and 1e-2 for 10 to 20 epochs and check whether the loss is decreasing.
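A minimal sketch of that change, assuming the `model` and training loop from the question (the exact value is something to experiment with):

# Recreate the optimizer with a larger learning rate, e.g. 5e-3 (or try 1e-2),
# then rerun the training loop for 10-20 epochs and watch loss.item().
optimizer = torch.optim.Adam(model.parameters(), lr=5e-3)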