Loss not decreasing for image regression

Hello, I want to train a model that takes an image as input and predicts a float between 0 and 360. When training, neither the training loss nor the validation loss decreases.
I used the following model:

import torch
import torch.nn as nn
import torch.nn.functional as F

torch.autograd.set_detect_anomaly(True)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 16 * 16, 256)
        self.fc2 = nn.Linear(256, 1)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        # add sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(-1, 64 * 16 * 16)  # flatten to (N, 16384)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x

train_on_gpu = torch.cuda.is_available()
model = Net()
if train_on_gpu:
    model.cuda()
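The flatten size works out: each of the three pooling layers halves the 128 x 128 input (128 → 64 → 32 → 16), so the flattened features are 64 * 16 * 16 = 16384. A quick shape check with a random batch (just a sketch on a fresh CPU instance):

net = Net()  # fresh CPU instance, only for the shape check
x = torch.randn(2, 3, 128, 128)  # batch of two 128x128 RGB images
print(net(x).shape)  # expected: torch.Size([2, 1])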

The images are normalized and resized to 128 x 128. I tried approaches I found in this forum, including learning-rate scheduling, but none of them worked for me.
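For context, the preprocessing pipeline looks roughly like this (a sketch; the mean/std values below are the common ImageNet statistics, shown only as a placeholder for my actual values):

from torchvision import transforms

# Rough sketch of the preprocessing; mean/std are placeholder
# ImageNet statistics, not necessarily the real ones used
transform = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])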

import torch.optim as optim

from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau

optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.01, nesterov=True)

scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.7, patience=3, verbose=True)  # 'min': scheduler.step() receives a loss

criterion = nn.MSELoss().cuda()
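For completeness, the data loaders are set up along these lines (a sketch; train_dataset and valid_dataset stand in for my actual Dataset objects, which are omitted here):

from torch.utils.data import DataLoader

# Sketch of the loader setup; the Dataset objects are placeholders
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=32, shuffle=False)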

I’m training with a batch size of 32. My training and evaluation code looks like this:

from tqdm import tqdm

n_epochs = 50

valid_loss_min = float('inf')

for epoch in range(1, n_epochs+1):

    train_loss = 0.0
    valid_loss = 0.0

    model.train()
    for data, target in tqdm(train_loader):
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
    
        output = model(data)
        target = target.unsqueeze(1)  # match the output shape (N, 1)
        loss = criterion(output.float(), target.float())
        loss.backward()
        optimizer.step()
        train_loss += loss.item()*data.size(0)
        
    model.eval()
    with torch.no_grad():  # no gradients needed during validation
        for data, target in valid_loader:
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            target = target.unsqueeze(1)
            loss = criterion(output.float(), target.float())
            valid_loss += loss.item()*data.size(0)
    
    train_loss = train_loss/len(train_loader.sampler)
    valid_loss = valid_loss/len(valid_loader.sampler)
        
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch, train_loss, valid_loss))
    
    if valid_loss <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
            valid_loss_min, valid_loss))
        torch.save(model.state_dict(), 'model.pt')
        valid_loss_min = valid_loss
    scheduler.step(valid_loss)
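After training, the best checkpoint can be restored the usual way (standard state_dict loading):

# Restore the best checkpoint saved above
model.load_state_dict(torch.load('model.pt'))
model.eval()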

I could not find the mistake in my code. Is my model unsuited for image regression tasks? If so, what would I have to modify? Until now I have only worked with classifiers.
Thank you 🙂