Loss value doesn't decrease

I implemented an image classification algorithm and there are 10 classes.
I have a small dataset with 216 RGB images.
When I train my network, the loss value doesn't decrease.
Here is my code:

class signLanguageDataset(Dataset):
    """Dataset of paired spatial ('spt') and optical-flow ('flow') images stored
    in a single HDF5 file, with one integer label per sample.

    For each sample ``name`` the HDF5 file is expected to contain the datasets
    ``name_flow``, ``name_spt`` and ``name_label`` (inferred from __getitem__).
    """

    def __init__(self, h5_root_dir, train):
        # h5_root_dir: path to the HDF5 file; train: enables random augmentation.
        self.h5_root_dir = h5_root_dir
        # Opened lazily in __getitem__ so each DataLoader worker process gets
        # its own file handle (h5py handles do not survive a fork well).
        self.h_dataset = None
        self.train = train
        with h5py.File(self.h5_root_dir, 'r') as file:
            # Hoist the key listing: the original rebuilt list(file) on every
            # comprehension iteration, which is O(n^2) in the number of keys.
            keys = list(file)
            # Keep the sample name (key minus the trailing '_flow') for every
            # key ending in 'flow'; the len()-based slices reproduce the
            # original slicing behaviour exactly.
            self.total = [k[:len(k) - 5] for k in keys if k[len(k) - 4:] == 'flow']

    def __len__(self):
        return len(self.total)

    def __getitem__(self, idx):
        """Return a dict with keys 'flow', 'spt' and 'label' for sample idx."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        if self.h_dataset is None:
            # Lazy open (see __init__); the handle stays open for the
            # lifetime of the worker process.
            self.h_dataset = h5py.File(self.h5_root_dir, 'r')
        name = self.total[idx]
        flow = self.h_dataset[name + '_flow'][()]
        spt = self.h_dataset[name + '_spt'][()]
        label = self.h_dataset[name + '_label'][()][0]

        spt, flow, label = self.transform(spt, flow, label, self.train)

        return {'flow': flow, 'spt': spt, 'label': label}

    def transform(self, spt, flow, label, train):
        """Jointly crop/flip spt and flow, then convert to normalized tensors.

        flow is assumed to be an (H, W, C) stack of single-channel flow
        frames -- TODO confirm against the HDF5 writer.
        """
        # Split the flow stack into per-frame 2-D images.
        flow_parse = [flow[..., i] for i in range(flow.shape[2])]

        toPil = transforms.ToPILImage()
        # NOTE(review): ColorJitter() with no arguments is a no-op; pass
        # brightness/contrast/saturation values if jitter is actually wanted.
        jitter = transforms.ColorJitter()
        spt = toPil(spt)
        flow_parse = [toPil(img) for img in flow_parse]

        if train:
            # Draw a single random crop (and flip decision) and apply it to
            # spt and every flow frame so they stay spatially aligned.
            i, j, h, w = transforms.RandomCrop.get_params(spt, (224, 224))
            spt = TF.crop(spt, i, j, h, w)
            flow_parse = [TF.crop(img, i, j, h, w) for img in flow_parse]

            if np.random.random() > 0.5:
                spt = TF.hflip(spt)
                flow_parse = [TF.hflip(img) for img in flow_parse]
        else:
            spt = TF.center_crop(spt, (224, 224))
            flow_parse = [TF.center_crop(img, (224, 224)) for img in flow_parse]

        spt = jitter(spt)
        spt = TF.to_tensor(spt)
        # ImageNet mean/std normalization.
        spt = TF.normalize(spt, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        flow_parse = [TF.to_tensor(img) for img in flow_parse]
        flow_parse = [TF.normalize(img, [0.5], [0.5]) for img in flow_parse]
        # Drop the singleton channel dimension: (1, H, W) -> (H, W).
        flow_parse = [torch.reshape(img, (img.size(1), img.size(2))) for img in flow_parse]
        # Stack frames into a (C, H, W) tensor; torch.stack takes the list
        # directly, so the original [img for img in flow_parse] copy is gone.
        flow = torch.stack(flow_parse, dim=0)
        label = torch.tensor(label).long()

        return spt, flow, label
          

As you can see, the dataset contains two different image types, spt and flow (they share the same labels).
Actually, I train these separately.
When I train spt and flow together (fusion), my loss value decreases.
But if I train them separately, my model doesn't learn anything.

Here is my Network architecture:

class TwoDCNN(nn.Module):
    """AlexNet-style CNN classifier for (in_channels x 224 x 224) inputs.

    Args:
        in_channels: number of input channels (3 for the RGB 'spt' stream,
            or the number of stacked flow frames for the 'flow' stream).
        num_classes: size of the output logit vector.
        drop: dropout probability applied after fc6 and fc7.
    """

    def __init__(self, in_channels, num_classes, drop=0.5):
        super(TwoDCNN, self).__init__()

        self.pool = nn.MaxPool2d(kernel_size=3, stride=2)
        self.relu = nn.ReLU()
        # Spatial sizes below assume a 224x224 input:
        # conv1 (k7, s2) -> 109x109; pool -> 54x54
        self.conv1 = nn.Conv2d(in_channels=in_channels, kernel_size=7, out_channels=96, stride=2)
        self.bn1 = nn.BatchNorm2d(num_features=96)
        # conv2 (k5, s2, p1) -> 26x26; pool -> 12x12
        self.conv2 = nn.Conv2d(in_channels=96, kernel_size=5, out_channels=256, stride=2, padding=1)
        self.bn2 = nn.BatchNorm2d(num_features=256)
        # conv3..conv5 (k3, s1, p1) keep 12x12
        self.conv3 = nn.Conv2d(in_channels=256, kernel_size=3, out_channels=512, stride=1, padding=1)
        self.bn3 = nn.BatchNorm2d(num_features=512)
        self.conv4 = nn.Conv2d(in_channels=512, kernel_size=3, out_channels=512, stride=1, padding=1)
        self.bn4 = nn.BatchNorm2d(num_features=512)
        self.conv5 = nn.Conv2d(in_channels=512, kernel_size=3, out_channels=512, stride=1, padding=1)
        self.bn5 = nn.BatchNorm2d(num_features=512)
        self.drop = nn.Dropout(p=drop)
        # Final pool -> 5x5, so the flattened feature vector is 512 * 5 * 5.
        self.fc6 = nn.Linear(in_features=5 * 5 * 512, out_features=4096)
        self.fc7 = nn.Linear(in_features=4096, out_features=2048)
        self.fc8 = nn.Linear(in_features=2048, out_features=num_classes)

    def forward(self, x):
        """Return raw class logits of shape (batch, num_classes)."""
        x = self.pool(self.relu(self.bn1(self.conv1(x))))
        x = self.pool(self.relu(self.bn2(self.conv2(x))))
        x = self.relu(self.bn3(self.conv3(x)))
        x = self.relu(self.bn4(self.conv4(x)))
        x = self.pool(self.relu(self.bn5(self.conv5(x))))
        # Flatten keeping the batch dimension explicit: the original
        # x.view(-1, 5 * 5 * 512) would silently fold a spatial-size
        # mismatch into the batch dimension instead of raising an error.
        x = x.view(x.size(0), -1)
        x = self.drop(self.relu(self.fc6(x)))
        x = self.drop(self.relu(self.fc7(x)))
        x = self.fc8(x)

        return x

And here is my train function:

def train_model(.......):
    # NOTE(review): placeholder signature from the forum post -- the real
    # function presumably receives (cnn, dataloaders, criterion, optimizer,
    # lr_sch, num_epochs, device, ...); confirm against the actual script.


    # Snapshot of the "best" weights. NOTE(review): this is taken from
    # `two_stream_cnn` while the loop below trains `cnn` -- confirm that
    # both names refer to the same model.
    best_model_wts = copy.deepcopy(two_stream_cnn.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):

        # Each epoch has a training and validation phase
        for phase in ['train', 'validation']:
            if phase == 'train':
                cnn.train()  # Set model to training mode
            else:
                cnn.eval()  # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for i, sample in enumerate(dataloaders[phase]):
                flow, spt, labels = sample['flow'].to(device), sample['spt'].to(device, dtype=torch.float), sample['label'].to(device)
                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss; only the 'spt'
                    # stream is fed to the network here, 'flow' is unused.
                    outputs = cnn(spt)

                    loss = criterion(outputs, labels)


                    _, preds = torch.max(outputs, 1)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                
                # NOTE(review): the scheduler is stepped once per *batch*;
                # CosineAnnealingLR / StepLR are conventionally stepped once
                # per epoch -- confirm the per-batch stepping is intended.
                if lr_sch != None and phase == 'train':
                    lr_sch.step()
                    
                # statistics: loss is weighted by batch size so epoch_loss
                # below is a true per-sample average.
                running_loss += loss.item() * spt.size(0)
                running_corrects += torch.sum(preds == labels.data)
 

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc * 100))

I tried different hyperparameters, like the learning rate, and I also trained this data with a ResNet-18.
But the result remained the same.

Here is my initial parameters:

    # Training hyper-parameters for the spatial ('spt') stream.
    num_epochs = 250
    batch_size = 2  # NOTE(review): batch size 2 gives very noisy BatchNorm statistics
    num_classes = 10
    drop_spt = 0.0  # dropout disabled for this stream


    CNN = TwoDCNN(3, num_classes, drop_spt)
    CNN.to(device)

    dataloaders = signLanDataLoader(batch_size)
    # Adam with its default learning rate (1e-3).
    optimizer_spt = optim.Adam(CNN.parameters())
    # T_max is set to the number of training batches, i.e. one cosine cycle
    # per epoch if the scheduler is stepped per batch -- confirm intended.
    lr_sch = lr_scheduler.CosineAnnealingLR(optimizer_spt, len(dataloaders['train']))
    criterion = nn.CrossEntropyLoss().to(device)

    train_model(......)
    
    

I also tried the StepLR learning-rate scheduler.

Here are results:

Epoch 1/250
----------
train Loss: 4.0784 Acc: 8.0460
validation Loss: 2.2950 Acc: 9.1954

Training complete in 0m 31s
Epoch 2/250
----------
train Loss: 2.3268 Acc: 10.3448
validation Loss: 2.3149 Acc: 9.1954

Training complete in 0m 10s
Epoch 3/250
----------
train Loss: 2.3422 Acc: 9.5785
validation Loss: 2.3080 Acc: 8.0460

Training complete in 0m 10s
Epoch 4/250
----------
train Loss: 2.3181 Acc: 9.5785
validation Loss: 2.3934 Acc: 11.4943

Training complete in 0m 10s
Epoch 5/250
----------
train Loss: 2.3862 Acc: 6.5134
validation Loss: 2.3104 Acc: 6.8966

Training complete in 0m 10s
Epoch 6/250
----------
train Loss: 2.3142 Acc: 13.0268
validation Loss: 2.3044 Acc: 11.4943

Training complete in 0m 10s
Epoch 7/250
----------
train Loss: 2.3369 Acc: 10.3448
validation Loss: 2.3203 Acc: 11.4943

Training complete in 0m 10s
Epoch 8/250
----------
train Loss: 2.3093 Acc: 11.4943
validation Loss: 2.3650 Acc: 5.7471

Training complete in 0m 10s
Epoch 9/250
----------
train Loss: 2.3147 Acc: 11.1111
validation Loss: 2.3100 Acc: 10.3448

Training complete in 0m 10s
Epoch 10/250
----------
train Loss: 2.3122 Acc: 12.6437
validation Loss: 2.3171 Acc: 9.1954

Training complete in 0m 10s
Epoch 11/250
----------
train Loss: 2.3077 Acc: 11.4943
validation Loss: 2.3102 Acc: 11.4943

Training complete in 0m 10s
Epoch 12/250
----------
train Loss: 2.3059 Acc: 11.4943
validation Loss: 2.3120 Acc: 5.7471

Training complete in 0m 10s
Epoch 13/250
----------
train Loss: 2.2962 Acc: 12.6437
validation Loss: 2.3100 Acc: 9.1954

Training complete in 0m 10s
Epoch 14/250
----------
train Loss: 2.3012 Acc: 11.8774
validation Loss: 2.3120 Acc: 8.0460

Training complete in 0m 10s
Epoch 15/250
----------
train Loss: 2.3003 Acc: 11.8774
validation Loss: 2.3107 Acc: 3.4483

Training complete in 0m 10s
Epoch 16/250
----------
train Loss: 2.2929 Acc: 10.3448
validation Loss: 2.3147 Acc: 5.7471

Training complete in 0m 10s
Epoch 17/250
----------
train Loss: 2.2957 Acc: 12.2605
validation Loss: 2.3013 Acc: 9.1954

Training complete in 0m 10s
Epoch 18/250
----------
train Loss: 2.3036 Acc: 14.1762
validation Loss: 2.4307 Acc: 5.7471

Training complete in 0m 10s
Epoch 19/250
----------
train Loss: 2.3066 Acc: 13.4100
validation Loss: 2.3023 Acc: 8.0460

Training complete in 0m 10s
Epoch 20/250
----------
train Loss: 2.2789 Acc: 12.6437
validation Loss: 2.6025 Acc: 10.3448

Training complete in 0m 10s
Epoch 21/250
----------
train Loss: 2.3840 Acc: 12.6437
validation Loss: 2.3095 Acc: 11.4943

Training complete in 0m 10s
Epoch 22/250
----------
train Loss: 2.2915 Acc: 11.4943
validation Loss: 2.3127 Acc: 11.4943

Training complete in 0m 10s
Epoch 23/250
----------
train Loss: 2.2920 Acc: 13.4100
validation Loss: 2.3110 Acc: 8.0460

Training complete in 0m 10s
Epoch 24/250
----------
train Loss: 2.2889 Acc: 12.2605
validation Loss: 2.3115 Acc: 8.0460

Training complete in 0m 10s
Epoch 25/250
----------
train Loss: 2.2901 Acc: 14.5594
validation Loss: 2.3141 Acc: 8.0460

Training complete in 0m 10s
Epoch 26/250
----------
train Loss: 2.2885 Acc: 13.4100
validation Loss: 2.3163 Acc: 8.0460

Training complete in 0m 10s
Epoch 27/250
----------
train Loss: 2.2903 Acc: 13.4100
validation Loss: 2.3204 Acc: 8.0460

Training complete in 0m 10s
Epoch 28/250
----------
train Loss: 2.2888 Acc: 13.4100
validation Loss: 2.3243 Acc: 8.0460

Training complete in 0m 10s
Epoch 29/250
----------
train Loss: 2.2905 Acc: 13.4100
validation Loss: 2.3209 Acc: 8.0460

Training complete in 0m 10s
Epoch 30/250
----------
train Loss: 2.2884 Acc: 13.4100
validation Loss: 2.3253 Acc: 8.0460

Training complete in 0m 10s
Epoch 31/250
----------
train Loss: 2.2905 Acc: 13.4100
validation Loss: 2.3187 Acc: 8.0460

Training complete in 0m 10s
Epoch 32/250
----------
train Loss: 2.2874 Acc: 13.4100
validation Loss: 2.3177 Acc: 8.0460

Training complete in 0m 10s
Epoch 33/250
----------
train Loss: 2.2888 Acc: 11.4943
validation Loss: 2.3186 Acc: 8.0460

Training complete in 0m 10s
Epoch 34/250
----------
train Loss: 2.2885 Acc: 11.1111
validation Loss: 2.3169 Acc: 8.0460

Training complete in 0m 10s
Epoch 35/250
----------
train Loss: 2.2891 Acc: 13.4100
validation Loss: 2.3179 Acc: 8.0460

Training complete in 0m 10s
Epoch 36/250
----------
train Loss: 2.2888 Acc: 13.4100
validation Loss: 2.3181 Acc: 8.0460

Training complete in 0m 10s
Epoch 37/250
----------
train Loss: 2.2892 Acc: 13.4100
validation Loss: 2.3195 Acc: 8.0460

Training complete in 0m 10s
Epoch 38/250
----------
train Loss: 2.2887 Acc: 13.4100
validation Loss: 2.3173 Acc: 8.0460

Training complete in 0m 10s
Epoch 39/250
----------
train Loss: 2.2895 Acc: 13.4100
validation Loss: 2.3135 Acc: 8.0460

Training complete in 0m 10s
Epoch 40/250
----------
train Loss: 2.2875 Acc: 13.4100
validation Loss: 2.3155 Acc: 8.0460

Training complete in 0m 10s
Epoch 41/250
----------
train Loss: 2.2895 Acc: 13.4100
validation Loss: 2.3127 Acc: 8.0460

Training complete in 0m 10s
Epoch 42/250
----------
train Loss: 2.2880 Acc: 13.4100
validation Loss: 2.3140 Acc: 8.0460

Training complete in 0m 10s
Epoch 43/250
----------
train Loss: 2.2900 Acc: 13.4100
validation Loss: 2.3183 Acc: 8.0460

Training complete in 0m 10s
Epoch 44/250
----------
train Loss: 2.2875 Acc: 13.4100
validation Loss: 2.3186 Acc: 8.0460

Training complete in 0m 10s
Epoch 45/250
----------
train Loss: 2.2901 Acc: 13.4100
validation Loss: 2.3221 Acc: 8.0460

Training complete in 0m 10s
Epoch 46/250
----------
train Loss: 2.2878 Acc: 13.4100
validation Loss: 2.3196 Acc: 8.0460

Training complete in 0m 10s
Epoch 47/250
----------
train Loss: 2.2889 Acc: 13.4100
validation Loss: 2.3198 Acc: 8.0460

Training complete in 0m 10s
Epoch 48/250
----------
train Loss: 2.2878 Acc: 13.4100
validation Loss: 2.3182 Acc: 8.0460

Training complete in 0m 10s
Epoch 49/250
----------
train Loss: 2.2899 Acc: 13.4100
validation Loss: 2.3180 Acc: 8.0460

Training complete in 0m 10s
Epoch 50/250
----------
train Loss: 2.2873 Acc: 13.4100
validation Loss: 2.3147 Acc: 8.0460

Training complete in 0m 10s
Epoch 51/250
----------
train Loss: 2.2899 Acc: 13.4100
validation Loss: 2.3127 Acc: 8.0460

Training complete in 0m 10s
Epoch 52/250
----------
train Loss: 2.2888 Acc: 13.4100
validation Loss: 2.3128 Acc: 8.0460

Training complete in 0m 10s
Epoch 53/250
----------
train Loss: 2.2894 Acc: 13.4100
validation Loss: 2.3151 Acc: 8.0460

Training complete in 0m 10s
Epoch 54/250
----------
train Loss: 2.2885 Acc: 13.4100
validation Loss: 2.3148 Acc: 8.0460

Training complete in 0m 10s
Epoch 55/250
----------
train Loss: 2.2886 Acc: 13.4100
validation Loss: 2.3166 Acc: 8.0460

Training complete in 0m 10s
Epoch 56/250
----------
train Loss: 2.2878 Acc: 13.4100
validation Loss: 2.3175 Acc: 8.0460

Training complete in 0m 10s
Epoch 57/250
----------
train Loss: 2.2893 Acc: 13.4100
validation Loss: 2.3238 Acc: 8.0460

Training complete in 0m 10s
Epoch 58/250
----------
train Loss: 2.2878 Acc: 13.4100
validation Loss: 2.3258 Acc: 8.0460

Training complete in 0m 10s
Epoch 59/250
----------
train Loss: 2.2897 Acc: 13.4100
validation Loss: 2.3202 Acc: 8.0460

Training complete in 0m 10s
Epoch 60/250
----------
train Loss: 2.2878 Acc: 13.4100
validation Loss: 2.3223 Acc: 8.0460

Training complete in 0m 10s
Epoch 61/250
----------
train Loss: 2.2891 Acc: 13.4100
validation Loss: 2.3192 Acc: 8.0460

Training complete in 0m 10s
Epoch 62/250
----------
train Loss: 2.2878 Acc: 13.4100
validation Loss: 2.3202 Acc: 8.0460

Training complete in 0m 10s
Epoch 63/250
----------
train Loss: 2.2887 Acc: 13.4100
validation Loss: 2.3199 Acc: 8.0460

Training complete in 0m 10s
Epoch 64/250
----------
train Loss: 2.2872 Acc: 13.4100
validation Loss: 2.3228 Acc: 8.0460

Training complete in 0m 10s
Epoch 65/250
----------
train Loss: 2.2888 Acc: 13.4100
validation Loss: 2.3227 Acc: 8.0460

Training complete in 0m 10s
Epoch 66/250
----------
train Loss: 2.2875 Acc: 13.4100
validation Loss: 2.3229 Acc: 8.0460

Training complete in 0m 10s
Epoch 67/250
----------
train Loss: 2.2893 Acc: 13.4100
validation Loss: 2.3257 Acc: 8.0460

Training complete in 0m 10s
Epoch 68/250
----------
train Loss: 2.2875 Acc: 13.4100
validation Loss: 2.3242 Acc: 8.0460

Training complete in 0m 10s
Epoch 69/250
----------
train Loss: 2.2897 Acc: 10.3448
validation Loss: 2.3213 Acc: 11.4943

What is the problem?

A batch size of 2 might be too small for batch norm layers, so you should either increase the batch size or maybe even remove the batch norm layers.
I cannot find anything obviously wrong in the code, so I would recommend to play around with the hyper-parameters more (such as removing the scheduler, changing the model architecture etc.).

1 Like

Thanks for the reply @ptrblck .
I removed the batch norm layers and decreased the lr to 0.0001.
Finally, my loss value is decreasing.
I also set the batch size to 16.

Yes, you can use a dropout layer after linear layers, which would also be the standard use case, wouldn’t it?

@Erhan Good to hear it helped. :slight_smile:

1 Like