Unexpected zero loss on first epoch

Hi, I’ve been trying these tutorials but always getting zero loss on first (and every) epoch unlike what the tutorial shows. I don’t know what I’m doing wrong. There are a few posts here on zero loss but I was unable to apply them to my case. Minimum test code is:

# model:
import torch
import torch.nn.functional as F

class Model(torch.nn.Module):
    def __init__(self, in_features=1, h1=3, out_features=1):
        super().__init__()
        self.fc1 = torch.nn.Linear(in_features, h1)
        self.out = torch.nn.Linear(h1, out_features)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.out(x)
        return x


# analyzer:
import torch
from pandas import DataFrame
from sklearn.model_selection import train_test_split
from model import Model

class Analyzer:
    def __init__(
        self,
        p_data: DataFrame,
        p_inputColumns: list[str],
        p_targetColumns: list[str],
    ):
        self.__data = p_data
        self.__inputColumns = p_inputColumns
        self.__targetColumns = p_targetColumns

    def analyze(
        self,
        p_trainingEpochs: int = 100,
    ) -> list[float]:
        # create a subset with only the input/output columns as numpy arrays:
        l_inputData = self.data[self.inputColumns].values
        l_targetData = self.data[self.targetColumns].values

        # create the training and testing data:
        l_trainInput, l_testInput, l_trainTarget, l_testTarget = train_test_split(
            l_inputData, l_targetData, test_size=0.2, random_state=41
        )
        l_trainInput = torch.FloatTensor(l_trainInput)
        l_testInput = torch.FloatTensor(l_testInput)
        l_trainTarget = torch.FloatTensor(l_trainTarget)
        l_testTarget = torch.FloatTensor(l_testTarget)

        # set the criterion and optimizer:
        l_model = Model(in_features=len(self.inputColumns), h1=8, out_features=len(self.targetColumns))
        l_criterion = torch.nn.CrossEntropyLoss()
        l_optimizer = torch.optim.Adam(l_model.parameters(), lr=0.01)

        # train the model:
        l_losses = []
        for epoch in range(p_trainingEpochs):
            l_predictedTarget = l_model.forward(l_trainInput)
            l_loss = l_criterion(l_predictedTarget, l_trainTarget)
            l_losses.append(l_loss.detach().numpy())

            if epoch % 10 == 0:
                print(f'Epoch {epoch} loss: {l_loss}')

            # backpropagation:
            l_optimizer.zero_grad()
            l_loss.backward()
            l_optimizer.step()

        return l_losses

    @property
    def data(self):
        return self.__data

    @property
    def inputColumns(self):
        return self.__inputColumns

    @property
    def targetColumns(self):
        return self.__targetColumns


# main (fails assertion):
        from analyzer import Analyzer
        from sklearn import datasets

        l_data = datasets.load_iris(as_frame=True).frame
        l_analyzer = Analyzer(
            l_data,
            [
                'sepal length (cm)',
                'sepal width (cm)',
                'petal length (cm)',
                'petal width (cm)',
            ],
            ['target']
        )
        l_losses = l_analyzer.analyze(p_trainingEpochs=20)
        assert l_losses[0] != 0.0

nn.CrossEntropyLoss is used for multi-class classification use cases. Your model is using a single output by default and I’m unsure what len(self.targetColumns) is. If it’s also 1 your model outputs logits for a single class only and thus cannot be wrong.

Indeed, my mistake was missing the 3 needed output neurons. I was only using one. You are right about targetColumns - my mistake was trying to output the values of 0…2 (classes) into a single output neuron instead of attempting to project the “confidence” of each input into separate (per-class) neurons.