Loss turns to zero in first epoch

I’m trying to train a convolutional neural network on 1D signal input. For this I have written a custom dataset loader. Afterwards I pass the data to my neural network architecture; however, I get zero loss and no change in accuracy (it stays at 100%). My assumption is that something is wrong with how my labels are passed to the network, but since it’s my first time using PyTorch I’m not sure. Here is my code:

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader

class Dataset:
    def __init__(self, data, target):
        self.data = data
        self.target = target
        
    def __len__(self):
        return self.data.shape[0]
    
    def __getitem__(self, idx):
        data = self.data[idx, :]
        target = self.target[idx]
        return (torch.tensor(data, dtype=torch.float32), torch.tensor(target, dtype=torch.long))

# train loader
dataset = Dataset(data = X_amp_tr_A.T, target = sl_clf_train)
train_loader = DataLoader(dataset, batch_size=32, shuffle=True)

# test loader
dataset = Dataset(data = X_amp_test_A.T, target = sl_clf_test)
test_loader = DataLoader(dataset, batch_size=32, shuffle=True)

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv11 = nn.Conv1d(9999, 128, 5)
        self.conv12 = nn.Conv1d(128, 128, 5)
        self.pool = nn.MaxPool1d(2, 2)
        
        self.conv21 = nn.Conv1d(128, 64,5)
        self.conv22 = nn.Conv1d(64,64,5)
        self.pool = nn.MaxPool1d(2, 2)
        
        self.fc1 = nn.Linear(20032, 1)
        self.fc3 = nn.Linear(1, 2)

    def forward(self, x):
        x = self.pool(F.relu(self.conv11(x)))
        x = self.pool(F.relu(self.conv12(x)))
        x = self.pool(F.relu(self.conv21(x)))
        x = self.pool(F.relu(self.conv22(x)))
        x = torch.flatten(x, 1) # flatten all dimensions except batch
        #print(x.size())
        x = F.relu(self.fc1(x))
        #x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


model = Net()#.type(torch.LongTensor)#.double()

def train(epoch):
    model.train()
    #exp_lr_scheduler.step()

    for batch_idx, (data, target) in enumerate(train_loader):
        #data, target = 
        
        if torch.cuda.is_available():
            data = data.cuda()
            target = target.cuda()
        
        optimizer.zero_grad()
        output = model(data)              # data is already float32 from the Dataset
        loss = criterion(output, target)  # target is already long from the Dataset
        
        #loss.requires_grad = True
        loss.backward()
        optimizer.step()
        
        #if (batch_idx + 1)% 2 == 0:
        print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                100. * (batch_idx + 1) / len(train_loader), loss.data))


def evaluate(data_loader):
    model.eval()
    loss = 0
    correct = 0
    total = 0
    for data, target in data_loader:
        #data, target = Variable(data, volatile=True), Variable(target)
        if torch.cuda.is_available():
            data = data.cuda()
            target = target.cuda()
        
        output = model(data)
        
        loss += F.cross_entropy(output, target, reduction='sum').item()
        
        
        #pred = output.data.max(1, keepdim=True)[1]
        #correct += pred.eq(target.data.view_as(pred)).cpu().sum()
        _, predicted = torch.max(output.data, 1)
        total += target.size(0)
        correct += (predicted == target).sum().item()
        accuracy = correct / total * 100
        
    loss /= len(data_loader.dataset)
        
    print('\nAverage loss: {:.4f}, Accuracy: ({:.3f}%)\n'.format(
        loss, accuracy))

n_epochs = 10

for epoch in range(n_epochs):
    train(epoch)
    #evaluate(train_loader)

I would be happy for any guidance.

I suggest using a modern IDE (PyCharm, VS Code), inserting a breakpoint somewhere inside the loop over train_loader and stepping through line by line in debug mode (including inside the model). You’ll be able to spot any funniness in the labels etc. in no time. No point trying to guess what the problem is! :slight_smile:
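
For a quick sanity check without the debugger, something like this (a rough sketch, reusing the train_loader and model from your post) already shows whether the labels and output shapes look sane:

# grab a single batch and inspect it
data, target = next(iter(train_loader))
print(data.shape, data.dtype)      # expect (batch, channels, length), float32
print(target.shape, target.dtype)  # expect (batch,), int64 for nn.CrossEntropyLoss
print(target.unique())             # if only one class ever appears, accuracy will sit at 100%

output = model(data)
print(output.shape)                # expect (batch, 2) raw logits, not probabilities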

Thank you, this already helped (yet so simple).