IndexError: index -9223372036854775808 is out of bounds for dimension 1 with size 2 (raised on the y_true_2 assignment in the code snippet below)

Hello,

I am trying to train a siamese network with Binary Cross Entropy.

I am getting the following error in train_epoch:

y_true_2[range(y_true_2.shape[0]), y_true.long()] = 1
IndexError: index -9223372036854775808 is out of bounds for dimension 1 with size 2
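
That index is exactly torch.iinfo(torch.int64).min, which, as far as I can tell, is what a NaN float turns into when cast to long, so I suspect a NaN ends up in y_true before the .long() call. A quick standalone check on my machine:

import torch

print(torch.iinfo(torch.int64).min)          # -9223372036854775808
print(torch.tensor([float('nan')]).long())   # tensor([-9223372036854775808]) on my setup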

Following is the code snippet for reference:

import numpy as np
import torch
import torch.nn as nn


def train_epoch(train_loader, model, loss_fn, optimizer, cuda, log_interval, metrics, step=None):
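    """Run one training epoch over train_loader and return (total_loss, metrics)."""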
    for metric in metrics:
        metric.reset()

    model.train()
    losses = []
    total_loss = 0


    for batch_idx, ((x0, x1), y) in enumerate(train_loader):
        y_true = y
        x0, x1, y_true = x0.cuda(), x1.cuda(), y.cuda()
 
        optimizer.zero_grad()
        output1, output2 = model(x0, x1)

        #print('output1 is:', output1)
        #print('output2 is:', output2)
        #print('y_true is', y_true)

        '''Similarity metric - CosineSimilarity (PairwiseDistance is left commented out)'''
        #p_dist = torch.nn.PairwiseDistance(keepdim=True)
        p_dist = torch.nn.CosineSimilarity(dim=1, eps=1e-08)

        dy = p_dist(output1, output2)
        # dy = torch.reshape(dy, (2,1))
        # print('dy_shape after squeeze', dy.size())
        dy = torch.nan_to_num(dy)
        # y_true = torch.reshape(y_true, (2,))
        # print('y_true_shape after squeeze', y_true.size())
        y_true = torch.nan_to_num(y_true)
        # print('dy is:', dy)

        '''Normalize dy to [0, 1] by dividing it by its max value'''

        maximum_dy = torch.max(dy)
        
        maximum_dy = torch.nan_to_num(maximum_dy)
        
        dy = dy / maximum_dy

        # print('dy after max is', dy)
        # print('y_true is', y_true)

        maximum_y_true = torch.max(y_true)
        maximum_y_true = torch.nan_to_num(maximum_y_true)

        # print('maximum y_true is', maximum_y_true)
        y_true = y_true / maximum_y_true

        # print(' y_true after max is', y_true)

        # print('dy_shape:', dy.size(), 'y_true_shape:', y_true.size())

        # print('dy_shape after squeeze', dy.size())

        #dy = torch.squeeze(dy, 1)


        # Two-column input for the BCE loss: column 0 = 1 - similarity, column 1 = similarity
        input_dy = torch.empty(dy.size(0), 2)
        #print(input_dy )
        input_dy[:, 0] = 1 - dy
        input_dy[:, 1] = dy
        # print('Output for loss function', input_dy)

        # One-hot encode the 0/1 labels as targets for the BCE loss
        y_true_2 = torch.zeros(dy.size(0), 2)
        #print(y_true_2)
        #y_true_2 = torch.nan_to_num(y_true_2)
        y_true_2[range(y_true_2.shape[0]), y_true.long()] = 1
        #y_true_2 = torch.nan_to_num(y_true_2)
        m = nn.Sigmoid()
        loss = loss_fn(m(input_dy), y_true_2)

        loss.backward()
        optimizer.step()

        losses.append(loss.item())
        total_loss += loss.item()


        input_dy_metric = torch.round(input_dy)


        for metric in metrics:
            metric(input_dy_metric, y_true_2)
            metric.total += y_true_2.shape[0]

        if batch_idx % log_interval == 0:
            message = 'Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                batch_idx, len(train_loader),
                100. * batch_idx / len(train_loader), np.mean(losses))
            for metric in metrics:
                message += '\t{}: {}'.format(metric.name(), metric.value())

            #print(message)
            losses = []


    total_loss /= (batch_idx + 1)
    return total_loss, metrics
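
In isolation, the one-hot assignment that fails seems fine as long as the labels are 0/1 floats, and it only breaks for me when a label is NaN (a minimal sketch with made-up labels, not my real data):

import torch

y_true = torch.tensor([0., 1., 1., 0.])                  # hypothetical 0/1 labels
y_true_2 = torch.zeros(y_true.size(0), 2)
y_true_2[range(y_true_2.shape[0]), y_true.long()] = 1    # works: one-hot targets

y_true = torch.tensor([0., float('nan')])                # a NaN label, e.g. if max(y_true) was 0
y_true_2 = torch.zeros(y_true.size(0), 2)
y_true_2[range(y_true_2.shape[0]), y_true.long()] = 1    # raises the same IndexError for me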

Can anyone please help here?
Thanks in advance

Could you post a minimal, executable code snippet which would reproduce this issue, please?
Also, in case you are not using the latest PyTorch release, could you update it and rerun your code, please?