RoBERTa from the transformers library giving the same output during evaluation

I am using RoBERTa from the transformers library. I trained the model for a classification task by taking the pooler_output and passing it to a classifier head. During prediction, I get the same prediction for every input. What could be the possible reason?

This is my model:

import torch.nn as nn

class Roberta_Arch(nn.Module):
    def __init__(self, roberta):
        super(Roberta_Arch, self).__init__()
        self.roberta = roberta  # pretrained RobertaModel backbone
        self.classifier = nn.Sequential(
            nn.Linear(768, 256),  # pooler_output of roberta-base is 768-dim
            nn.ReLU(),
            nn.Dropout(0.15),
            nn.Linear(256, 2),
            nn.LogSoftmax(dim=1)  # log-probabilities (typically paired with NLLLoss)
        )

    def forward(self, sent_id, mask):
        output = self.roberta(sent_id, attention_mask=mask)
        return self.classifier(output.pooler_output)
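
For reference, the backbone comes from the transformers library and is wrapped as above. A minimal sketch of the setup (the roberta-base checkpoint and the variable names are my assumptions; the question does not show the loading code):

import torch
from transformers import RobertaModel

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
roberta = RobertaModel.from_pretrained('roberta-base')  # assumed checkpoint
model = Roberta_Arch(roberta).to(device)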

And this is my evaluation function:

import torch
from tqdm import tqdm

def evaluate():
    print('\n\nPredicting\n')
    model.eval()  # disables dropout so inference is deterministic
    total_preds = []

    for step, batch in enumerate(tqdm(test_dataloader)):
        batch = [t.to(device) for t in batch]  # move tensors to the target device
        sent_id, mask = batch

        with torch.no_grad():  # no gradients needed for prediction
            preds = model(sent_id, mask)
            preds = preds.cpu().numpy()  # .detach() is redundant inside no_grad
            print(preds)
            total_preds.append(preds)

    return total_preds
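
Since the classifier ends in LogSoftmax, each row of preds holds log-probabilities over the two classes, and the predicted label is the argmax of each row. A minimal sketch of turning the returned per-batch arrays into labels (the variable names here are mine):

import numpy as np

total_preds = evaluate()
all_preds = np.concatenate(total_preds, axis=0)  # stack the per-batch arrays
pred_labels = np.argmax(all_preds, axis=1)       # class index for each example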