Embeddings from SNLI with transitions produce an additional tensor

Hello all, I’m trying to create embeddings from the SNLI dataset with a shift-reduce parser. However, when I build the iterators with data.BucketIterator.splits() from torchtext, each batch contains an additional tensor for both the premise and the hypothesis, compared to the batches built without transitions. What is this tensor for? Is it useful for creating embeddings?
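For context, if I'm reading the torchtext source correctly, ShiftReduceField turns the binarized parse into a sequence of shift/reduce operations. A minimal sketch with a toy parse (not taken from SNLI, and the preprocessing may be version-dependent):

# My reading of ShiftReduceField's preprocessing:
# '(' is dropped, ')' becomes 'reduce', every word becomes 'shift'.
parse = '( ( the cat ) sat )'.split()
ops = ['reduce' if t == ')' else 'shift' for t in parse if t != '(']
print(ops)  # ['shift', 'shift', 'reduce', 'shift', 'reduce']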

Steps to reproduce:

import os
import torch
from torchtext import data
from torchtext import datasets

# Setup A: with transitions (binary parses exposed as shift/reduce ops)
inputs = datasets.nli.ParsedTextField(lower=True)
transitions = datasets.nli.ShiftReduceField()
answers = data.Field(sequential=False)

train, dev, test = datasets.SNLI.splits(inputs, answers, transitions)

# Setup B: without transitions (plain tokenized text)
# Note: Setup B overwrites Setup A's fields and splits, so run only one
# of the two setups before the code below.
inputs = data.Field(lower=True, tokenize='spacy')
answers = data.Field(sequential=False)
train, dev, test = datasets.SNLI.splits(inputs, answers)

# =====================================================================
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

vector_cache = os.path.join(os.getcwd(), '.vector_cache/input_vectors.pt')

inputs.build_vocab(train, dev, test)
if os.path.isfile(vector_cache):
    # Reuse previously cached vectors instead of reloading GloVe
    inputs.vocab.vectors = torch.load(vector_cache)
else:
    inputs.vocab.load_vectors('glove.42B.300d')
    os.makedirs(os.path.dirname(vector_cache), exist_ok=True)
    torch.save(inputs.vocab.vectors, vector_cache)
answers.build_vocab(train)

train_iter, dev_iter, test_iter = data.BucketIterator.splits(
    (train, dev, test), batch_size=128, device=device)

batch = next(iter(train_iter))
print(batch)
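With Setup A, printing the batch is where I see the extra tensors. A sketch of how I inspect them; the field names are my assumption, following torchtext's SNLI example code, and may differ across versions:

# Inspect the with-transitions batch (field names assumed from the
# torchtext SNLI/SPINN example; tensors are sequence-major as usual):
print(batch.fields)                      # premise, premise_transitions,
                                         # hypothesis, hypothesis_transitions,
                                         # label
print(batch.premise.shape)               # token ids: (seq_len, batch_size)
print(batch.premise_transitions.shape)   # one shift/reduce op per step,
                                         # roughly (2 * seq_len - 1, batch_size)

These premise_transitions / hypothesis_transitions tensors are what I mean by the additional tensor above.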