K-fold cross validation

Hi,
I'm using the following code on my data:
import torch
import torch.nn as nn
import torch.nn.functional as F

def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)

training_data = [
    ("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
    ("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
]
word_to_ix = {}
for sent, tags in training_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
print(word_to_ix)
tag_to_ix = {"DET": 0, "NN": 1, "V": 2}
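For example, prepare_sequence maps the first training sentence to a tensor of word indices:

sentence_in = prepare_sequence(training_data[0][0], word_to_ix)
print(sentence_in)  # tensor([0, 1, 2, 3, 4])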

EMBEDDING_DIM = 6
HIDDEN_DIM = 6
class LSTMTagger(nn.Module):

    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim

        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)

        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)

        # The linear layer that maps from hidden state space to tag space
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
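For context, I train the model one sentence at a time, roughly with the standard loop from the PyTorch sequence-models tutorial this code is based on (the 300 epochs and SGD with lr=0.1 are just the tutorial's defaults):

import torch.optim as optim

model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)

for epoch in range(300):
    for sentence, tags in training_data:
        model.zero_grad()
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = prepare_sequence(tags, tag_to_ix)
        # Forward pass, then backpropagate the per-sentence loss.
        tag_scores = model(sentence_in)
        loss = loss_function(tag_scores, targets)
        loss.backward()
        optimizer.step()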

I know that PyTorch itself can handle sentences of different lengths (the model above processes one sentence at a time).
The problem is that I want to add cross-validation to improve my results.
But when I run the following lines:

from sklearn.model_selection import cross_val_predict

sentences = []
tags = []
for sentence, target in training_data:
    sentence_in = prepare_sequence(sentence, word_to_ix)
    targets = prepare_sequence(target, tag_to_ix)
    sentences.append(sentence_in)
    tags.append(targets)

y_pred = cross_val_predict(net, sentences, tags, cv=3)

The error I get is:
ValueError: Dataset does not have consistent lengths.

What can I do to use cross-validation?
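Right now the only workaround I can think of is a manual fold loop with sklearn's KFold over sentence indices, retraining the model per fold, roughly like the sketch below (it assumes the real dataset has at least n_splits sentences, unlike the two-sentence toy data above, and it reuses the model, sentences, and tags defined earlier):

from sklearn.model_selection import KFold

kf = KFold(n_splits=3, shuffle=True, random_state=0)
for fold, (train_idx, val_idx) in enumerate(kf.split(sentences)):
    # Fresh model and optimizer for each fold.
    model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))
    loss_function = nn.NLLLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.1)

    # Train on this fold's sentences, one at a time, so the
    # variable-length tensors never have to be stacked into one array.
    for epoch in range(300):
        for i in train_idx:
            model.zero_grad()
            loss = loss_function(model(sentences[i]), tags[i])
            loss.backward()
            optimizer.step()

    # Evaluate per-token accuracy on the held-out sentences.
    correct, total = 0, 0
    with torch.no_grad():
        for i in val_idx:
            pred = model(sentences[i]).argmax(dim=1)
            correct += (pred == tags[i]).sum().item()
            total += len(tags[i])
    print(f"fold {fold}: accuracy {correct / total:.3f}")

Is a manual loop like this the right approach, or is there a way to make cross_val_predict accept variable-length sequences directly?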