KeyError: None in arr = [self.vocab.stoi[x] for x in arr]

Hello all,
I am trying to classify greetings in sentences: if a sentence contains some kind of greeting it is labelled ‘1’, otherwise ‘0’. Training runs fine, but I hit this error during testing.

From other posts I gathered that this is related to the LabelField, specifically its UNK token, but I didn’t really understand the explanation. I tried what those posts suggested and it didn’t help.
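
For reference, the DataFrame I pass in looks roughly like this (made-up rows just to show the shape; only the ‘Selected’ text column and the ‘Greeting’ 0/1 label column are used by the code below):

import pandas as pd

# Made-up rows illustrating the expected layout: a 'Selected' text column
# and a binary 'Greeting' label column.
train_df = pd.DataFrame({
    'Selected': ['Hi team, good morning!', 'Please find the report attached.'],
    'Greeting': [1, 0],
})
test_df = pd.DataFrame({
    'Selected': ['Hello John, how are you?', 'The meeting is at 3 pm.'],
    'Greeting': [1, 0],
})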

Below is the code where I prepare the dataset:

import torch
from torchtext import data  # legacy torchtext API, matching the torchtext/data paths in the traceback
import config  # project config holding vocalb_size, batch_size


class RSICSDataset(data.Dataset):
    def __init__(self, df, fields, is_test=False, **kwargs):
        examples = []
        for i, row in df.iterrows():
            label = row.Greeting if not is_test else None  # test rows carry no label
            text = row.Selected
            examples.append(data.Example.fromlist([text, label], fields))

        super().__init__(examples, fields, **kwargs)

    @staticmethod
    def sort_key(ex):
        return len(ex.text)

    @classmethod
    def splits(cls, fields, train_df, val_df=None, test_df=None, **kwargs):
        train_data, val_data, test_data = (None, None, None)
        data_field = fields

        if train_df is not None:
            train_data = cls(train_df.copy(), data_field, **kwargs)
        if val_df is not None:
            val_data = cls(val_df.copy(), data_field, **kwargs)
        if test_df is not None:
            test_data = cls(test_df.copy(), data_field, True, **kwargs)  # True -> is_test

        return tuple(d for d in (train_data, val_data, test_data) if d is not None)

class BuildDataset:
    def __init__(self):
        self.TEXT = data.Field(tokenize='spacy', include_lengths=True)
        self.LABEL = data.LabelField(unk_token='UNK', dtype=torch.float, is_target=True)
        # self.LABEL = data.Field(unk_token=None, dtype=torch.int, is_target=True)

    def get_dataset(self, train_df, test_df):

        fields = [('text', self.TEXT), ('labels', self.LABEL)]

        self.train_ds, self.test_ds = RSICSDataset.splits(fields, train_df=train_df, test_df=test_df)
        return self.train_ds, self.test_ds

    def create_vocalb(self, train_ds, test_ds):
        if isinstance(train_ds, RSICSDataset):
            self.TEXT.build_vocab(train_ds,
                             max_size=config.vocalb_size,
                             vectors='glove.6B.200d',
                             unk_init=torch.Tensor.zero_)

            self.LABEL.build_vocab(train_ds)

            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

            train_iterator, test_iterator = data.BucketIterator.splits(
                (train_ds, test_ds),
                batch_size=config.batch_size,
                sort_within_batch=True,
                device=device)
            return train_iterator, test_iterator

        else:
            print('Unknown dataset format. !!')
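
For completeness, this is roughly how I wire it up from main.py (simplified; the model and trainer.py are left out, and train_df/test_df are pandas DataFrames like the toy ones above):

# Roughly how the classes above are used (simplified sketch)
builder = BuildDataset()
train_ds, test_ds = builder.get_dataset(train_df, test_df)
train_iter, test_iter = builder.create_vocalb(train_ds, test_ds)
# The error below comes up the first time test_iter is iterated,
# inside evaluate() in trainer.py.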

Here is the full error in detail:

 File "/home/garud/Documents/GCP/greetings_nlp/main.py", line 56, in visualize_data
    model_trained = trainer.train_net(model, train_iter, test_iter, epochs=1)
  File "/home/garud/Documents/GCP/greetings_nlp/trainer.py", line 27, in train_net
    test_acc = evaluate(model, test_iter)
  File "/home/garud/Documents/GCP/greetings_nlp/trainer.py", line 57, in evaluate
    for i, batch in enumerate(test_iter):
  File "/home/garud/Documents/GCP/greetings_nlp/venv/lib/python3.7/site-packages/torchtext/data/iterator.py", line 162, in __iter__
    yield Batch(minibatch, self.dataset, self.device)
  File "/home/garud/Documents/GCP/greetings_nlp/venv/lib/python3.7/site-packages/torchtext/data/batch.py", line 36, in __init__
    setattr(self, name, field.process(batch, device=device))
  File "/home/garud/Documents/GCP/greetings_nlp/venv/lib/python3.7/site-packages/torchtext/data/field.py", line 234, in process
    tensor = self.numericalize(padded, device=device)
  File "/home/garud/Documents/GCP/greetings_nlp/venv/lib/python3.7/site-packages/torchtext/data/field.py", line 335, in numericalize
    arr = [self.vocab.stoi[x] for x in arr]
  File "/home/garud/Documents/GCP/greetings_nlp/venv/lib/python3.7/site-packages/torchtext/data/field.py", line 335, in <listcomp>
    arr = [self.vocab.stoi[x] for x in arr]
KeyError: None

Thanks, any help means a lot.

Most likely some of the labels in test_ds are not recognized, i.e. they never appeared when the label vocab was built. Ideally, train_ds should contain every label that shows up in test_ds. If it doesn't, try this:

self.LABEL.build_vocab((train_ds, test_ds))
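
You could also quickly check what the label field actually ended up with and what the raw labels on the test side look like (just a diagnostic, adapt the names to your code):

print(self.LABEL.vocab.stoi)              # label -> index mapping after build_vocab
print([ex.labels for ex in test_ds[:5]])  # raw label values on the first few test examples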

Thanks Abhilash.
I tried what you suggested and got this error:


 File "/home/garud/Documents/GCP/greetings_nlp/main.py", line 33, in visualize_data
    test_ds)
  File "/home/garud/Documents/GCP/greetings_nlp/dataloader.py", line 74, in create_vocalb
    self.LABEL.build_vocab((train_ds, test_ds))
  File "/home/garud/Documents/GCP/greetings_nlp/venv/lib/python3.7/site-packages/torchtext/data/field.py", line 306, in build_vocab
    self.vocab = self.vocab_cls(counter, specials=specials, **kwargs)
  File "/home/garud/Documents/GCP/greetings_nlp/venv/lib/python3.7/site-packages/torchtext/vocab.py", line 75, in __init__
    words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
TypeError: '<' not supported between instances of 'RSICSDataset' and 'RSICSDataset'

build_vocab takes the datasets as separate positional arguments; wrapping them in a tuple makes torchtext treat the tuple itself as raw label data, so the RSICSDataset objects end up as keys in the vocab counter (hence the '<' not supported error when it tries to sort them). See if this works:

self.LABEL.build_vocab(train_ds, test_ds)

It didn’t work. I got the same vocab.stoi KeyError.