`arr = [self.vocab.stoi[x] for x in arr]` raises `KeyError: 'hinton city official confirm multiple building fire'` (note: the key is an entire sentence, so the vocabulary lookup appears to be receiving untokenized text).

Here is the whole processing script.

Note that I have processed other datasets with this function and trained my network without problems, but with this dataset I get this error.

# Load the first 10k Quora questions (text column + binary target label).
df = pd.read_csv('quora.csv', usecols=['question_text', 'target']).head(10000)

def clean_text(tweet):
    """Normalize one raw question/tweet string for vocabulary building.

    Pipeline: decode HTML entities -> replace dates/times/numbers with
    placeholder tokens -> collapse topic keywords to canonical tokens ->
    expand slang and contractions -> strip everything but lowercase
    letters, '?' and '!' -> drop English stopwords -> lemmatize -> stem.

    Parameters
    ----------
    tweet : str
        Raw text.  Non-string input (e.g. NaN from pandas) is coerced
        with ``str`` instead of raising ``TypeError``.

    Returns
    -------
    str
        Space-joined cleaned tokens; may be the empty string.
    """
    import re
    from nltk.corpus import stopwords
    from nltk.stem.wordnet import WordNetLemmatizer
    from nltk.stem import PorterStemmer

    # Robustness: pandas text columns can contain NaN (a float).
    if not isinstance(tweet, str):
        tweet = str(tweet)

    # Character entity references (the '&' may already be stripped).
    tweet = re.sub(r"&?gt;", ">", tweet)
    tweet = re.sub(r"&?lt;", "<", tweet)
    tweet = re.sub(r"&?amp;", "&", tweet)

    # Dates, times, hashtags, URLs and e-mail addresses -> placeholders.
    tweet = re.sub(r"\d*/\d*/?\d*", "y-m-d", tweet)
    tweet = re.sub(r"\d+:\d+", "hour", tweet)
    tweet = re.sub(r"\d+yr", "old year", tweet)
    tweet = re.sub(r"#\S*", "h-tag ", tweet)
    tweet = re.sub(r"http\S*", "URL", tweet)
    tweet = re.sub(r"\S*@\S*", "email", tweet)
    tweet = re.sub(r"\s{2,}", " ", tweet)

    # Topic keywords: collapse any token containing the keyword into one
    # canonical token so the downstream vocabulary stays small.
    tweet = re.sub(r"\S*[Mm]usic\S*", "music", tweet)
    tweet = re.sub(r"[Pp]rophet [Mm]uhammad", "prophet muhammad", tweet)
    tweet = re.sub(r"\S*[Ll]ove\S*", "love", tweet)
    # BUG FIX: the original pattern r"\S* (S|s)ummer\S*" contained a space
    # and therefore also deleted the word *before* "summer".
    tweet = re.sub(r"\S*[Ss]ummer\S*", "summer", tweet)
    tweet = re.sub(r"\S*NASA\S*", "nasa", tweet)
    tweet = re.sub(r"\S*book\S*", "book", tweet)
    tweet = re.sub(r"\S*[Ii]sland\S*", "Island", tweet)
    tweet = re.sub(r"20\d\d", "year", tweet)
    tweet = re.sub(r"\S*[Kk]ing\S*", "king", tweet)
    tweet = re.sub(r"\S*[Cc]it(ies|y)\S*", "city", tweet)
    tweet = re.sub(r"\S*RT\S*", "RT", tweet)
    tweet = re.sub(r"\S*[Hh]ealth\S*", "health", tweet)
    tweet = re.sub(r"\S*[Ss]ave\S*", "save bees", tweet)
    tweet = re.sub(r"\S*[Tt]raffic\S*", "traffic", tweet)
    tweet = re.sub(r"\S*[Kk]ashmir\S*", "kashmir", tweet)
    tweet = re.sub(r"\S*[Cc]onflict\S*", "conflict", tweet)
    tweet = re.sub(r"\S*[Ss]torm\S*", "storm", tweet)
    tweet = re.sub(r"\S*[Oo]il\S*", "oil", tweet)
    tweet = re.sub(r"\S*[Vv]ideo\S*", "video", tweet)  # (duplicate sub removed)
    tweet = re.sub(r"\S*[Ff]ire\S*", "fire", tweet)
    tweet = re.sub(r"\S*[Ww]eather\S*", "weather", tweet)
    tweet = re.sub(r"\S*[Ss]un\S+", "sun", tweet)
    tweet = re.sub(r"\S*(BBC|bbc)\S*", "bbc news", tweet)
    tweet = re.sub(r"\S*[Dd]ay\S*", "day", tweet)
    tweet = re.sub(r"\S*[Ee]ffect\S*", "effect", tweet)
    tweet = re.sub(r"\S*[Tt]error\S*", "terrorism", tweet)
    tweet = re.sub(r"\S*[Ss]ocial\S*", "social", tweet)
    # BUG FIX: the replacement was misspelled "wosrd" in the original.
    tweet = re.sub(r"\S*[Ww]ord\S*", "word", tweet)
    tweet = re.sub(r"\S*[Aa]ccident\S*", "accident", tweet)
    tweet = re.sub(r"\S*[Ss]port\S*", "sport", tweet)
    tweet = re.sub(r"\S*[Ii]ndia", "india", tweet)
    tweet = re.sub(r"\S*UK\S*", "UK", tweet)
    tweet = re.sub(r"\S*USA?\S*", "USA", tweet)
    tweet = re.sub(r"\w*[Nn]EWS", "NEWS", tweet)
    tweet = re.sub(r"\w*[Dd]am", "dam", tweet)
    tweet = re.sub(r"\w*[Gg]ames?", "Game", tweet)
    tweet = re.sub(r"\S*[Yy]ou[Tt]ube\S*", "youtube", tweet)

    # Slang / abbreviations.  BUG FIX: the original patterns either matched
    # inside longer words ("fr" turned "from" into "forom", "got" broke
    # "gotta") or swallowed the surrounding whitespace ("i k u" collapsed
    # into glued words like "iokyou" -- which produces out-of-vocabulary
    # tokens downstream).  Anchor with \b and keep spaces in replacements.
    tweet = re.sub(r"\bhrs\b", "hour", tweet)
    tweet = re.sub(r"\btxt\b", "text", tweet)
    tweet = re.sub(r"\s+k\s+", " ok ", tweet)
    tweet = re.sub(r"\s+b\s+", " be ", tweet)
    tweet = re.sub(r"\s+u(r|s)?\b", " you", tweet)
    tweet = re.sub(r"\bhom\b", "home", tweet)
    tweet = re.sub(r"\byous\b", "you", tweet)
    tweet = re.sub(r"\s+n\s+", " in ", tweet)
    tweet = re.sub(r"\bgot\b", "get", tweet)
    tweet = re.sub(r"\bgave\b", "give", tweet)
    tweet = re.sub(r"\bfr\b", "for", tweet)

    text = tweet.lower()

    # Irregular contractions that the generic suffix rules below would not
    # expand correctly; they must run first.  (The original had a second,
    # near-duplicate contraction block that was dead code because the
    # generic "n't" rule had already fired.)
    irregular = {
        r"i'm": "i am", r"he's": "he is", r"she's": "she is",
        r"it's": "it is", r"that's": "that is",
        # BUG FIX: the original mapped "what's" to "that is".
        r"what's": "what is",
        r"where's": "where is", r"how's": "how is", r"there's": "there is",
        r"here's": "here is", r"who's": "who is", r"let's": "let us",
        r"won't": "will not", r"can't": "cannot", r"ain't": "am not",
        r"y'all": "you all", r"'bout": "about", r"'til": "until",
    }
    for pattern, expansion in irregular.items():
        text = re.sub(pattern, expansion, text)

    # Generic contraction suffixes (cover wasn't, you'll, we've, i'd, ...).
    text = re.sub(r"'ll", " will", text)
    text = re.sub(r"'ve", " have", text)
    text = re.sub(r"'re", " are", text)
    text = re.sub(r"'d", " would", text)
    text = re.sub(r"n't", " not", text)
    text = re.sub(r"n'", "ng", text)  # goin' -> going

    # Keep '?' and '!' as separate tokens; collapse repeats first.
    text = re.sub(r"\?{2,}", " ?", text)
    text = re.sub(r"!{2,}", " !", text)
    text = re.sub(r"\?", " ?", text)
    text = re.sub(r"!", " !", text)

    # Remaining digits -> shared placeholder, then drop every character
    # that is not a lowercase letter, '?' or '!'.
    text = re.sub(r"\d+", " num ", text)
    text = re.sub(r"[^a-z?!]", " ", text)

    # Stopword removal + lemmatize + stem.  PERF FIX: build the stopword
    # *set* and the NLTK objects once, instead of re-creating the
    # stopword list / lemmatizer / stemmer for every single token as the
    # original comprehensions did.
    stop_words = set(stopwords.words('english'))
    lemmatize = WordNetLemmatizer().lemmatize
    stem = PorterStemmer().stem
    tokens = [stem(lemmatize(word)) for word in text.split()
              if word not in stop_words]
    return ' '.join(tokens)

# Clean every question in place using the shared preprocessing routine.
df['question_text'] = df['question_text'].apply(clean_text)

# Shuffle all rows reproducibly, then write the result without the index.
shuffled = df.sample(frac=1.0, random_state=42)
shuffled.to_csv('processedr.csv', index=False)