Data Iterator in a loop

Hi, I use the following structure to load training data for my model.


for a in range(101):
    train_data_iterator = load_text_train_data(args.shard_path, src_vocab, trg_vocab, args.lang_pair, args.batch_size, a)
    total_step = len(train_data_iterator)
    for i, (images, sources, targets, slengths, tlengths) in enumerate(train_data_iterator):

Basically, I have to load 94 million samples for training, which is too much to load at once. For this reason, I split the data into 100 pieces and load each piece into the data iterator inside the loop.

My data loading class is as follows:

import codecs
import numpy as np
import torch
from torch.utils.data import Dataset

class LoadUniModal(Dataset):
	sources = []
	targets = []
	maxlen = 0
	slengths = []
	tlengths = []

	def __init__(self, src, trg, src_vocab, trg_vocab):
		self.src_vocab = src_vocab
		self.trg_vocab = trg_vocab

		with codecs.open(src, encoding="utf-8") as f:
			for line in f:
				tokens = line.replace("\n", "").split()
				self.maxlen = max(self.maxlen, len(tokens))
				self.sources.append(tokens)
				self.slengths.append(len(tokens)+2)
		with codecs.open(trg, encoding="utf-8") as f:
			for line in f:
				tokens = line.replace("\n", "").split()
				self.maxlen = max(self.maxlen, len(tokens))
				self.targets.append(tokens)
				self.tlengths.append(len(tokens)+2)

		self.maxlen += 1

	# Override to give PyTorch access to any sample in the dataset
	def __getitem__(self, index):

		# Source sentence processing
		tokens = self.sources[index]
		ntokens = [self.src_vocab['<START>']]
		for a in range(self.maxlen):
			if a <= (len(tokens) - 1):
				if tokens[a] in self.src_vocab.keys():
					ntokens.append(self.src_vocab[tokens[a]])
				else:
					ntokens.append(self.src_vocab['<UNK>'])
			elif a == len(tokens):
				ntokens.append(self.src_vocab['<END>'])
			elif a > len(tokens):
				ntokens.append(self.src_vocab['<PAD>'])

		source = torch.from_numpy(np.asarray(ntokens)).long()

		# Target sentence processing
		tokens = self.targets[index]
		ntokens = [self.trg_vocab['<START>']]
		for a in range(self.maxlen):
			if a <= (len(tokens) - 1):
				if tokens[a] in self.trg_vocab.keys():
					ntokens.append(self.trg_vocab[tokens[a]])
				else:
					ntokens.append(self.trg_vocab['<UNK>'])
			elif a == len(tokens):
				ntokens.append(self.trg_vocab['<END>'])
			elif a > len(tokens):
				ntokens.append(self.trg_vocab['<PAD>'])

		target = torch.from_numpy(np.asarray(ntokens)).long()

		slength = self.slengths[index]
		tlength = self.tlengths[index]

		return [0], source, target, slength, tlength

	def __len__(self):
		return len(self.sources)

and the “load_text_train_data” function is as follows:

import ast
import os
from torch.utils.data import DataLoader

def load_text_train_data(train_dir, src_vocab, trg_vocab, lang_pair, batch_size, num):
	tpl = ast.literal_eval(lang_pair)
	slang = tpl[1]
	tlang = tpl[2]

	strain_file = os.path.join(train_dir, "train"+slang+"."+str(num))
	ttrain_file = os.path.join(train_dir, "train"+tlang+"."+str(num))

	data_iter = LoadUniModal(strain_file, ttrain_file, src_vocab, trg_vocab)
	data_iter = DataLoader(data_iter, batch_size=batch_size)

	return data_iter

The issue is that, although I pass a different data path to the data iterator each time, it does not delete the previously loaded data; it loads all the previously given data together with the current data. Basically, it doesn’t clear the history.

Let’s say the “train.de.1” file has 50000 batches and the “train.de.2” file has 40000 batches: the first data iterator shows a total length of 50000, but the second shows 90000. The previous 50000 entries are not deleted.
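For reference, here is a minimal standalone sketch (the ToyDataset class and its data are made up, with no file I/O) that shows the same accumulating length across two loads when the list attributes are declared at class level, as in LoadUniModal above:

from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
	# lists declared on the class, as in LoadUniModal
	sources = []
	targets = []

	def __init__(self, src_lines, trg_lines):
		for s, t in zip(src_lines, trg_lines):
			self.sources.append(s.split())
			self.targets.append(t.split())

	def __getitem__(self, index):
		return self.sources[index], self.targets[index]

	def __len__(self):
		return len(self.sources)

shard1 = ToyDataset(["a b c"] * 5, ["x y"] * 5)
print(len(DataLoader(shard1, batch_size=1)))  # prints 5

shard2 = ToyDataset(["d e"] * 4, ["z w"] * 4)
print(len(DataLoader(shard2, batch_size=1)))  # prints 9, not 4 -- the first shard's data is still there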

This is a problem for me. How can I fix it?