I have a dataset of 12156 * 1954 images, which is too large to fit in CPU/GPU memory, and training a single epoch takes a very long time.
Is there any way I can still train a PyTorch model on it?
import numpy as np
import torch
from torch.utils.data import Dataset

class DataSet(Dataset):
    def __init__(self, fname, index):
        # fname: list of .npz file paths; index: per-sample index into each file
        self.dataset = fname
        self.dataset_num = index

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        # Load a single file per sample instead of holding everything in memory
        datas = np.load(self.dataset[idx])
        x = datas['data'][self.dataset_num[idx]].astype(np.float32)
        tens = torch.from_numpy(x).reshape(100 * 100)  # flatten each 100x100 image to a 10000-dim vector
        inds = datas['inds'][self.dataset_num[idx]].tolist()
        return tens, inds
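For context, here is a minimal sketch of how I intend to feed this Dataset to a DataLoader (file_list and sample_indices are hypothetical placeholders for my real file paths and per-file indices). My understanding is that since __getitem__ loads one file at a time, the DataLoader only materializes a few batches in memory at once rather than the whole dataset:

from torch.utils.data import DataLoader

# Hypothetical placeholders: replace with the real .npz paths and per-file indices
file_list = ['sample_0.npz', 'sample_1.npz']
sample_indices = [0, 0]

dataset = DataSet(file_list, sample_indices)

# num_workers > 0 loads samples in background worker processes;
# pin_memory=True speeds up host-to-GPU transfers
loader = DataLoader(dataset, batch_size=64, shuffle=True,
                    num_workers=4, pin_memory=True)

for tens, inds in loader:
    pass  # forward/backward pass on one batch at a time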