import time
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader

transform = transforms.Compose([transforms.Resize(224),
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                                     std=[0.5, 0.5, 0.5])])
train_dataset = torchvision.datasets.ImageFolder(root=DATASET_PATH + '/train/train_data', transform=transform)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=2)
print("Found {} number of mini-batches".format(len(train_loader)))
t0 = time.time()
total_step = len(train_loader)
for epoch in range(nb_epoch):
    avg_loss = 0.0
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 224 and 475 in dimension 2 at /pytorch/aten/src/TH/generic/THTensorMath.cpp:3616
I applied the Resize transform and loaded the images during training, and this error came up. It says that the image sizes are different. How can this happen when I resized all of the images to the same size?
Any advice would be welcome!
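In case it helps narrow things down, here is a minimal check I put together (a sketch that reuses the DATASET_PATH and transform from above) to print the tensor shapes of a few individual samples before the DataLoader stacks them into a batch:

check_dataset = torchvision.datasets.ImageFolder(root=DATASET_PATH + '/train/train_data', transform=transform)
# Look at a few samples directly, before any batching happens.
for idx in range(5):
    image, label = check_dataset[idx]
    print(idx, image.shape)  # I expect torch.Size([3, 224, 224]) for every sample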