Hi guys — I’m using PyTorch’s prediction template to measure my model’s accuracy, but when I add `print(f'{correct}/{total}')` to see the per-batch running results I get:
104/128
208/256
312/384
416/512
Since the number of correct predictions increases by exactly the same amount every batch, I suspect there’s a bug somewhere. What’s wrong?
I’ve added my data loader, dataset, and transform below:
# Evaluation-time preprocessing: resize the short side, center-crop to the
# model's input size, convert to a tensor, and normalize with dataset stats.
test_trans = transforms.Compose(
    [
        transforms.Resize(256),
        transforms.CenterCrop(_image_size),
        transforms.ToTensor(),
        transforms.Normalize(_mean, _std),
    ]
)

# Test split: JPEG images under ./tmp/test, labels from the solution CSV.
test_ds = DiabetesDataset(
    "./tmp/test", "retinopathy_solution.csv", transform=test_trans
)
# shuffle=False keeps evaluation order deterministic.
test_dl = torch.utils.data.DataLoader(
    test_ds, batch_size=128, shuffle=False, num_workers=4
)
class DiabetesDataset(Dataset):
    """Diabetic-retinopathy image dataset.

    Loads every ``*.jpeg`` under ``root_dir``. If ``csv_file`` is given, it is
    read with pandas and must contain ``image`` and ``level`` columns; items
    are then ``(image, level)`` pairs. Without a CSV, items are
    ``(image, filename_stem)`` pairs (useful for unlabeled inference).

    Args:
        root_dir: directory containing the ``.jpeg`` files (str or Path).
        csv_file: optional path to a CSV mapping image stem -> label level.
        transform: optional callable applied to each loaded image.
        loader: callable that opens an image file (defaults to
            torchvision's ``default_loader``).
    """

    def __init__(self, root_dir, csv_file, transform=None, loader=default_loader):
        # Path() accepts both str and Path, so no explicit type check is needed.
        self.root_dir = Path(root_dir)
        # sort for a deterministic sample order — glob order is
        # filesystem-dependent and can differ across runs/platforms.
        self.image_names = sorted(self.root_dir.glob('*.jpeg'))
        self.transform = transform
        if csv_file:
            labels = pd.read_csv(csv_file)
            # Map image stem -> integer severity level.
            self.labels = dict(zip(labels.image, labels.level))
        else:
            self.labels = None
        self.loader = loader

    def __len__(self):
        # Number of images found on disk, not number of CSV rows.
        return len(self.image_names)

    def __getitem__(self, index):
        image_path = self.image_names[index]
        image = self.loader(image_path)
        if self.transform:
            image = self.transform(image)
        # Compare against None explicitly: a present-but-empty labels dict is
        # falsy and would otherwise silently switch to the unlabeled path.
        if self.labels is not None:
            return image, self.labels[image_path.stem]
        return image, image_path.stem
Evaluation code adapted from PyTorch’s website:
# BUG FIX: the original pulled ONE batch via `images, labels = dataiter.next()`
# before the loop, then iterated `for data in test_dl:` without ever unpacking
# `data` — so the same first batch was scored on every iteration, which is why
# `correct` grew by exactly the same amount each time. Unpack each batch from
# the loader itself. (`dataiter.next()` is also removed in modern PyTorch; the
# priming lines are unnecessary and dropped.)
correct = 0
total = 0
model.eval()  # disable dropout/batch-norm updates for evaluation
with torch.no_grad():  # no gradients needed for inference
    for images, labels in test_dl:
        images, labels = images.to(DEVICE), labels.to(DEVICE)
        outputs = model(images)
        # Predicted class = index of the max logit along the class dimension.
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
        print(f'{correct}/{total}')
print('Accuracy of the model test images: %d %%' % (100 * correct / total))