Unknown resampling filter error when trying to create my own dataset with PyTorch

I am trying to create a CNN with data augmentation in PyTorch to classify dogs and cats. The issue is that whenever I load my dataset and enumerate through it, I keep getting this error:

Traceback (most recent call last):
  File "<ipython-input-55-6337e0536bae>", line 75, in <module>
    for i, (inputs, labels) in enumerate(trainloader):
  File "/usr/local/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 188, in __next__
    batch = self.collate_fn([self.dataset[i] for i in indices])
  File "/usr/local/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 188, in <listcomp>
    batch = self.collate_fn([self.dataset[i] for i in indices])
  File "/usr/local/lib/python3.6/site-packages/torchvision/datasets/folder.py", line 124, in __getitem__
    img = self.transform(img)
  File "/usr/local/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 42, in __call__
    img = t(img)
  File "/usr/local/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 147, in __call__
    return F.resize(img, self.size, self.interpolation)
  File "/usr/local/lib/python3.6/site-packages/torchvision/transforms/functional.py", line 197, in resize
    return img.resize((ow, oh), interpolation)
  File "/usr/local/lib/python3.6/site-packages/PIL/Image.py", line 1724, in resize
    raise ValueError("unknown resampling filter")
ValueError: unknown resampling filter

and I really don't know what's wrong with my code. I have provided the code below:

# Creating the CNN

# Importing the libraries
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torchvision
from torchvision import transforms

#Creating the CNN Model
class CNN(nn.Module):
    def __init__(self, nb_outputs):
        super(CNN, self).__init__() #calls the parent constructor so we can use all the tools in nn.Module
        #making the 3 convolutional layers that will be used in the convolutional neural network
        self.convolution1 = nn.Conv2d(in_channels = 1, out_channels = 32, kernel_size = 5) #kernel_size -> the dimension of the feature detector, e.g. kernel_size = 5 => a 5x5 feature detector
        self.convolution2 = nn.Conv2d(in_channels = 32, out_channels = 64, kernel_size = 2)
        #making 2 full connections one to connect the inputs of the ANN to the hidden layer and another to connect the hidden layer to the outputs of the ANN
        self.fc1 = nn.Linear(in_features = self.count_neurons((1, 64,64)), out_features = 40)
        self.fc2 = nn.Linear(in_features = 40, out_features = nb_outputs)

    def count_neurons(self, image_dim):
        x = Variable(torch.rand(1, *image_dim)) #this variable represents a fake image that lets us compute the number of neurons
        #in order to pass the elements of the tuple image_dim into our function as a list of arguments we need to add a * before image_dim
        #since x will be going into our neural network we need to convert it into a torch variable using the Variable() function
        x = F.relu(F.max_pool2d(self.convolution1(x), 3, 2)) #first we apply the convolution to x, then max-pool the resulting feature maps, then activate all the neurons in the pooling layer
        x = F.relu(F.max_pool2d(self.convolution2(x), 3, 2)) #the signals are now propagated through the second convolutional layer
        #Now to flatten x to obtain the number of neurons in the flattening layer
        return x.data.view(1, -1).size(1) #this flattens x into one long vector and returns its size; that size is the number of neurons fed into the ANN
        #even though x is not a real image from the dataset, the size of the flattened vector depends only on the dimensions of the input image, so we can just give x the same dimensions as a real image

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.convolution1(x), 3, 2)) #same convolution -> max-pooling -> ReLU sequence as in count_neurons, now applied to the real input batch
        x = F.relu(F.max_pool2d(self.convolution2(x), 3, 2)) 
        #flattening layer of the CNN
        x = x.view(x.size(0), -1)
        #x is now the inputs to the ANN
        x = F.relu(self.fc1(x)) #we propagate the signals from the flattening layer to the fully connected layer and activate the neurons by breaking the linearity with the relu function
        x = F.sigmoid(self.fc2(x))
        #x is now the output neurons of the ANN
        return x 

train_tf = transforms.Compose([transforms.RandomHorizontalFlip(),
                               transforms.Resize(64,64),
                               transforms.RandomRotation(20),
                               transforms.RandomGrayscale(.2),
                               transforms.ToTensor()])   

test_tf = transforms.Compose([transforms.Resize(64,64),
                              transforms.ToTensor()])    

training_set = torchvision.datasets.ImageFolder(root = './dataset/training_set',
                                                transform = train_tf)
test_set = torchvision.datasets.ImageFolder(root = './dataset/test_set',
                                                transform = test_tf)

trainloader = torch.utils.data.DataLoader(training_set, batch_size=32,
                                             shuffle=True, num_workers=0)
testloader = torch.utils.data.DataLoader(test_set, batch_size= 32,
                                             shuffle=False, num_workers=0)


#training the model
cnn = CNN(1)
cnn.train()
loss = nn.BCELoss()
optimizer = optim.Adam(cnn.parameters(), lr = 0.001) #the optimizer => Adam optimizer
nb_epochs = 25

for epoch in range(nb_epochs):
    train_loss = 0.0
    train_acc = 0.0
    total = 0.0
    for i, (inputs, labels) in enumerate(trainloader):
        inputs, labels = Variable(inputs), Variable(labels)
        cnn.zero_grad()
        outputs = cnn(inputs)
        loss_error = loss(outputs, labels)
        loss_error.backward() #backpropagate the error before the weight update
        optimizer.step()

        _, pred = torch.max(outputs.data, 1)
        total += labels.size(0)
        train_loss += loss_error.data[0]
        train_acc += (pred == labels).sum()

    train_loss = train_loss/len(trainloader)
    train_acc = train_acc/total
    print('Epoch: %d, loss: %.4f, accuracy: %.4f' %(epoch+1, train_loss, train_acc))

The folder arrangement for the code is ./dataset/training_set, and inside the training_set folder are two more folders, one for all the cat images and the other for all the dog images. Each image is named either dog.xxxx.jpg or cat.xxxx.jpg, where xxxx is a number, so the cat images run from cat.1.jpg up to cat.4000.jpg. The test_set folder follows the same format. There are 8000 training images and 2000 test images. If anyone can point out my error I would greatly appreciate it.
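So the layout looks like this (the subfolder names here are just placeholders; ImageFolder uses each subfolder name as the class label):

dataset/
    training_set/
        cats/
            cat.1.jpg ... cat.4000.jpg
        dogs/
            dog.1.jpg ... dog.4000.jpg
    test_set/
        cats/
        dogs/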

Thank you


Try to set the desired size in transforms.Resize as a tuple:

transforms.Resize((64, 64))

Otherwise PIL uses the second argument (in your case 64) as the interpolation method, and 64 is not a valid resampling filter.
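To make it concrete, here is a minimal sketch (the filter constants below are PIL's standard values; the pipelines are the ones from your post with only the Resize call changed):

from PIL import Image
from torchvision import transforms

# PIL resampling filters are small integer constants, e.g.:
# Image.NEAREST == 0, Image.BILINEAR == 2, Image.BICUBIC == 3
# Resize(64, 64) is parsed as Resize(size=64, interpolation=64),
# and since no filter has the value 64, PIL raises
# "ValueError: unknown resampling filter".

# Corrected pipelines, with the size passed as a tuple:
train_tf = transforms.Compose([transforms.RandomHorizontalFlip(),
                               transforms.Resize((64, 64)),
                               transforms.RandomRotation(20),
                               transforms.RandomGrayscale(.2),
                               transforms.ToTensor()])

test_tf = transforms.Compose([transforms.Resize((64, 64)),
                              transforms.ToTensor()])

Note that Resize(64) with a single int is also valid: it resizes the shorter edge to 64 and keeps the aspect ratio, while Resize((64, 64)) forces an exact 64x64 output.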
