Pytorch torchvision.datasets.ImageFolder FileNotFoundError: Found no valid file for the classes

Tried to load training data with PyTorch torch.datasets.ImageFolder in Colab. I am using Wiki crop data for age and gender prediction.
I have tried to fix this error, but I couldn't.


I have uploaded my code here,
'''

# Load the metadata (age, gender, image path) produced by the .mat -> .csv step.
WikiCrop_df = pd.read_csv('/content/drive/MyDrive/wiki_crop/wikiCrop.csv')

#Define the path to the directory containing the images
src_folder = '/content/drive/MyDrive/wiki_crop/'

# Define the path to the train directory
train_dir = '/content/drive/MyDrive/WikiCrop_TrainDirData'

# Define the path to the test directory
test_dir = '/content/drive/MyDrive/WikiCrop_TestDirData'

import os
from shutil import copyfile  # was missing in the original: copyfile is used below

# Create the train and test directories (exist_ok avoids a crash on re-runs)
os.makedirs(train_dir, exist_ok=True)
os.makedirs(test_dir, exist_ok=True)

# NOTE(fix): the original code pre-created flat top-level <age> AND <gender>
# folders from an undefined `filtered_df`. That had two consequences:
#   1. the nested <age>/<gender>/ destination used by the copy loops below
#      was never created, so copyfile raised FileNotFoundError (which was
#      not caught -- only FileExistsError was);
#   2. the extra empty top-level <gender> folders became empty ImageFolder
#      classes, which is exactly what triggers
#      "FileNotFoundError: Found no valid file for the classes".
# The destination directories are now created on demand in _copy_rows().

# Split the data frame into train and test sets, with a specified ratio (e.g., 80/20):
train_ratio = 0.8

train_df = WikiCrop_df.sample(frac=train_ratio, random_state=42)
test_df = WikiCrop_df.drop(train_df.index)


def _copy_rows(df, dst_root):
    """Copy every image referenced by *df* into dst_root/<age>/<gender>/<file>.

    Creates each nested destination directory on demand.  Copy failures
    (missing source file, already-existing destination) are reported and
    skipped so one bad row does not abort the whole run.
    """
    for _, row in df.iterrows():
        # row['path'] is stored as an absolute path in this dataset, so
        # os.path.join(src_folder, abs_path) simply returns abs_path; keep
        # the join for relative paths but use only the basename for the
        # destination file name (joining an absolute path onto dst would
        # silently discard the destination prefix).
        src_path = os.path.join(src_folder, str(row['path']))
        dst_dir = os.path.join(dst_root, str(row['age']), str(row['gender']))
        os.makedirs(dst_dir, exist_ok=True)
        dst_path = os.path.join(dst_dir, os.path.basename(str(row['path'])))
        if src_path != dst_path:
            try:
                copyfile(src_path, dst_path)
            except (FileExistsError, FileNotFoundError) as e:
                # best-effort copy: report and continue instead of aborting
                print(f"Error copying {src_path} to {dst_path}: {e}")


_copy_rows(train_df, train_dir)
_copy_rows(test_df, test_dir)

#train and test data directory
train_data_dir = '/content/drive/MyDrive/WikiCrop_TrainDirData'
test_data_dir = '/content/drive/MyDrive/WikiCrop_TestDirData'

import torch
import torchvision
from torchvision import transforms
from torchvision.datasets import ImageFolder

# Load the train and test data.  ImageFolder treats each top-level
# subfolder (here: the age label) as one class and recursively collects
# the image files found below it, so the <age>/<gender>/<file> layout
# yields age classes.
train_dataset = ImageFolder(train_data_dir, transform = transforms.Compose([
transforms.Resize((112,112)), transforms.ToTensor()
]))

test_dataset = ImageFolder(test_data_dir, transform = transforms.Compose([
transforms.Resize((112,112)), transforms.ToTensor()
]))

Please tell me where my code is wrong. I have been trying to fix this error for a long time, but I couldn't. I have uploaded a picture of how my WikiCrop_TrainDirData directory looks.


Thank you in advance.

ImageFolder expects images in each of the class folders supporting the file extensions mentioned in the error message.
Could you check what each folder contains and could you make sure these are valid images?

These folders do not contain anything. For the Wiki Crop data, I have converted the .mat file to a .csv.
'''
# Convert the WIKI crop .mat metadata into a flat CSV of (age, gender, path).
# NOTE(fix): the original paste used curly "smart" quotes (which are not
# valid Python string delimiters) and had lost all loop/try indentation,
# so it was a syntax error.  This is the same script restored to valid,
# properly indented Python with unchanged behavior.
import numpy as np
from scipy.io import loadmat
import pandas as pd
import datetime as date
from dateutil.relativedelta import relativedelta

cols = ['age', 'gender', 'path']

wiki_mat = '/content/drive/MyDrive/wiki_crop/wiki.mat'

wiki_data = loadmat(wiki_mat)

del wiki_mat

wiki = wiki_data['wiki']

# The MATLAB struct comes back as nested object arrays; the numeric
# indices below select the photo_taken / full_path / gender fields.
wiki_photo_taken = wiki[0][0][1][0]
wiki_full_path = wiki[0][0][2][0]
wiki_gender = wiki[0][0][3][0]
#wiki_face_score1 = wiki[0][0][6][0]
#wiki_face_score2 = wiki[0][0][7][0]

wiki_path = []

for path in wiki_full_path:
    wiki_path.append('/content/drive/MyDrive/wiki_crop/' + path[0])

wiki_dob = []

# Filenames look like <id>_<YYYY-MM-DD>_<year>.jpg; because 'wiki_crop'
# itself contains an underscore, index [2] of the full-path split lands
# on the date of birth.  Fragile -- splitting os.path.basename(file)
# would be safer.
for file in wiki_path:
    wiki_dob.append(file.split('_')[2])

wiki_age = []

for i in range(len(wiki_dob)):
    try:
        d1 = date.datetime.strptime(wiki_dob[i], '%Y-%m-%d')
        d2 = date.datetime.strptime(str(wiki_photo_taken[i]), '%Y')
        rdelta = relativedelta(d2, d1)
        diff = rdelta.years
    except Exception as ex:
        # Unparseable dates get the sentinel age -1.
        print(ex)
        diff = -1
    wiki_age.append(diff)

#final_imdb = np.vstack((imdb_age, imdb_genders, imdb_path, imdb_face_score1, imdb_face_score2)).T
# NOTE: vstack over mixed int/float/str columns coerces everything to strings.
final_wiki = np.vstack((wiki_age, wiki_gender, wiki_path)).T

final_wiki_df = pd.DataFrame(final_wiki)

final_wiki_df.columns = cols

meta = pd.concat([final_wiki_df])

#meta = meta[meta['face_score1'] != '-inf']
#meta = meta[meta['face_score2'] == 'nan']

#meta = meta.drop(['face_score1', 'face_score2'], axis=1)

# Shuffle the rows before saving.
meta = meta.sample(frac=1)

meta.to_csv('meta.csv', index=False)
'''
I then converted this .csv to a dataframe using pandas and used it for the train/test split and for ImageFolder.


This I have used for train, test split and ImageFolder

Your code is not properly formatted so hard to read, but:

sounds weird as I don’t understand why you would use the ImageFolder dataset in this case.
Check the docs to read more about its use case, but as already mentioned, it expects each subfolder to represent a class and to contain all corresponding images.
If you are dealing with a custom dataset stored in csv files you might want to create a custom Dataset as described in this tutorial.

Wow, amazing.
I am a Chinese student. I am studying it now.

I used ImageFolder to create Train and Test data directories and apply transform to resize the images and convert it to ToTensor.

My wiki crop folder contains these types of images. Is my image format wrong, such that it could cause the error when I run the ImageFolder instruction?

This is the part of my code where I converted the .mat file to a .csv file.

'''
# Convert the WIKI crop .mat metadata into a flat CSV of (age, gender, path).
import numpy as np
from scipy.io import loadmat
import pandas as pd
import datetime as date
from dateutil.relativedelta import relativedelta

# Output column names for the final CSV.
cols = ['age', 'gender', 'path']

wiki_mat = '/content/drive/MyDrive/wiki_crop/wiki.mat'
wiki_data = loadmat(wiki_mat)
wiki = wiki_data['wiki']

# The MATLAB struct comes back as nested object arrays; the numeric
# indices below select the photo_taken / full_path / gender fields.
wiki_photo_taken = wiki[0][0][1][0]
wiki_full_path = wiki[0][0][2][0]
wiki_gender = wiki[0][0][3][0]
#wiki_face_score1 = wiki[0][0][6][0]
#wiki_face_score2 = wiki[0][0][7][0]

wiki_path = []

# Prefix each relative dataset path with the Drive mount location.
for path in wiki_full_path:
    wiki_path.append('/content/drive/MyDrive/wiki_crop/' + path[0])

wiki_dob = []


# Filenames look like <id>_<YYYY-MM-DD>_<year>.jpg; because 'wiki_crop'
# itself contains an underscore, index [2] of the full-path split lands
# on the date-of-birth token.  Fragile -- splitting
# os.path.basename(file) would be safer.
for file in wiki_path:
    wiki_dob.append(file.split('_')[2])

wiki_age = []

# Age = whole years between the date of birth and Jan 1 of the year the
# photo was taken (an approximation: month/day of the shoot is unknown).
for i in range(len(wiki_dob)):
    try:
        d1 = date.datetime.strptime(wiki_dob[i], '%Y-%m-%d')
        d2 = date.datetime.strptime(str(wiki_photo_taken[i]), '%Y')
        rdelta = relativedelta(d2, d1)
        diff = rdelta.years
    except Exception as ex:
        # Unparseable dates get the sentinel age -1.
        print(ex)
        diff = -1
    wiki_age.append(diff)

#final_imdb = np.vstack((imdb_age, imdb_genders, imdb_path, imdb_face_score1, imdb_face_score2)).T
# NOTE: vstack over mixed int/float/str columns coerces everything to
# strings, so the resulting CSV columns are all text.
final_wiki = np.vstack((wiki_age, wiki_gender, wiki_path)).T

final_wiki_df = pd.DataFrame(final_wiki)

final_wiki_df.columns = cols

# concat of a single frame is effectively a copy (kept from the original
# multi-dataset version of this script).
meta = pd.concat([final_wiki_df])
#meta = meta[meta['face_score1'] != '-inf']
#meta = meta[meta['face_score2'] == 'nan']

#meta = meta.drop(['face_score1', 'face_score2'], axis=1)

# Shuffle all rows before saving.
meta = meta.sample(frac=1)

meta.to_csv('meta.csv', index=False)
'''

After this, I filtered out the NaN values and used the result for the train/test split and for making the directories used with ImageFolder. Please tell me where I made a mistake.

okay. Now I got where I did the mistake. Thank you.