RuntimeError: Given groups=1, weight[16, 1, 5, 5], so expected input[100, 3, 224, 224] to have 1 channels, but got 3 channels instead

I am newbie to learn CNN and pytorch (I haven’t touched tensorflow)
My pytorch is 0.3.0.post4

Below is my first CNN code to learning and classifying my own pictures. all my pictures are gray (one channel) and width x height = 100x100

Summary

“”“
https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/02-intermediate/convolutional_neural_network/main.py
”""
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as dsets
import torch.utils.data as data
import torchvision.transforms as transforms
from torch.autograd import Variable
import os
import os.path

USE_MNIST = False

IMAGE_ROOT_PATH = ‘/home/XXXX/project-space/auto-driving’
IMAGE_TRAIN_PATH = IMAGE_ROOT_PATH+’/train/‘
IMAGE_TEST_PATH = IMAGE_ROOT_PATH +’/test/'
IMAGE_LABEL_FILE = 'image_labels.txt’
IMAGE_COUNT = 0
learning_rate = 0.001
num_epochs = 5
batch_size = 100

normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])

def default_loader(path):
return Image.open(path).convert(‘RGB’)

def default_flist_reader(flist):
""“
flist format: impath label\nimpath label\n …(same to caffe’s filelist)
”""
imlist = []
with open(flist, ‘r’) as rf:
for line in rf.readlines():
impath, imlabel = line.strip().split(’,’)
imlist.append((impath, int(imlabel)))

return imlist

class ImageFilelist(data.Dataset):
def init(self, root, flist, transform=None, target_transform=None,
flist_reader=default_flist_reader, loader=default_loader):
self.root = root
self.imlist = flist_reader(flist)
self.transform = transform
self.target_transform = target_transform
self.loader = loader

def __getitem__(self, index):
    impath, target = self.imlist[index]
    # print(index,"join path=",os.path.join(self.root, impath))
    img = self.loader(os.path.join(self.root, impath))
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)

    return img, target

def __len__(self):
    return len(self.imlist)

if USE_MNIST:

# MNIST Dataset
train_dataset = dsets.MNIST(root='./data/',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)

test_dataset = dsets.MNIST(root='./data/',
                           train=False,
                           transform=transforms.ToTensor())

train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

else:

#use my own pictures
train_loader = torch.utils.data.DataLoader(
    ImageFilelist(
        root=IMAGE_TRAIN_PATH, flist=IMAGE_TRAIN_PATH+IMAGE_LABEL_FILE,
        transform=preprocess
    ),
    batch_size=batch_size, shuffle=False,
    num_workers=1, pin_memory=True
)

test_loader = torch.utils.data.DataLoader(
    ImageFilelist(
        root=IMAGE_TEST_PATH, flist=IMAGE_TEST_PATH+ IMAGE_LABEL_FILE,
        transform=preprocess
    ),
    batch_size=1, shuffle=False,
    num_workers=1, pin_memory=True
)

class CNN3(nn.Module):
def init(self):
super(CNN3, self).init()
self.conv1 = nn.Sequential( # input shape (1, 100, 100)
nn.Conv2d(
in_channels=1,
out_channels=16, # n_filters
kernel_size=5, # filter size
stride=1, # filter movement/step
padding=2,
), # output shape (16, 28, 28)
nn.ReLU(), # activation
nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential( # input shape (1, 28, 28)
nn.Conv2d(16, 32, 5, 1, 2), # output shape (32, 14, 14)
nn.ReLU(), # activation
nn.MaxPool2d(2), # output shape (32, 7, 7)
)
self.out = nn.Linear(7732, 10) # fully connected layer, output 10 classes

def forward(self, x):
    x = self.conv1(x)
    x = self.conv2(x)
    x = x.view(x.size(0), -1)   
    output = self.out(x)
    return output

cnn = CNN3()
print(cnn)

quit(0)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = Variable(images)
labels = Variable(labels)

    # Forward + Backward + Optimize
    optimizer.zero_grad()
    outputs = cnn(images)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    if (i + 1) % 100 == 0:
        print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
              % (epoch + 1, num_epochs, i + 1, len(train_loader.dataset) // batch_size, loss.data[0]))

cnn.eval()
correct = 0
total = 0

for images, labels in test_loader:
images = Variable(images)
outputs = cnn(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()

print(‘Test Accuracy of the model on the 10000 test images: %d %%’ % (100 * correct / total))

When I run it, If I use MNIST pictures, the code runs well. But if I use my own pictures,it appear an error:

Traceback (most recent call last):
File “/home/XXXX/py-spaces/matplot-space/mp-lesson01/load_images/cnn_101.py”, line 207, in
outputs = cnn(images)
File “/home/XXXX/pyvenv/tensorflow/lib/python3.6/site-packages/torch/nn/modules/module.py”, line 325, in call
result = self.forward(*input, **kwargs)
File “/home/XXXX/py-spaces/matplot-space/mp-lesson01/load_images/cnn_101.py”, line 186, in forward
x = self.conv1(x)
File “/home/XXXX/pyvenv/tensorflow/lib/python3.6/site-packages/torch/nn/modules/module.py”, line 325, in call
result = self.forward(*input, **kwargs)
File “/home/XXXX/pyvenv/tensorflow/lib/python3.6/site-packages/torch/nn/modules/container.py”, line 67, in forward
input = module(input)
File “/home/XXXX/pyvenv/tensorflow/lib/python3.6/site-packages/torch/nn/modules/module.py”, line 325, in call
result = self.forward(*input, **kwargs)
File “/home/XXXX/pyvenv/tensorflow/lib/python3.6/site-packages/torch/nn/modules/conv.py”, line 277, in forward
self.padding, self.dilation, self.groups)
File “/home/XXXX/pyvenv/tensorflow/lib/python3.6/site-packages/torch/nn/functional.py”, line 90, in conv2d
return f(input, weight, bias)
RuntimeError: Given groups=1, weight[16, 1, 5, 5], so expected input[100, 3, 224, 224] to have 1 channels, but got 3 channels instead

Process finished with exit code 1

Does somebody instruct me what wrong with my code or my pictures ?
Many thanks