RuntimeError: Given groups=1, weight of size [64, 3, 3, 3], expected input[1, 112, 112, 3] to have 3 channels, but got 112 channels instead

Hi, I'm trying to run a face recognition module, but this error always pops up when I run it. I need it for my dissertation, and if this keeps happening I can't get anything done. Can anyone help?
This is my data loader:

class LFW(object):
    def __init__(self, root, specific_folder, img_extension, preprocessing_method=None, crop_size=(96, 112)):
        """
        Dataloader of the LFW dataset.

        root: path to the dataset to be used.
        specific_folder: specific folder inside the same dataset.
        img_extension: extension of the dataset images.
        preprocessing_method: string with the name of the preprocessing method.
        crop_size: retrieval network specific crop size.
        """

        self.preprocessing_method = preprocessing_method
        self.crop_size = crop_size
        self.imgl_list = []
        self.classes = []
        self.people = []
        self.model_align = None

        # read the file with the names and the number of images of each person in the dataset
        with open(os.path.join(root, 'people.txt')) as f:
            people = f.read().splitlines()[1:]

        # get only the people that have more than 20 images
        for p in people:
            p = p.split('\t')
            if len(p) > 1:
                if int(p[1]) >= 20:
                    for num_img in range(1, int(p[1]) + 1):
                        self.imgl_list.append(os.path.join(root, specific_folder, p[0], p[0] + '_' +
                                                           '{:04}'.format(num_img) + '.' + img_extension))
                        self.classes.append(p[0])
                        self.people.append(p[0])

        le = preprocessing.LabelEncoder()
        self.classes = le.fit_transform(self.classes)
        print(self.classes)
       # print(len(self.imgl_list), len(self.classes), len(self.people))

    def __getitem__(self, index):
        imgl = imageio.imread(self.imgl_list[index])
        cl = self.classes[index]

        # if image is grayscale, transform into rgb by repeating the image 3 times
        if len(imgl.shape) == 2:
            imgl = np.stack([imgl] * 3, 2)

        imgl, bb = preprocess(imgl, self.preprocessing_method, crop_size=self.crop_size,
                              is_processing_dataset=True, return_only_largest_bb=True, execute_default=True)

        # pair the image with its horizontally flipped copy
        imglist = [imgl, imgl[:, ::-1, :]]

        # normalization
        for i in range(len(imglist)):
            imglist[i] = (imglist[i] - 127.5) / 128.0
            imglist[i] = imglist[i].transpose(2, 0, 1)
        imgs = [torch.from_numpy(i).float() for i in imglist]
        return imgl, cl
        #return imgs, cl, imgl, bb, self.imgl_list[index], self.people[index]

    def __len__(self):
        return len(self.imgl_list)

And this is how I try to train:

##########################################################################################################2.1000
#try to train
if __name__ == '__main__':

    #set_trace()
    args = edict({
        'operation' : 'train',
        'feature_file' : None,
         'result_sample_path' : None,
         'gpu' : 'GPU',
         'path_image_query' : None,
         'query_label' : 'Query label',
         'dataset' : None,
         'specific_dataset_folder_name' : 'lfw',
         'img_extension' : 'jpg',
         'preprocessing_method' : 'sphereface',
         'model_name' : 'mobiface',
         'batch_size' : 3,
         'image_query':'/content/drive/My Drive/recfaces13/recfaces/datasets/LFW',
         'train':True,
         'device':'cuda'
})
    print(args)

    # selecting the size of the crop based on the network
    if args.model_name == 'mobilefacenet' or args.model_name == 'sphereface':
        crop_size = (96, 112)
    elif args.model_name == 'mobiface' or args.model_name == 'shufflefacenet':
        crop_size = (112, 112)
    elif args.model_name == 'openface':
        crop_size = (96, 96)
    elif args.model_name == 'facenet':
        crop_size = (160, 160)
    else:
        raise NotImplementedError("Model " + args.model_name + " not implemented")

    if args.dataset is not None:
        # process whole dataset
        assert args.specific_dataset_folder_name is not None, 'To process a dataset, ' \
                                                              'the flag --specific_dataset_folder_name is required.'
        process_dataset(args.operation, args.model_name, args.batch_size,
                        args.dataset, args.specific_dataset_folder_name,
                        args.img_extension, args.preprocessing_method, crop_size,
                        args.result_sample_path, args.feature_file)

    elif args.operation == 'train':
      #set_trace()

      net = load_net('mobilefacenet', 'gpu')
      net = net.cuda()
      model_name=args.model_name
      
      dataset = LFW(args.image_query,args.specific_dataset_folder_name, args.img_extension, args.preprocessing_method, crop_size)
      dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2, drop_last=False)

      

      features = None

      if args.feature_file is not None and os.path.isfile(args.feature_file):
            features = scipy.io.loadmat(args.feature_file)      
      epoch = 2
      criterion = nn.CrossEntropyLoss()
      optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
      train_loss = list()
      #set_trace()
      for i, data in enumerate(dataloader):
        
        inps, labs = data
        for i in range(len(inps)):
          #set_trace()
      
          inps, labs = inps[i].cuda(args['device']), labs.cuda(args['device'])
          inps.squeeze_(0)
          labs.squeeze_(0)
          inps = Variable(inps).cuda(args['device'])
          labs = Variable(labs).cuda(args['device'])
          optimizer.zero_grad()
          outs = net(inps[None, ...])
          soft_outs = F.softmax(outs, dim=1)
          prds = soft_outs.data.max(1)[1]
          loss = criterion(outs, labs)
          loss.backward()
          optimizer.step()
          prds = prds.squeeze_(1).squeeze_(0).cpu().numpy()
          inps_np = inps.detach().squeeze(0).cpu().numpy()
          labs_np = labs.detach().squeeze(0).cpu().numpy()
          train_loss.append(loss.data.item())
          print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (epoch, i + 1, len(train_loader), np.asarray(train_loss).mean()))

Hi,

As the error mentions, the input does not have the right shape.
Given that the first conv layer expects 3 channels, and given the size of your input, I guess you need to convert it from batch x height x width x channel to batch x channel x height x width. You can use the permute function for that.
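For example, a minimal sketch (the tensor name `x` and the sizes are just taken from the error message, not from your code):

```python
import torch

x = torch.randn(1, 112, 112, 3)   # [batch, height, width, channel], as in the error message

# reorder the dimensions so the channel axis comes right after the batch axis
x = x.permute(0, 3, 1, 2).contiguous()

print(x.shape)   # torch.Size([1, 3, 112, 112]), which is what the conv layer expects
```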

So am I correct that I should use the permute function to reorder those batch x height x width x channel dimensions, and that should solve it?

Yes, changing the input to be batch x channel x height x width will solve this issue.
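
For completeness, a hedged sketch of how that could look in your training loop above (this assumes `inps[None, ...]` is the [1, height, width, channel] tensor that currently reaches the network, as the error message suggests; the `.float()` cast is only a precaution in case the images are still uint8):

```python
# inside the inner loop, replacing the original forward pass
inps_nchw = inps[None, ...].permute(0, 3, 1, 2).contiguous().float()  # [1, C, H, W]
outs = net(inps_nchw)
```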