Only integer tensors of a single element can be converted to an index

Hi

import numpy as np
import torch
from PIL import Image

def create_dset(patchH, patchW, PatchperImage, settype='train'):
  """
  patchH: Patch height
  patchW: Patch width
  PatchperImage: Number of patches per image
  settype: Can be either train or test
  """
  if settype == 'train':
    Datapath = '/training/images/'
    # 2 different annotations by different ophthalmologists
    Labelpath = '/data/training/labels/'
  elif settype == 'test':
    Datapath = '/test/images/'
    # 2 different annotations by different ophthalmologists
    Labelpath = 'data/test/1st_manual/'
  else:
    raise ValueError("settype can be either 'test' or 'train'")
  
  images = torch.DoubleTensor(20*PatchperImage, 3*patchH*patchW) # 20 such images
  labels = torch.DoubleTensor(20*PatchperImage, patchH*patchW)
  t_no = 0
  for img_no in range(20):
      if settype == 'train':
        # training files are numbered 21-40
        dp = Datapath + str(img_no+21) + '_training.tif'
        lp = Labelpath + str(img_no+21) + '_manual1.gif'
      elif settype == 'test':
        # test files are numbered 01-20
        dp = Datapath + "%02d" % (img_no+1) + '_test.tif'
        lp = Labelpath + "%02d" % (img_no+1) + '_manual1.gif'
      imD = np.array(Image.open(dp))
      imL = np.array(Image.open(lp))
      imL = np.reshape(imL, (imL.shape[0], imL.shape[1], 1))
      # img_transfer is my helper that cuts each image/label pair into PatchperImage patches
      imD, imL = img_transfer(imD, imL, patchH, patchW, PatchperImage)
      imD = imD/255.0
      imL = imL/255.0
      for i in range(PatchperImage):
          images[t_no] = torch.from_numpy(imD[i])
          labels[t_no] = torch.from_numpy(imL[i])
          t_no = t_no + 1
  return images, labels
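For reference, the function returns flat per-patch rows. A quick check of the shapes I get back (48 and 200 are just example values):

images, labels = create_dset(48, 48, 200, settype='train')
print(images.shape)  # torch.Size([4000, 6912]) -> 20*200 patches, 3*48*48 values each
print(labels.shape)  # torch.Size([4000, 2304]) -> 20*200 patches, 48*48 values each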

The code above creates patches for the dataset. I would like to pass them to the custom data loader shown below to do some preprocessing, but it gives me this error:

TypeError: only integer tensors of a single element can be converted to an index

What is a possible way to do the preprocessing if I want to divide the image into patches?
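One workaround I am considering is to skip the file-based loader entirely and wrap the tensors returned by create_dset in a TensorDataset, so that the DataLoader indexes tensors rather than file lists. A minimal sketch (the reshape assumes img_transfer flattens each patch channel-first, and the sizes are the same example values as above):

import torch
from torch.utils.data import TensorDataset, DataLoader

images, labels = create_dset(48, 48, 200, settype='train')
# reshape the flat rows back into patches (assumes channel-first flattening)
images = images.view(-1, 3, 48, 48).float()
labels = labels.view(-1, 1, 48, 48).float()

patch_dataset = TensorDataset(images, labels)  # indexes tensors, not file lists
patch_loader = DataLoader(patch_dataset, batch_size=32, shuffle=True)

But then I do not see how to apply the PIL-based transforms below, since they expect PIL images rather than tensors.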

Here is the custom data loader:

import os
import torch
import torchvision.transforms as tfms
from PIL import Image
from torch.utils import data

train_transform = tfms.Compose([tfms.Resize((512, 512)),
                                tfms.RandomAffine(20, translate=None, scale=None,
                                                  shear=None, resample=False, fillcolor=0),
                                tfms.Grayscale(num_output_channels=1),
                                tfms.RandomHorizontalFlip(),
                                tfms.RandomRotation(45, resample=False, expand=False, center=None),
                                tfms.ToTensor(),
                                tfms.Normalize([0.314181], [0.27204])])
        
class CDataLoader(data.Dataset):
    def __init__(self, root_dir, seg_dir, transforms=None):
        self.root_dir = root_dir
        self.seg_dir = seg_dir
        self.transforms = transforms
        # sort both listings so image i lines up with label i
        self.files = sorted(os.listdir(self.root_dir))
        self.labels = sorted(os.listdir(self.seg_dir))

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        img_name = self.files[idx]
        label_name = self.labels[idx]
        img = Image.open(os.path.join(self.root_dir, img_name))
        label = Image.open(os.path.join(self.seg_dir, label_name))
        if self.transforms:
            # note: calling the same random transforms separately on image and
            # label draws independent random parameters for each
            img = self.transforms(img)
            label = self.transforms(label)
        return img, label
train_dataset = CDataLoader(TrainImages, TrainLabels, transforms=train_transform)


train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
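Alternatively, would it work to keep loading the full images, run the transforms first, and only then cut the transformed tensor into patches inside __getitem__? A sketch of what I mean (PatchDataset is a name I made up; unfold tiles the tensor into non-overlapping patches, assuming 512 is divisible by patch_size):

class PatchDataset(data.Dataset):
    def __init__(self, root_dir, seg_dir, patch_size, transforms=None):
        self.root_dir = root_dir
        self.seg_dir = seg_dir
        self.patch_size = patch_size
        self.transforms = transforms
        self.files = sorted(os.listdir(root_dir))
        self.labels = sorted(os.listdir(seg_dir))

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        img = Image.open(os.path.join(self.root_dir, self.files[idx]))
        label = Image.open(os.path.join(self.seg_dir, self.labels[idx]))
        if self.transforms:
            img = self.transforms(img)    # ToTensor() gives C x H x W
            label = self.transforms(label)
        p = self.patch_size
        # tile the C x H x W tensor into (num_patches, C, p, p)
        img = img.unfold(1, p, p).unfold(2, p, p).reshape(img.size(0), -1, p, p).permute(1, 0, 2, 3)
        label = label.unfold(1, p, p).unfold(2, p, p).reshape(label.size(0), -1, p, p).permute(1, 0, 2, 3)
        return img, label

Each batch would then carry an extra patch dimension, (batch_size, num_patches, 1, p, p), which I suppose I would flatten before feeding the network. The random transforms being applied independently to image and label still worries me, though.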

Any suggestions, please?