I am getting a transformation error when I call the train function of my CNN model.
Here is the code I used for the dataloader:
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import transforms

# Dataset class: each row of the dataframe points to a .npy file with the patches
class OAIDataset(Dataset):
    def __init__(self, csv_file, root_dir, transform=None):
        # self.data = pd.read_csv(csv_file, header=None)
        self.data = csv_file
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, str(self.data['ID'].iloc[idx]) + '.npy')
        patches, p_id = np.load(img_name, allow_pickle=True)
        img_class = int(self.data.iloc[idx, 2])
        side = self.data.iloc[idx, 1]
        if side == 1:
            image = np.array(patches['R'].astype('uint8'), 'L')  # [image['R':side]|image['L':side]]
        else:
            image = np.array(patches['L'].astype('uint8'), 'L')
        if self.transform is not None:
            image = self.transform(image)
        sample = {'image': image, 'grade': img_class}
        return sample
# Define our data transforms
trans = transforms.Compose([transforms.ToTensor()])

# Call the dataset
# trans = transforms.ToTensor()
train_data = OAIDataset(train_df, root_dirA, transform=trans)
test_data = OAIDataset(test_df, root_dirA, transform=trans)
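For reference, each .npy file stores a (patches, patient id) pair, where patches is a dict keyed by 'R' and 'L', as shown in __getitem__ above. A single raw sample (with no transform) can be inspected like this:

# quick sanity check on one raw sample before any transform is applied
raw = OAIDataset(train_df, root_dirA, transform=None)[0]
print(type(raw['image']), raw['image'].shape, raw['image'].dtype)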
from sklearn.model_selection import GroupKFold

# create the k-fold splitter and the fold indices
kfold = GroupKFold(n_splits=5)
snapshots = []

# iterate over folds
for train_index, val_index in kfold.split(X=train_df, y=grades_dev, groups=groups_dev):
    print("\nTRAIN:", train_index, "TEST:", val_index)
    # subset of the dataset according to the fold indices
    train_set = torch.utils.data.Subset(train_data, train_index)
    val_set = torch.utils.data.Subset(train_data, val_index)
    # data loaders for training and validation
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=50, shuffle=True, num_workers=0)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=50, shuffle=False, num_workers=0)
model = ConvNet()
def train_val_model(model, epochs):
    model.train()  # set the model to training mode
    for epoch in range(epochs):
        losses = 0
        closs = 0
        for i, batch in enumerate(train_loader):
            image, grade = batch['image'], batch['grade']
            # image = image.unsqueeze(1).type(torch.FloatTensor)
            # image = torch.from_numpy(image).float()
            optimizer.zero_grad()
            prediction = model(image)
            loss = costFunction(prediction, grade)
            closs += loss.item()
            loss.backward()
            optimizer.step()
            # accumulate the loss
            losses += loss.data[0]
            # losses.append(loss.item())
            # num_times = num_times + 1
        print('epoch', epoch, 'losses', closs)
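For context, costFunction and optimizer are defined elsewhere in the file. They look roughly like this (cross-entropy and Adam are placeholders here; the exact settings are not shown above):

import torch.nn as nn

# placeholders for the loss and optimizer referenced in train_val_model
costFunction = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

train_val_model(model, 1)  # this is the call that triggers the traceback below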
Here is the error message:
Traceback (most recent call last):
  File "/home/fatema/Downloads/assignment3/DATAfile.py", line 288, in <module>
    train_val_model(model,1)
  File "/home/fatema/Downloads/assignment3/DATAfile.py", line 154, in train_val_model
    for i,batch in enumerate(train_loader):
  File "/home/fatema/miniconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 560, in __next__
    batch = self.collate_fn([self.dataset[i] for i in indices])
  File "/home/fatema/miniconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 560, in <listcomp>
    batch = self.collate_fn([self.dataset[i] for i in indices])
  File "/home/fatema/miniconda3/lib/python3.6/site-packages/torch/utils/data/dataset.py", line 107, in __getitem__
    return self.dataset[self.indices[idx]]
  File "/home/fatema/Downloads/assignment3/DATAfile.py", line 62, in __getitem__
    image = self.transform(image)
  File "/home/fatema/miniconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 49, in __call__
    img = t(img)
  File "/home/fatema/miniconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 76, in __call__
    return F.to_tensor(pic)
  File "/home/fatema/miniconda3/lib/python3.6/site-packages/torchvision/transforms/functional.py", line 48, in to_tensor
    img = torch.from_numpy(pic.transpose((2, 0, 1)))
ValueError: axes don't match array

Process finished with exit code 1
I tried several different transformations but could not solve it.
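For example, one of the variants I experimented with looked roughly like this (illustrative, not my exact code):

trans = transforms.Compose([
    transforms.ToPILImage(),  # convert the numpy array to a PIL image first
    transforms.ToTensor(),
])

What is causing the "axes don't match array" error, and how should the image be prepared in __getitem__ so that ToTensor accepts it?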