I want the output of my loader to be a list of lists, such that each inner list contains the frames of one video, which I can then traverse in a loop during inference.
But my collate function does not seem to be invoked — I never see 'x' being printed.
# Quick sanity check: pull a single batch from the test loader and
# report how many elements it contains, then stop.
for batch in vidloader_test:
    print(len(batch))
    break
def my_collate(batch):
    """Collate (frames, label) samples without stacking the frames.

    Each sample's frame list is kept intact (frame counts differ per
    video), so the batch comes out as [list of per-video frame lists,
    LongTensor of labels].
    """
    frame_lists, labels = zip(*batch)
    label_tensor = torch.LongTensor(list(labels))
    print('x')  # debug marker: proves this collate_fn is actually invoked
    return [list(frame_lists), label_tensor]
# NOTE(review): vidSet must already be defined by the time these lines run —
# in this paste the class definition appears *below* its first use, which
# would raise NameError if executed top-to-bottom. Same for my_collate:
# it must be defined before the DataLoader that references it.
vidset_test = vidSet(Path_test)
# NOTE(review): the validation set is built from Path_train, identical to
# the training set — confirm this is intentional and not a copy-paste slip.
vidset_valid = vidSet(Path_train)
vidset_train = vidSet(Path_train)

# All three loaders need the custom collate_fn: each sample carries a
# variable-length list of frames, which the default collate cannot stack.
# Without it the valid/train loaders would crash (or behave unexpectedly)
# on the first batch.
vidloader_test = DataLoader(vidset_test, batch_size=3, shuffle=True, collate_fn=my_collate)
vidloader_valid = DataLoader(vidset_valid, batch_size=64, shuffle=False, collate_fn=my_collate)
vidloader_train = DataLoader(vidset_train, batch_size=64, shuffle=False, collate_fn=my_collate)
class vidSet(Dataset):
    """Dataset of videos: each item is (list of face-crop frames, label).

    Frame lists vary in length per video, so batching this dataset with a
    DataLoader requires a custom collate_fn that does not stack samples.
    """

    def __init__(self, videos_path):
        # videos_path is assumed to be a fastai-style Path whose .ls()
        # returns the video files it contains — TODO confirm.
        self.video_paths = videos_path.ls()
        self.root = videos_path
        # c = number of classes (presumably binary, e.g. real/fake) —
        # NOTE(review): verify against the training code.
        self.c = 2

    def __len__(self):
        return len(self.video_paths)

    def __getitem__(self, idx):
        # read_video / mtcnn come from the surrounding module: extract a
        # face crop per frame of the video at idx.
        faces_list = read_video(mtcnn, path=self.video_paths[idx])
        if faces_list is None:
            # Bug fix: the original printed 'None' and then crashed on
            # len(None) below. Treat a failed read as an empty frame list.
            print('None')
            faces_list = []
        print('sampleid', idx, 'length of frame list returned ', len(faces_list))
        # Label is hard-coded to 0 — real labels are not wired up yet.
        return faces_list, 0
sampleid 269 length of frame list returned 2
sampleid 222 length of frame list returned 3
sampleid 236 length of frame list returned 2