To get a single forward pass working through my model first, I set batch_size=1 to bypass the need for a collate_fn.
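For context, that setup looked roughly like this (a minimal sketch; train_data_custom and img_single match my other cells):

from torch.utils.data import DataLoader

# batch_size=1 means the default collate_fn just adds a leading batch dim to one sample
train_dataloader_single = DataLoader(dataset=train_data_custom,
                                     batch_size=1,
                                     shuffle=True)
img_single, label_single = next(iter(train_dataloader_single))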
Running that, I initially got this error:
{
"name": "RuntimeError",
"message": "Expected 3D (unbatched) or 4D (batched) input to conv2d, but got input of size: [1, 28, 128, 16, 16]",
"stack": "---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In[22], line 17
15 model_0.eval()
16 with torch.inference_mode():
---> 17 pred = model_0(img_single.to(device))
19 # 4. Print out what's happening and convert model logits -> pred probs -> pred label
20 print(f\"Output logits:\
{pred}\
\")
File ~/Library/Python/3.9/lib/python/site-packages/torch/nn/modules/module.py:1518, in Module._wrapped_call_impl(self, *args, **kwargs)
1516 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1517 else:
-> 1518 return self._call_impl(*args, **kwargs)
File ~/Library/Python/3.9/lib/python/site-packages/torch/nn/modules/module.py:1527, in Module._call_impl(self, *args, **kwargs)
1522 # If we don't have any hooks, we want to skip the rest of the logic in
1523 # this function, and just call forward.
1524 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1525 or _global_backward_pre_hooks or _global_backward_hooks
1526 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1527 return forward_call(*args, **kwargs)
1529 try:
1530 result = None
Cell In[21], line 45, in TinyVGG.forward(self, x)
44 def forward(self, x: torch.Tensor):
---> 45 x = self.conv_block_1(x)
46 # print(x.shape)
47 x = self.conv_block_2(x)
File ~/Library/Python/3.9/lib/python/site-packages/torch/nn/modules/module.py:1518, in Module._wrapped_call_impl(self, *args, **kwargs)
1516 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1517 else:
-> 1518 return self._call_impl(*args, **kwargs)
File ~/Library/Python/3.9/lib/python/site-packages/torch/nn/modules/module.py:1527, in Module._call_impl(self, *args, **kwargs)
1522 # If we don't have any hooks, we want to skip the rest of the logic in
1523 # this function, and just call forward.
1524 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1525 or _global_backward_pre_hooks or _global_backward_hooks
1526 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1527 return forward_call(*args, **kwargs)
1529 try:
1530 result = None
File ~/Library/Python/3.9/lib/python/site-packages/torch/nn/modules/container.py:215, in Sequential.forward(self, input)
213 def forward(self, input):
214 for module in self:
--> 215 input = module(input)
216 return input
File ~/Library/Python/3.9/lib/python/site-packages/torch/nn/modules/module.py:1518, in Module._wrapped_call_impl(self, *args, **kwargs)
1516 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1517 else:
-> 1518 return self._call_impl(*args, **kwargs)
File ~/Library/Python/3.9/lib/python/site-packages/torch/nn/modules/module.py:1527, in Module._call_impl(self, *args, **kwargs)
1522 # If we don't have any hooks, we want to skip the rest of the logic in
1523 # this function, and just call forward.
1524 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1525 or _global_backward_pre_hooks or _global_backward_hooks
1526 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1527 return forward_call(*args, **kwargs)
1529 try:
1530 result = None
File ~/Library/Python/3.9/lib/python/site-packages/torch/nn/modules/conv.py:460, in Conv2d.forward(self, input)
459 def forward(self, input: Tensor) -> Tensor:
--> 460 return self._conv_forward(input, self.weight, self.bias)
File ~/Library/Python/3.9/lib/python/site-packages/torch/nn/modules/conv.py:456, in Conv2d._conv_forward(self, input, weight, bias)
452 if self.padding_mode != 'zeros':
453 return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
454 weight, bias, self.stride,
455 _pair(0), self.dilation, self.groups)
--> 456 return F.conv2d(input, weight, bias, self.stride,
457 self.padding, self.dilation, self.groups)
RuntimeError: Expected 3D (unbatched) or 4D (batched) input to conv2d, but got input of size: [1, 28, 128, 16, 16]"
}
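As far as I can tell, each sample from my Dataset is already a stack of images, so the default collate adds a second batch dimension on top. A quick shape check (roughly what I ran) shows the 5D input:

print(img_single.shape)
# torch.Size([1, 28, 128, 16, 16]) -> [batch, n_imgs, channels, height, width]
# nn.Conv2d expects a 4D [batch, channels, height, width] input, hence the RuntimeError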
Then I went back to trying to get the collate_fn working with a batch size of 16. Here is what my collate_fn currently looks like:
import torch

def custom_collate(data):
    imgs = []
    labels = []
    for sample in data:
        # one label per sample
        label = sample[1]
        # multiple imgs per sample, so iterate over them
        for img in sample[0]:
            imgs.append(img)
            # repeat the label once per image so imgs and labels stay aligned
            labels.append(label)
    imgs = torch.stack(imgs)
    labels = torch.tensor(labels)
    return imgs, labels
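For reference, each element of data here is one (imgs, label) pair from my Dataset, where imgs holds a variable number of images. Roughly (a sketch, the exact counts vary per sample):

imgs, label = train_data_custom[0]
print(imgs.shape)  # e.g. torch.Size([n_imgs, 128, 16, 16]) - n_imgs differs per sample
print(label)       # a single label shared by all images in that sample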
The issue I am running into is that it still returns inconsistent batch sizes. For example, here are the image tensor shapes for the first 3 batches:
[36, 128, 16, 16]
[475, 128, 16, 16]
[56, 128, 16, 16]
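(Those shapes come from a quick loop over the DataLoader defined below, roughly:)

for i, (imgs, labels) in enumerate(train_dataloader_custom):
    print(list(imgs.shape))
    if i == 2:
        break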
Then, when I go to train, I get errors because of the inconsistent batch sizes. Is there a way to tweak what I have so that the batch size is actually determined by the DataLoader setup? For example, with the code below I would like it to be 16:
train_dataloader_custom = DataLoader(dataset=train_data_custom,  # use custom created train Dataset
                                     batch_size=16,               # how many samples per batch?
                                     num_workers=0,               # how many subprocesses to use for data loading?
                                                                  # (higher = more processes loading data simultaneously)
                                     shuffle=True,                # shuffle the data?
                                     collate_fn=custom_collate)
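To be concrete about what I'm hoping for (this is the desired outcome, not what currently happens):

imgs, labels = next(iter(train_dataloader_custom))
print(imgs.shape)   # hoping for: torch.Size([16, 128, 16, 16])
print(labels.shape) # hoping for: torch.Size([16])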
Thanks so much for your help as always, and I hope what I am saying makes sense!