Hello,
I was working on my image-classification research.
Here is the code (part of a Jupyter cell):
# Training loop
#
# NOTE(review): the RuntimeError below is raised by criterion(outputs, labels):
# the model produces a spatial map of shape [batch, 2, 224, 224] (a
# segmentation-style head), while nn.CrossEntropyLoss with 1-D class labels
# of shape [batch] expects logits of shape [batch, num_classes]. For image
# classification the model's final layers must reduce to [batch, num_classes]
# (e.g. global pooling + nn.Linear) — the fix belongs in the model definition,
# not in this loop.

num_total_steps = len(Train_loader.dataset)  # total number of SAMPLES (not batches)
step = 0            # global step counter across all epochs (for TensorBoard)
losses = []         # per-batch loss values
accuracies = []     # per-batch running accuracies
steps = []          # global step numbers matching the logged scalars

for epoch in range(num_epochs):
    train_loss = 0.0
    train_acc = 0.0
    for i, (images, labels) in enumerate(Train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        losses.append(loss.item())

        # Backward pass and optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Running training accuracy for this batch.
        # .item() detaches the count from the graph before the division.
        _, predictions = outputs.max(1)
        num_correct = (predictions == labels).sum().item()
        running_train_acc = num_correct / images.shape[0]
        accuracies.append(running_train_acc)
        train_acc += running_train_acc
        train_loss += loss.item()

        # Log the scalar value, not the graph-attached tensor: passing the
        # raw `loss` tensor keeps the autograd graph alive and can leak memory.
        writer.add_scalar('Training Loss', loss.item(), global_step=step)
        writer.add_scalar('Training Accuracy', running_train_acc, global_step=step)
        step += 1
        steps.append(step)

        if (i + 1) % 10 == 0:
            # (i + 1) * len(images) = samples processed so far; the original
            # i * len(images) reported 0 on the first printout (off by one batch).
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, (i + 1) * len(images),
                          num_total_steps, loss.item()))

    # Per-epoch averages — computed once per epoch, after the batch loop
    # (the original recomputed these on every batch iteration).
    avg_train_acc = train_acc / len(Train_loader)
    avg_train_loss = train_loss / len(Train_loader)

torch.cuda.empty_cache()
print('Training Ended')
And here’s the error I get:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/tmp/ipykernel_34/669514399.py in <module>
17 outputs = model(images)
18 # print(outputs)
---> 19 loss = criterion(outputs, labels)
20 losses.append(loss.item())
21
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
1119 def forward(self, input: Tensor, target: Tensor) -> Tensor:
1120 return F.cross_entropy(input, target, weight=self.weight,
-> 1121 ignore_index=self.ignore_index, reduction=self.reduction)
1122
1123
/opt/conda/lib/python3.7/site-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
2822 if size_average is not None or reduce is not None:
2823 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2824 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
2825
2826
RuntimeError: 1only batches of spatial targets supported (3D tensors) but got targets of size: : [12]
So then I tried to print the shapes of `outputs` and `labels`.
The shape of `outputs` is: torch.Size([12, 2, 224, 224])
The shape of `labels` is: torch.Size([12])
The shape of `labels` looks alright to me, since that variable holds the classification labels:
tensor([1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1], device='cuda:0')
Is anyone familiar with this problem? Thanks!