Hi there,
I am trying to implement my first CNN to classify pathological and healthy images.
During the training, I have an error that I can’t correct.
Despite searching on Google, I can't find a solution.
Is there a kind soul who could help me?
The code is as follows:
def train_net(n_epoch):
    """Train the global `model` for `n_epoch` epochs over the `train` loader.

    Relies on module-level globals: `model`, `criterion` (BCELoss per the
    traceback), `optimizer`, `train` (DataLoader), and `plt`.

    Returns:
        list[float]: per-batch loss values (also plotted at the end).
    """
    losses = []  # was `losses =` — a syntax error; must initialize an empty list
    for epoch in range(n_epoch):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, data in enumerate(train, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(inputs)
            # BCELoss requires target and input to have the SAME shape.
            # The reported error (input [32, 1, 128, 2] vs target [32]) means the
            # network head is missing a Flatten + Linear(..., 1) (+ Sigmoid for
            # BCELoss) producing ONE value per sample — fix the model so
            # `outputs` is [batch, 1]. BCELoss also needs float targets, so we
            # cast and reshape the labels to match here.
            loss = criterion(outputs, labels.float().view(-1, 1))
            loss.backward()
            optimizer.step()

            # Store the Python float, not the tensor: appending `loss` itself
            # keeps each batch's autograd graph alive and leaks memory.
            losses.append(loss.item())
            running_loss += loss.item()
            if i % 100 == 99:  # print every 100 mini-batches
                # average over the actual window (100), not 2000
                print('[%d, %5d] loss: %.10f' %
                      (epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0

    # straight ASCII quotes — the pasted version used typographic quotes,
    # which are a SyntaxError in Python
    plt.plot(losses, label='Training loss')
    plt.show()
    return losses
ValueError                                Traceback (most recent call last)
/var/folders/7x/f_g9k1797vn0y8xdfwzz10gc0000gn/T/ipykernel_71356/646090411.py in <module>
     13
     14         outputs = model(inputs)
---> 15         loss = criterion(outputs, labels)
     16         loss.backward()
     17         optimizer.step()

~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
    610
    611     def forward(self, input: Tensor, target: Tensor) -> Tensor:
--> 612         return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)
    613
    614

~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/functional.py in binary_cross_entropy(input, target, weight, size_average, reduce, reduction)
   3054     reduction_enum = _Reduction.get_enum(reduction)
   3055     if target.size() != input.size():
-> 3056         raise ValueError(
   3057             "Using a target size ({}) that is different to the input size ({}) is deprecated. "
   3058             "Please ensure they have the same size.".format(target.size(), input.size())

ValueError: Using a target size (torch.Size([32])) that is different to the input size (torch.Size([32, 1, 128, 2])) is deprecated. Please ensure they have the same size.