```python
import torch.nn as nn

class CNNLenet(nn.Module):
    def __init__(self):
        super(CNNLenet, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        # fully connected layer, output 10 classes
        self.fc1 = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
        x = x.view(x.size(0), -1)
        output = self.fc1(x)
        return output, x  # return x for visualization
```
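Note that `forward` returns two tensors, not one. A quick shape check makes this explicit (a small sketch; the 28×28 single-channel input size is an assumption implied by the `32 * 7 * 7` flattened dimension, i.e. MNIST-like images):

```python
import torch

model = CNNLenet()
dummy = torch.randn(4, 1, 28, 28)  # batch of 4 single-channel 28x28 images
out = model(dummy)

print(type(out))                   # <class 'tuple'> -- forward returns (logits, features)
print(out[0].shape, out[1].shape)  # torch.Size([4, 10]) torch.Size([4, 1568])
```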
Here is the optimization loop:
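The loop below uses `model`, `criterion`, `optimizer`, and `train_loader`, which are not shown in the post. A minimal sketch of one possible setup (the MNIST dataset and the SGD hyperparameters are assumptions; only `nn.CrossEntropyLoss` is confirmed, by the `cross_entropy_loss` traceback further down):

```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

model = CNNLenet()
criterion = nn.CrossEntropyLoss()  # confirmed by the cross_entropy_loss traceback
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)  # assumed hyperparameters

# assumed dataset: MNIST matches the model's 1-channel, 28x28 geometry
train_set = datasets.MNIST(root='./data', train=True, download=True,
                           transform=transforms.ToTensor())
train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
```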
```python
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('Finished Training')
```
Running this code produces the following error:

```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-23-458279d9b112> in <module>
11 # forward + backward + optimize
12 outputs = model(inputs)
---> 13 loss = criterion(outputs, labels)
14
15 loss.backward()
~\anaconda3\envs\deeplearning\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
~\anaconda3\envs\deeplearning\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
1118
1119 def forward(self, input: Tensor, target: Tensor) -> Tensor:
-> 1120 return F.cross_entropy(input, target, weight=self.weight,
1121 ignore_index=self.ignore_index, reduction=self.reduction)
1122
~\anaconda3\envs\deeplearning\lib\site-packages\torch\nn\functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
2822 if size_average is not None or reduce is not None:
2823 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2824 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
2825
2826
TypeError: cross_entropy_loss(): argument 'input' (position 1) must be Tensor, not tuple
```
I am unable to understand this error. Kindly help me understand and resolve it.
The `forward` method of your model returns a tuple via:

```python
return output, x  # return x for visualization
```

which creates the issue in `loss = criterion(outputs, labels)`.
I assume you want to use `output` to calculate the loss, so use:

```python
output, x = model(inputs)
loss = criterion(output, labels)
```

and it should work.
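As a quick sanity check, the failure and the fix can be reproduced without the training loop at all (a minimal sketch; the input shapes assume the MNIST-sized model above):

```python
import torch

model = CNNLenet()
criterion = torch.nn.CrossEntropyLoss()

inputs = torch.randn(4, 1, 28, 28)
labels = torch.randint(0, 10, (4,))

# criterion(model(inputs), labels)  # TypeError: 'input' ... must be Tensor, not tuple
output, x = model(inputs)           # unpack the (logits, features) tuple
loss = criterion(output, labels)    # works: output is a (4, 10) tensor of logits
print(loss.item())
```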