Can someone help me? I could not get past this error.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
Loss function
criterion = nn.BCEWithLogitsLoss()
Optimizer
optimizer = optim.Adam(model.parameters(), lr=0.001)
Define number of training epochs
EPOCHS = 10
train the model
for epoch in range(EPOCHS):
# clear gradients
optimizer.zero_grad()
running_loss = 0
for images, labels in train_loader:
# move data to device
images = images.to(device)
labels = labels.to(device)
labels = labels.view(-1, 1) # reshape labels
labels = torch.round(labels).to(torch.float).view(-1, 1)
# zero gradients
optimizer.zero_grad()
# forward pass
outputs = model(images)
outputs = outputs.view(-1, 1) #reshape output
running_loss = criterion(outputs, labels)
# backward pass
running_loss.backward()
# update model parameters
optimizer.step()
running_loss += running_loss.item()
# print loss and accuracy
print("epoch:", epoch, "loss:", running_loss.item())
running_loss = 0
# validation loss
test_loss = 0
with torch.no_grad():
for images, labels in val_loader:
# move data to device
images = images.to(device)
labels = labels.to(device)
labels = torch.round(labels).to(torch.float).view(-1, 1)
# forward pass
outputs = model(images)
outputs = outputs.view(-1, 1) # reshape output
test_loss += criterion(outputs, labels)
test_loss.backward()
optimizer.step()
test_loss += test_loss.item()
# print loss and accuracy
print("epoch:", epoch, "loss:", running_loss.item(), "test_loss:", test_loss.item())
evaluate model on test data
with torch.no_grad():
test_loss = 0
for images, labels in test_generator:
# move data to device
images = images.to(device)
labels = labels.to(device)
labels = labels.view(-1, 1) # reshape labels
# forward pass
test_outputs = model(images)
test_loss += criterion(test_outputs, labels)
test_loss = test_loss / len(test_generator)
print("Test loss:", test_loss.item())
ValueError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_14840\2968328638.py in
30 outputs = model(images)
31 outputs = outputs.view(-1, 1) #reshape output
—> 32 running_loss = criterion(outputs, labels)
33
34 # backward pass
~\anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
~\anaconda3\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
718
719 def forward(self, input: Tensor, target: Tensor) → Tensor:
→ 720 return F.binary_cross_entropy_with_logits(input, target,
721 self.weight,
722 pos_weight=self.pos_weight,
~\anaconda3\lib\site-packages\torch\nn\functional.py in binary_cross_entropy_with_logits(input, target, weight, size_average, reduce, reduction, pos_weight)
3158
3159 if not (target.size() == input.size()):
→ 3160 raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
3161
3162 return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)
ValueError: Target size (torch.Size([4, 1])) must be the same as input size (torch.Size([16, 1]))