Hi,
I'm trying to train my GoogLeNet model on the CelebA dataset (using list_landmarks_align_celeba as the labels), and I'm facing an issue with loss.backward() in the code below:
import torch
import torch.nn as nn
import torch.optim as optim

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = GoogLeNet()
criterion = nn.functional.mse_loss
model.to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
inputs, labels = next(iter(trl))  # trl is the CelebA training DataLoader
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
# Forward pass: GoogLeNet returns the main output and two auxiliary outputs
prediction0, aux_pred_1, aux_pred_2 = model(inputs)
real_loss = criterion(prediction0, labels)
aux_loss_1 = criterion(aux_pred_1, labels)
aux_loss_2 = criterion(aux_pred_2, labels)
# Auxiliary losses are down-weighted by 0.3, as in the GoogLeNet paper
loss = real_loss + 0.3 * aux_loss_1 + 0.3 * aux_loss_2
loss.backward()
# optimizer.step()
which produces this output:
out shape is torch.Size([2, 128, 4, 4])
out shape is torch.Size([2, 128, 4, 4])
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/tmp/ipykernel_3736/3018090650.py in <module>
16 aux_loss_2 = criterion(aux_pred_2, labels)
17 loss = real_loss + 0.3 * aux_loss_1 + 0.3 * aux_loss_2
---> 18 loss.backward()
19 # optimizer.step()
~/anaconda3/envs/py35-2/lib/python3.9/site-packages/torch/_tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
485 inputs=inputs,
486 )
--> 487 torch.autograd.backward(
488 self, gradient, retain_graph, create_graph, inputs=inputs
489 )
~/anaconda3/envs/py35-2/lib/python3.9/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
195 # some Python versions print out the first line of a multi-line function
196 # calls in the traceback and some print out the last line
--> 197 Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
198 tensors, grad_tensors_, retain_graph, create_graph, inputs,
199 allow_unreachable=True, accumulate_grad=True) # Calls into the C++ engine to run the backward pass
RuntimeError: Found dtype Long but expected Float
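The traceback points at the labels' dtype: nn.functional.mse_loss needs the prediction and the target in the same floating-point dtype, but the landmark annotations from list_landmarks_align_celeba load as integer (Long) tensors. The forward pass happens to go through, and the mismatch only surfaces in loss.backward(). A minimal sketch of the likely fix, assuming labels holds the integer landmark coordinates:

# Cast the Long landmark coordinates to Float before computing the loss;
# without this, backward raises "Found dtype Long but expected Float".
inputs, labels = next(iter(trl))
inputs = inputs.to(device)
labels = labels.to(device).float()

If the dataset is torchvision.datasets.CelebA, the same cast can instead be applied once via its target_transform argument, so every batch already arrives as Float.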