I’m building a simple autoencoder on the MNIST digits dataset.
The problem I’m facing is that I define three different losses,
and when I combine them the total loss doesn’t decrease, whereas training with only one of them works fine.
epochs = 3
model_2.train()
for e in range(epochs):
    # Running sums of the three losses, averaged and printed every 10 batches.
    loss_r = [0.0, 0.0, 0.0]
    # NOTE(review): zipping two independent DataLoaders only pairs matching
    # samples if BOTH loaders iterate in the same order (shuffle=False, or a
    # shared sampler / identical seed). If either loader shuffles, labels_2
    # does NOT correspond to `images`, and the combined loss cannot decrease —
    # verify this first; it is the most likely cause of the reported problem.
    for r, (train, test) in enumerate(zip(train_loader_2, train_loader)):
        images = train[0].view(-1, 14 * 14)   # 14x14 input for model_2
        labels_1 = train[1]                   # class labels for the x_dec head
        labels_2 = test[0].view(-1, 784)      # 28x28 reconstruction target
        optimizer.zero_grad()
        x, x_en, x_dec = model_2(images)
        loss_1 = criterion_1(x, labels_2)      # final reconstruction loss
        # NOTE(review): x_en (the encoding) is compared against a 784-dim
        # target here — confirm its shape actually matches labels_2.
        loss_2 = criterion_2(x_en, labels_2)   # auxiliary loss
        loss_3 = criterion_3(x_dec, labels_1)  # classification loss
        # Summing and calling .backward() once is equivalent to
        # torch.autograd.backward([loss_1, loss_2, loss_3]): gradients from
        # all three losses accumulate before the optimizer step.
        (loss_1 + loss_2 + loss_3).backward()
        optimizer.step()
        loss_r[0] += loss_1.item()
        loss_r[1] += loss_2.item()
        loss_r[2] += loss_3.item()
        if r % 10 == 9:
            print("epoch = {} batch = {} final_loss={} aux_loss = {} classification_loss={}"
                  .format(e + 1, r + 1, loss_r[0] / 10, loss_r[1] / 10, loss_r[2] / 10))
            loss_r = [0.0, 0.0, 0.0]
What seems to be the problem here?