def training(train_model, number_of_epochs=3, batch_size=10, learning_rate=1e-3):
criterion = nn.MSELoss() # mean square error loss
optimizer = torch.optim.Adam(train_model.parameters(),
lr=learning_rate,
weight_decay=1e-5) # <--
for epoch in range(number_of_epochs): # loop over the dataset multiple times
running_loss = 0
total_train = 0
correct_train = 0
for i, data in enumerate(traindataloader):
# get the inputs
t_image, mask = data
# print(t_image)
# print(mask)
t_image, mask = Variable(t_image), Variable(mask)
print(type(mask))
# zeroes the gradient buffers of all parameters
optimizer.zero_grad()
# forward + backward + optimize
outputs = train_model(t_image) # forward
# print(type(outputs))
loss = criterion(outputs,t_image ) # calculate the loss
loss.backward() # back propagation
optimizer.step() # update gradients
running_loss += loss.item()
# accuracy
_, predicted = torch.max(outputs.data, 1)
# print(predicted)
total_train += t_image.nelement()
correct_train +=(predicted==t_image).sum().item()
print(correct_train)
print(total_train)
train_accuracy =100*(correct_train / total_train)
#avg_accuracy = train_accuracy / len(train_loader)
print("Epoch {}, train Loss: {:.3f},Training Accuracy = {}".format(epoch, loss.item(),train_accuracy))
Result: The accuracy is 966.73 which is, I understand is because of the division of correct_train and total_train, but the accuracy should not be like that, why it is so ???
<class ‘torch.Tensor’>
7424540
768000
Epoch 0, train Loss: 0.044,Training Accuracy = 966.7369791666667
<class ‘torch.Tensor’>
14163910
1536000
Epoch 0, train Loss: 0.041,Training Accuracy = 922.1295572916666
<class ‘torch.Tensor’>