# Per epoch loss equation

``````from torch import optim
def fit(train_dl, model, test_dl=None, epoch=1000, lr=0.001):
    """Train ``model`` on ``train_dl`` with SGD + cross-entropy loss.

    Prints the per-epoch mean training loss (and, when ``test_dl`` is
    given, the mean validation loss), weighted by batch size so that a
    smaller final batch does not skew the average.

    Args:
        train_dl: training DataLoader yielding ``(inputs, labels)``.
        model:    the ``nn.Module`` to optimize (modified in place).
        test_dl:  optional validation DataLoader.
        epoch:    number of epochs to train for.
        lr:       SGD learning rate.
    """
    # Local imports keep this snippet self-contained; the surrounding
    # file only imports ``optim`` from torch.
    import torch
    from torch import nn

    criterion = nn.CrossEntropyLoss()
    # Bug fix: use the ``lr`` argument instead of a hard-coded 0.001.
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)

    # Use a distinct loop variable so the ``epoch`` parameter is not shadowed.
    for ep in range(epoch):
        running_loss = 0.0
        model.train()
        for i, (inputs, labels) in enumerate(train_dl):
            # Bug fix: clear gradients every step; without this they
            # accumulate across batches and corrupt the updates.
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # ``loss`` is the batch mean, so weight it by the batch size
            # to get a sum over samples.
            running_loss += loss.item() * inputs.size(0)
        # Dividing by the sampler length (total sample count) yields the
        # per-sample mean loss for the epoch.
        print("Train Loss: ", running_loss / len(train_dl.sampler))

        if test_dl is not None:
            model.eval()
            running_loss = 0
            # no_grad: evaluation needs no graphs/gradients — saves
            # memory and time.
            with torch.no_grad():
                for i, (inputs, labels) in enumerate(test_dl):
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    running_loss += loss.item() * inputs.size(0)
            print("Validation Loss: ", running_loss / len(test_dl.sampler))

    print('Finished Training')
``````

Is the equation for the per epoch loss correct?

Could you explain in more detail what you are trying to do? Is there any particular reason why you are not simply calculating the mean loss of each epoch, doing something like:

``````for epoch in range(num_epochs):
losses = []
for idx, (inputs, labels) in enumerate(train_dl):
...
loss = criterion(outputs, labels)
losses += loss.item()

print(
f"Mean loss at epoch [{epoch}/{num_epochs}] was {sum(losses)/len(losses)}"
)

``````