Calculate final model accuracy at the end of training

I would like to calculate the model accuracy at the end of the training. At the moment the test mean error is calculated at the end of each epoch. I’m saving the test mean error at the end of each epoch. Then calculating the accuracy for the previous epochs. The code doesn’t seem to calculate the accuracy for some reason. It only print the test mean error. Please could someone shed some light to what i am doing wrong?

model.eval()
    for images, paths in tqdm(loader_test):
        images = images.to(device)
        targets = torch.tensor([metadata['count'][os.path.split(path)[-1]] for path in paths]) # B
        targets = targets.float().to(device)

        # forward pass:
        output = model(images) # B x 1 x 9 x 9 (analogous to a heatmap)
        preds = output.sum(dim=[1,2,3]) # predicted cell counts (vector of length B)

        # logging:
        loss = torch.mean((preds - targets)**2)
        count_error = torch.abs(preds - targets).mean()
        mean_test_error += count_error
        writer.add_scalar('test_loss', loss.item(), global_step=global_step)
        writer.add_scalar('test_count_error', count_error.item(), global_step=global_step)
        
        global_step += 1

    average_accuracy = 0 
    mean_test_error = mean_test_error / len(loader_test)
    writer.add_scalar('mean_test_error', mean_test_error.item(), global_step=global_step)
    average_accuracy += mean_test_error
    average_accuracy = average_accuracy /len(loader_test)
    print("Average accuracy: %f" % average_accuracy)
    print("Test count error: %f" % mean_test_error)
    if mean_test_error < best_test_error:
        best_test_error = mean_test_error
        torch.save({'state_dict':model.state_dict(),
                    'optimizer_state_dict':optimizer.state_dict(),
                    'globalStep':global_step,
                    'train_paths':dataset_train.files,
                    'test_paths':dataset_test.files},checkpoint_path)