Computing training/test error rates and accuracy

I trained a neural network to look at my training and test accuracy and error rates. Does this look correct? Can I trust my own code? I want to get an idea of the bias and variance.

Sorry for my English, I am using Google Translate :slight_smile:

import torch
import torch.nn as nn

# use the GPU if one is available, otherwise the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class ANN(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(ANN, self).__init__()
        # three fully connected hidden layers with SELU activations
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.aktivasyon1 = nn.SELU()
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.aktivasyon2 = nn.SELU()
        self.fc3 = nn.Linear(hidden_dim, hidden_dim)
        self.aktivasyon3 = nn.SELU()
        self.fc4 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        x = self.fc1(x)
        x = self.aktivasyon1(x)
        x = self.fc2(x)
        x = self.aktivasyon2(x)
        x = self.fc3(x)
        x = self.aktivasyon3(x)
        x = self.fc4(x)  # raw logits; CrossEntropyLoss applies log-softmax itself
        return x
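Since you ask whether you can trust your own code: one quick sanity check is to push a random batch through an untrained network and confirm the output shape. A minimal sketch (the batch size of 4 is arbitrary):

dummy = torch.randn(4, 28 * 28)     # a fake batch of four flattened 28x28 images
out = ANN(28 * 28, 150, 10)(dummy)
print(out.shape)                    # expected: torch.Size([4, 10])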

input_dim = 28 * 28   # each MNIST image flattened to 784 values
hidden_dim = 150
output_dim = 10       # one logit per digit class

model = ANN(input_dim, hidden_dim, output_dim).to(device)

optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

criterion = nn.CrossEntropyLoss()  # renamed from "error" so the name is not reused below
loss_list = []
num_epochs = 10  # not shown in the original snippet; use whatever you trained with

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_l):
        # flatten each 28x28 image into a 784-dimensional vector
        # (Variable is deprecated since PyTorch 0.4 and can simply be dropped)
        images = images.view(-1, 28 * 28).float().to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        out = model(images)
        loss = criterion(out, labels)
        loss.backward()
        optimizer.step()

    loss_list.append(loss.item())  # record the last batch loss of every epoch
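To get a feel for bias, it also helps to look at how the loss falls over the epochs. A minimal sketch using matplotlib (this assumes loss_list was filled once per epoch, as in the loop above):

import matplotlib.pyplot as plt

plt.plot(range(1, num_epochs + 1), loss_list)
plt.xlabel('epoch')
plt.ylabel('training loss (last batch)')
plt.show()

If the curve is still clearly falling at the last epoch, the model is likely underfitting, and more epochs or a higher learning rate than 0.001 could lower the training error.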


def check_accuracy(loader, model):
    total = 0
    correct = 0
    model.eval()  # evaluation mode: no dropout / batch-norm updates
    with torch.no_grad():
        for images, labels in loader:
            images = images.view(-1, 28 * 28).float().to(device)
            labels = labels.to(device)

            out = model(images)
            y_pred = torch.max(out, 1)[1]  # index of the largest logit = predicted class
            total += len(labels)
            correct += (y_pred == labels).sum().item()

    accuracy = 100 * correct / float(total)
    error_rate = 100 - accuracy  # renamed so it does not clash with the loss function

    print('Accuracy: {}% Error: {}%'.format(accuracy, error_rate))
    model.train()

print("Train Result:")
check_accuracy(train_l, model)
print("----------------------------------------------------")
print("Test Result:")
check_accuracy(test_l, model)

-> Train Result:
Accuracy: 89% Error: 11%

-> Test Result:
Accuracy: 88% Error: 12%

It looks like you can trust your code. Training and test error are close (11% vs. 12%), so the gap between them, which is what reflects variance, is small and the model is not overfitting. The 11% training error itself is a rough measure of bias; if you want it lower, training for more epochs or raising the learning rate are the usual first things to try.
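If you want to put rough numbers on the bias and variance, here is a minimal sketch. accuracy_of is a hypothetical variant of your check_accuracy that returns the accuracy instead of printing it:

def accuracy_of(loader, model):
    # same loop as check_accuracy, but returns the accuracy as a float
    total, correct = 0, 0
    model.eval()
    with torch.no_grad():
        for images, labels in loader:
            images = images.view(-1, 28 * 28).float().to(device)
            labels = labels.to(device)
            y_pred = model(images).argmax(dim=1)
            total += len(labels)
            correct += (y_pred == labels).sum().item()
    model.train()
    return 100 * correct / float(total)

train_err = 100 - accuracy_of(train_l, model)  # ~bias: error on data the model has seen
test_err = 100 - accuracy_of(test_l, model)
gap = test_err - train_err                     # ~variance: how much worse unseen data is
print('bias ~ {:.1f}%, variance ~ {:.1f}%'.format(train_err, gap))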

Thank you, my friend. These comments made me happy.