How to compute the validation loss? (Simple linear regression)

I am currently learning how to use PyTorch to build a neural network. I have used Keras before, and I would like to do the same things in PyTorch: something like ‘model.fit’, and plotting a graph that contains both the training loss and the validation loss.

In order to know whether the model is underfitting or not, I want to plot a graph comparing the training loss and the validation loss.

However, I cannot compute the right validation loss. I know that gradients should only be computed and the parameters only updated during training, not during testing/validation. If the parameters are not updated, does that mean the loss will not change? Sorry, my understanding is still fuzzy, but I don't think so: the loss should still be computed by comparing the expected output with the prediction using the loss function.

In my code, 80 samples are used for training and 20 samples are used for validation. The neural network is trying to predict this formula: y = 2X^3 + 7X^2 - 8X + 120. It is easy to compute, so I use it for learning how to build a neural network with PyTorch.
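(For reference, test_100.csv is just 100 (x, y) pairs computed from that formula. Roughly how it could be generated; the x range here is my own arbitrary choice:)

# Hypothetical generator for test_100.csv; the x range is an arbitrary choice
import numpy as np
import pandas as pd

x = np.linspace(-10, 10, 100)            # 100 input values
y = 2*x**3 + 7*x**2 - 8*x + 120          # the target formula
pd.DataFrame({'x': x, 'y': y}).to_csv('test_100.csv', index=False)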

Here is my code:

import torch
import torch.nn as nn    #neural network model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn.preprocessing import MinMaxScaler

#Load datasets
dataset = pd.read_csv('test_100.csv')

X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, -1:].values

X_scaler = MinMaxScaler()
Y_scaler = MinMaxScaler()
print(X_scaler.fit(X))
print(Y_scaler.fit(Y))
X = X_scaler.transform(X)
Y = Y_scaler.transform(Y)

x_temp_train = X[:80]   # first 80 samples for training
y_temp_train = Y[:80]
x_temp_test = X[80:]    # last 20 samples for validation
y_temp_test = Y[80:]

X_train = torch.FloatTensor(x_temp_train)
Y_train = torch.FloatTensor(y_temp_train)
X_test = torch.FloatTensor(x_temp_test)
Y_test = torch.FloatTensor(y_temp_test)

D_in = 1 # D_in is input features
H = 24 # H is hidden dimension
D_out = 1 # D_out is output features.

#Define an artificial neural network model
class Net(nn.Module):
#------------------Two Layers------------------------------
    def __init__(self, D_in, H, D_out):
        super(Net, self).__init__()

        self.linear1 = nn.Linear(D_in, H)  
        self.linear2 = nn.Linear(H, D_out)
        
    def forward(self, x):
        h_relu = self.linear1(x).clamp(min=0)
        prediction = self.linear2(h_relu)
        return prediction

model = Net(D_in, H, D_out)
print(model)

#Define a Loss function and optimizer
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.2) #2e-7

#Training
inputs = Variable(X_train)
outputs = Variable(Y_train)
inputs_val = Variable(X_test)
outputs_val = Variable(Y_test)
loss_values = []
val_values = []
epoch = []
for phase in ['train', 'val']:
    if phase == 'train':
        model.train()  # Set model to training mode
    else:
        optimizer.zero_grad() #zero the parameter gradients
        model.eval()   # Set model to evaluate mode
    for i in range(50):      #epoch=50
        if phase == 'train':
            model.train()
            prediction = model(inputs)
            loss = criterion(prediction, outputs) 
            print('train loss')
            print(loss)
            loss_values.append(loss.detach())
            optimizer.zero_grad() #zero the parameter gradients
            epoch.append(i)
            loss.backward()       #compute gradients(dloss/dx)
            optimizer.step()      #updates the parameters
        elif phase == 'val':
            model.eval()
            prediction_val = model(inputs_val)
            loss_val = criterion(prediction_val, outputs_val) 
            print('validation loss')
            print(loss)
            val_values.append(loss_val.detach())
            optimizer.zero_grad() #zero the parameter gradients
          
plt.plot(epoch,loss_values)
plt.plot(epoch, val_values)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','validation'], loc='upper left')
plt.show()

Here is the result:

train loss
tensor(0.9788, grad_fn=<MseLossBackward>)
tensor(2.0834, grad_fn=<MseLossBackward>)
tensor(3.2902, grad_fn=<MseLossBackward>)
tensor(0.8851, grad_fn=<MseLossBackward>)
tensor(0.0832, grad_fn=<MseLossBackward>)
tensor(0.0402, grad_fn=<MseLossBackward>)
tensor(0.0323, grad_fn=<MseLossBackward>)
tensor(0.0263, grad_fn=<MseLossBackward>)
tensor(0.0217, grad_fn=<MseLossBackward>)
tensor(0.0181, grad_fn=<MseLossBackward>)
tensor(0.0153, grad_fn=<MseLossBackward>)
tensor(0.0132, grad_fn=<MseLossBackward>)
tensor(0.0116, grad_fn=<MseLossBackward>)
tensor(0.0103, grad_fn=<MseLossBackward>)
tensor(0.0094, grad_fn=<MseLossBackward>)
tensor(0.0087, grad_fn=<MseLossBackward>)
tensor(0.0081, grad_fn=<MseLossBackward>)
tensor(0.0077, grad_fn=<MseLossBackward>)
tensor(0.0074, grad_fn=<MseLossBackward>)
tensor(0.0072, grad_fn=<MseLossBackward>)
tensor(0.0070, grad_fn=<MseLossBackward>)
tensor(0.0068, grad_fn=<MseLossBackward>)
tensor(0.0067, grad_fn=<MseLossBackward>)
tensor(0.0067, grad_fn=<MseLossBackward>)
tensor(0.0066, grad_fn=<MseLossBackward>)
tensor(0.0065, grad_fn=<MseLossBackward>)
tensor(0.0065, grad_fn=<MseLossBackward>)
tensor(0.0065, grad_fn=<MseLossBackward>)
tensor(0.0064, grad_fn=<MseLossBackward>)
tensor(0.0064, grad_fn=<MseLossBackward>)
tensor(0.0064, grad_fn=<MseLossBackward>)
tensor(0.0064, grad_fn=<MseLossBackward>)
tensor(0.0063, grad_fn=<MseLossBackward>)
tensor(0.0063, grad_fn=<MseLossBackward>)
tensor(0.0063, grad_fn=<MseLossBackward>)
tensor(0.0063, grad_fn=<MseLossBackward>)
tensor(0.0063, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0062, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)

validation loss
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)
tensor(0.0061, grad_fn=<MseLossBackward>)

I don’t know why the validation loss is always the same. It shows up as a flat line in the graph, which is not what I want.

Thanks for your time!

Based on the output it seems your model training has reached a plateau, so you would need to play around with some hyperparameters, such as the learning rate etc.
Is the training loss decreasing in the following epochs or is it also approx. constant?

Also, Variables are deprecated since PyTorch 0.4, so you can use tensors now. :wink:
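For example, the Variable wrappers in your code can simply be dropped and the tensors passed to the model directly (just a minimal sketch using the names from your snippet):

# No Variable needed since PyTorch 0.4 -- plain tensors work directly
prediction = model(X_train)
loss = criterion(prediction, Y_train)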

Thanks for the response!
The problem was solved by modifying the order of training and validation.

Original (wrong) procedure:

1. all epochs of training
2. all epochs of validation

Correct procedure:

1. epoch 1 training
2. epoch 1 validation
3. epoch 2 training
4. epoch 2 validation

…and so on, alternating training and validation every epoch. A sketch of the corrected loop is below.
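For anyone who finds this later, here is roughly what my corrected loop looks like, reusing the variable names from the code above (the torch.no_grad() block disables gradient tracking during validation):

epochs = 50
loss_values, val_values = [], []

for i in range(epochs):
    # ---- training phase ----
    model.train()
    prediction = model(X_train)
    loss = criterion(prediction, Y_train)
    optimizer.zero_grad()          # zero the parameter gradients
    loss.backward()                # compute gradients
    optimizer.step()               # update the parameters
    loss_values.append(loss.item())

    # ---- validation phase ----
    model.eval()
    with torch.no_grad():          # no gradients needed for validation
        prediction_val = model(X_test)
        loss_val = criterion(prediction_val, Y_test)
    val_values.append(loss_val.item())

plt.plot(range(epochs), loss_values)
plt.plot(range(epochs), val_values)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()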