Is my loss curve correct? If yes, why is my loss 980641.0?

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.metrics import mean_squared_error, mean_absolute_error
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as opt

df = pd.read_csv(".../csvs/Housing_Prices/miami-housing.csv")

df["laverage"] = df["SALE_PRC"] + df["TOT_LVG_AREA"]
df["SPEC_FEAT_VAL_pre_saleprice"] = df["SALE_PRC"] + df["SPEC_FEAT_VAL"]
df["structure_quality_per_saleprice"] = df["SALE_PRC"] + df["structure_quality"]
df["neg_CNTR_DIST_per_saleprice"] = df["SALE_PRC"] + df["CNTR_DIST"]
df["neg_PARCELNO_per_saleprice"] = df["SALE_PRC"] + df["PARCELNO"]
df["LONGITUDE_per_saleprice"] = df["SALE_PRC"] + df["LONGITUDE"]

fold = KFold(n_splits=10, shuffle=True)
for train_idx, test_idx in fold.split(X, y):
    X_train, X_test = X[train_idx], X[test_idx]
    y_train, y_test = y[train_idx], y[test_idx]

X_train = X_train.float()
X_test = X_test.float()
y_train = y_train.float()
y_test = y_test.float()

print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)

class House_price_predictions(nn.Module):

    def __init__(self, input_dims=22, hidden_units=125, output_dims=1):
        super().__init__()
        self.droprate = 0.2
        self.dropout = nn.Dropout(p=self.droprate)
        self.prob_comp = nn.Softmax(dim=1)
        self.activation = nn.LeakyReLU()

        self.ll1 = nn.Linear(in_features=input_dims, out_features=hidden_units)
        self.ll2 = nn.Linear(in_features=hidden_units, out_features=hidden_units)

        self.ll3 = nn.Linear(in_features=hidden_units, out_features=hidden_units)
        self.ll4 = nn.Linear(in_features=hidden_units, out_features=hidden_units)

        self.ll5 = nn.Linear(in_features=hidden_units, out_features=hidden_units)
        self.ll6 = nn.Linear(in_features=hidden_units, out_features=hidden_units)

        self.ll7 = nn.Linear(in_features=hidden_units, out_features=hidden_units)
        self.ll8 = nn.Linear(in_features=hidden_units, out_features=hidden_units)

        self.ll9 = nn.Linear(in_features=hidden_units, out_features=hidden_units)
        self.ll10 = nn.Linear(in_features=hidden_units, out_features=output_dims)

    def forward(self, X):

        X = self.activation(self.ll1(X))
        X = self.activation(self.ll2(X))
        X = self.dropout(X)

        X = self.activation(self.ll3(X))
        X = self.activation(self.ll4(X))
        X = self.dropout(X)

        X = self.activation(self.ll5(X))
        X = self.activation(self.ll6(X))
        X = self.dropout(X)

        X = self.activation(self.ll7(X))
        X = self.activation(self.ll8(X))
        X = self.dropout(X)

        X = self.activation(self.ll9(X))
        X = self.activation(self.ll10(X))
        X = self.dropout(X)

        X = self.prob_comp(X)

        return X

class training_and_testing():

    def __init__(self):
        self.lr = 1e-4
        self.device = T.device("cuda:0" if T.cuda.is_available() else "cpu")
        self.epochs = 50
        self.loss = nn.MSELoss()
        self.model = House_price_predictions().to(self.device)
        self.criterion = opt.Adam(self.model.parameters(), lr=self.lr)

        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test

    def train_loop(self):

        for epochs in range(self.epochs):
            train_acc, test_acc = [], []

            self.model.train()

            forward_pass = self.model(self.X_train)

            loss_fn = self.loss(forward_pass, self.y_test)
            loss_fn.backward()

            self.criterion.zero_grad()
            self.criterion.step()

            train_acc.append(loss_fn.item())

            self.model.eval()

            with T.inference_mode():
                test_pred = self.model(self.X_train)
                eval_loss = self.loss(test_pred, self.y_test)
                test_acc.append(eval_loss / len(eval_loss))
                print(f"Epoch [{epochs+1}/{self.epochs}], Train Loss: {train_acc}, Eval Loss: {test_acc}")

        plt.plot(range(1, self.epochs + 1), train_acc, label="Train_Acc", c="cyan")
        plt.plot(range(1, self.epochs + 1), eval_loss, label="Evaluation_Acc", c="blue")
        plt.xlabel("Training")
        plt.ylabel("Evaluation")
        plt.title("Model Performance")
        plt.legend(loc=(1, 0))
        plt.show()

    def parameters(self):
        return self.model.state_dict()

    def predict(self):
        pred = self.model(X_test)

        mae = mean_absolute_error(self.y_test.detach().numpy(), pred.detach().numpy())

        mse = mean_squared_error(self.y_test.detach().numpy(), pred.detach().numpy())

        print(f"mean_absolute_error is {mae}")
        print(f"mean_squared_error is {mse}")

training_and_testing().train_loop()
training_and_testing().predict()

Your code has a few issues:

  • Using a softmax with nn.MSELoss is unusual. This looks like a regression problem, so I would remove the softmax entirely.
  • Applying softmax to a single value does not make sense anyway: your model returns one value per sample, and nn.Softmax(dim=1) over a single output always yields 1.0, so every prediction is a constant.
  • You are calling zero_grad() after backward() but before step(), which wipes out the gradients before the optimizer can use them, so your parameters are never updated. The order should be zero_grad(), then backward(), then step() (see the sketch after this list).
  • Your losses compare mismatched splits: the training loss is computed between model(self.X_train) and self.y_test, and the evaluation pass also feeds self.X_train. Compare each split's predictions with that split's own targets.
  • train_acc and test_acc are re-initialized inside the epoch loop, so by the time you plot them they only hold the final epoch's value; define them before the loop if you want an actual loss curve.
  • As for the magnitude itself: MSE is reported in squared target units, so with unscaled sale prices in the hundreds of thousands of dollars a six- or seven-digit loss value is expected. Scale the target (you already import MinMaxScaler) or report RMSE/MAE if you want interpretable numbers.
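
Putting that together, here is a minimal sketch of a corrected loop. It is not your full pipeline: the random X_train/y_train tensors stand in for your real splits, and names like train_losses and eval_losses are mine. It shows a plain regression head with no softmax, the zero_grad/backward/step order, matched inputs and targets per split, and loss histories that survive across epochs:

import torch as T
import torch.nn as nn
import torch.optim as opt

# Placeholder data standing in for your real splits (22 features, 1 target).
X_train, y_train = T.randn(800, 22), T.randn(800, 1)
X_test, y_test = T.randn(200, 22), T.randn(200, 1)

# Plain regression head: no softmax, and no activation/dropout after the last layer.
model = nn.Sequential(
    nn.Linear(22, 125), nn.LeakyReLU(),
    nn.Linear(125, 125), nn.LeakyReLU(), nn.Dropout(p=0.2),
    nn.Linear(125, 1),
)

loss_fn = nn.MSELoss()
optimizer = opt.Adam(model.parameters(), lr=1e-4)

train_losses, eval_losses = [], []  # defined outside the loop so the history is kept

for epoch in range(50):
    model.train()
    optimizer.zero_grad()                     # 1. clear old gradients
    loss = loss_fn(model(X_train), y_train)   # train predictions vs. train targets
    loss.backward()                           # 2. compute new gradients
    optimizer.step()                          # 3. update the parameters
    train_losses.append(loss.item())

    model.eval()
    with T.inference_mode():                  # test predictions vs. test targets
        eval_losses.append(loss_fn(model(X_test), y_test).item())

Plotting train_losses and eval_losses against range(1, len(train_losses) + 1) then gives a meaningful curve, and if you scale the target to [0, 1] first, the loss values should come out as small fractions rather than six-digit numbers.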