Hi!
I’ve written up a simple(ish) model by myself for the first time, without relying on a tutorial.
However, when I run it, the test loop reports an accuracy of 0.
I know this is a broad question, but can anyone help me figure out why this is happening?
The code:
# -*- coding: utf-8 -*-
import torch
import numpy as np
import matplotlib.pyplot as plt
import random as rand
from torch.utils.data import Dataset
from torch import nn
from datetime import datetime
dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
"""Putting in the training and test data. The test data is generated from random X and Y points from a linear function: `y = mx + c`."""
x_points = []
y_points = []
for series in range(100):
    x_point = rand.uniform(-100, 100)
    x_points.append(x_point)
    y_points.append(22 * x_point + 12)
print(len(x_points))
print(x_points)
print(y_points)
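# Optional sanity check (matplotlib is already imported, so we may as well
# use it): a quick scatter plot to confirm the points lie on the line.
plt.scatter(x_points, y_points, s=4)
plt.title("Generated data: y = 22x + 12")
plt.xlabel("x")
plt.ylabel("y")
plt.show()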
class NeuralNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.network = nn.Sequential(
            nn.Linear(1, 5),
            nn.ReLU(),
            nn.Linear(5, 5),
            nn.ReLU(),
            nn.Linear(5, 1),
        )

    def forward(self, x):
        prediction = self.network(x)
        return prediction
model = NeuralNet().to(dev)
print(model)
# Hyperparameters
lr = 0.001
batch_size = 32
epochs = 100
loss_fn = nn.MSELoss()
optim = torch.optim.Adam(model.parameters(), lr=lr)
# optim.zero_grad()  # Use purely for debugging and troubleshooting :)
# Training Loop
def training_loop(x_points, y_points, model, loss_fn, optimiser):
    model.train()
    counter = 0
    for mouse in x_points:
        # Wrap around if we've run past the end of the data
        if counter >= len(x_points):
            counter = 0
        x = x_points[counter]
        y = y_points[counter]
        counter = counter + 1
        # Get an output based on an input from the model
        x = torch.tensor([x], device=dev)
        y = torch.tensor([y], device=dev)
        prediction = model(x)
        # Compute loss and backpropagate
        loss = loss_fn(prediction, y)
        loss.backward()
        optimiser.step()
        optimiser.zero_grad()
        # Provide an output to render the user less bored
        if counter % 100 == 0:
            print(f"Loss: {loss.item():.6f}  Sample: {counter}/{len(x_points)}")
# Testing Loop!
def testing_loop(x_points, y_points, model, loss_fn, log_interval=1000, epoch=0):
    model.eval()
    size = len(x_points)
    correct = 0
    counter = 0
    with torch.no_grad():
        for batch_idx in range(size):
            if counter >= size:
                break  # Exit the loop if we've processed all data points
            x = torch.tensor([x_points[counter]], device=dev)
            y = torch.tensor([y_points[counter]], device=dev)
            prediction = model(x)
            # Convert the prediction to a class label (assuming the model output is probabilities)
            predicted_class = torch.argmax(prediction)
            # Check if the prediction is correct and update the 'correct' counter
            if predicted_class == y:
                correct += 1
            counter += 1
            # Print progress if applicable
            if batch_idx % log_interval == 0:
                print('Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                    epoch, batch_idx, size, 100. * batch_idx / size))
    # Calculate and print accuracy
    accuracy = correct / size
    print(f"Correct predictions: {correct} out of {size}")
    print(f"Accuracy: {accuracy:.2f}")
    return accuracy
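# Aside: since the target is continuous, I also sketched a tolerance-based
# check, treating a prediction as "correct" when it lands within 5% of the
# target. The 5% tolerance (rel_tol) is an arbitrary choice of mine, and I'm
# not sure this is even the right notion of accuracy for this kind of model.
def tolerance_accuracy(x_points, y_points, model, rel_tol=0.05):
    model.eval()
    correct = 0
    with torch.no_grad():
        for x_val, y_val in zip(x_points, y_points):
            prediction = model(torch.tensor([x_val], device=dev)).item()
            if abs(prediction - y_val) <= rel_tol * abs(y_val):
                correct += 1
    return correct / len(x_points)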
for eph in range(epochs):
    print("Epoch: ", (eph + 1), " ----------------------------------------------------")
    training_loop(x_points, y_points, model, loss_fn, optim)
    testing_loop(x_points, y_points, model, loss_fn, 32, eph)
print("*Sigh*")
current_datetime = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
file_name = f"model_params_{current_datetime}.pt"
#torch.save(model.state_dict(), file_name)
#print("Saved as: ", file_name)
Thank you so much!