I am getting an accuracy of 0.0 each time. Why? Thank you in advance.

import copy

import numpy as np
import torch
import torch.nn as nn

class tabular_model(nn.Module):
    def __init__(self):
        super().__init__()
        self.input = nn.Linear(13, 250)
        self.hidden1 = nn.Linear(250, 250)
        self.hidden2 = nn.Linear(250, 500)
        self.output = nn.Linear(500, 1)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.25)

    def forward(self, x):
        x = self.relu(self.input(x))
        x = self.dropout(x)
        x = self.relu(self.hidden1(x))
        x = self.dropout(x)
        x = self.relu(self.hidden2(x))
        x = self.dropout(x)
        x = self.output(x)
        return x

net = tabular_model()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum = 0.9)
loss_func = nn.MSELoss()

def functiontotrain():

    theBestModel = {"Accuracy": 0, "net": None}

    losses = np.zeros(numepochs)
    trainAcc = []
    testAcc = []

    for epochi in range(numepochs):

        print(epochi, end=', ')

        # net.to(device)
        net.train()

        for X, y in train_dataloaders:

            X = X.to(device)
            y = y.to(device)

            batchLoss = []
            batchAcc = []

            yHat = net(X)
            loss = loss_func(yHat, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batchLoss.append(loss.item())
            batchAcc = (100*torch.mean((yHat == y).float()).item())

        trainAcc.append(np.mean(batchAcc))

        losses[epochi] = np.mean(batchLoss)

        if trainAcc[-1] > theBestModel['Accuracy']:
            theBestModel['net'] = trainAcc[-1].item()

            theBestModel['net'] = copy.deepcopy(net.state_dict())

    return trainAcc, losses, theBestModel

Based on your code, it seems you are comparing floating point values directly:

batchAcc = (100*torch.mean((yHat == y).float()).item())

which will often show mismatches due to the limited floating point precision.
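For example, a quick check with made-up tensors (the values here are hypothetical, not from your run) shows that exact equality between continuous model outputs and float targets is essentially never True:

import torch

yHat = torch.randn(4, 1)            # e.g. raw model outputs
y = yHat + 1e-6                     # targets that differ only slightly
print((yHat == y).float().mean())   # tensor(0.) -> "accuracy" of 0.0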
Could you explain your use case a bit more and what the output and target represent?
If you are working on a classification use case, you might need to create predicted class labels from the output before comparing the results.
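As a minimal sketch, assuming a binary classification setup in which the model returns a single logit per sample (guessing from your nn.Linear(500, 1) output layer) and y contains 0./1. targets, the accuracy could be computed like this:

import torch

yHat = torch.randn(8, 1)                      # hypothetical logits from net(X)
y = torch.randint(0, 2, (8, 1)).float()       # hypothetical 0/1 targets

preds = (torch.sigmoid(yHat) > 0.5).float()   # logits -> predicted class labels
acc = 100 * (preds == y).float().mean().item()

# for a multi-class model with C output units you would use argmax instead:
# preds = yHat.argmax(dim=1)

If the target is actually a continuous value (i.e. a regression), an exact-match accuracy is not a meaningful metric, and you would track the loss or a tolerance-based metric instead.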