Why does my code only work at the boundary conditions?

Hello everyone, I have written code to solve the Laplace equation using Physics-Informed Neural Networks (PINNs) on a 10000 × 4 domain. The left and right boundary conditions are u(0, y) = 5 and u(10000, y) = 1, respectively. When I run the code, values only appear at the boundaries; there are no contour lines inside the domain. Additionally, the top and bottom boundaries are specified as no-flow boundaries, but this is not reflected in the plot. Can anyone help me identify the issue?
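
For reference, the boundary-value problem is Laplace's equation with Dirichlet conditions on the left and right edges and homogeneous (no-flow) Neumann conditions on the top and bottom:

$$u_{xx} + u_{yy} = 0 \quad \text{on } (0, 10000) \times (0, 4),$$
$$u(0, y) = 5, \qquad u(10000, y) = 1, \qquad u_y(x, 0) = u_y(x, 4) = 0.$$

Unless I am mistaken, the exact solution is the linear profile $u(x, y) = 5 - \frac{4x}{10000}$, so I expect evenly spaced vertical contour lines across the whole domain.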
Here is my code:
import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn

x_min, x_max, y_min, y_max = 0, 10000, 0, 4
y_range, x_range = np.linspace(0, 4, 5)[1:-1], np.linspace(0, 10000, 12)[1:-1]  # interior boundary points: 3 in y, 10 in x
x_zeros = np.zeros(10)
y_zeros = np.zeros(3)
x_ones = np.ones(10)
y_ones = np.ones(3)

bc_left = np.vstack([y_zeros, y_range, 5 * y_ones]).T  # Boundary left: u(x=0, y) = 5
bc_right = np.vstack([10000 * y_ones, y_range, y_ones]).T  # Boundary right: u(x=10000, y) = 1

boundary_conditions = np.vstack([bc_left, bc_right])
x_bc = boundary_conditions[:, 0]
y_bc = boundary_conditions[:, 1]
u_bc = boundary_conditions[:, 2]

x_bc = torch.autograd.Variable(torch.from_numpy(x_bc).float(), requires_grad=True)
y_bc = torch.autograd.Variable(torch.from_numpy(y_bc).float(), requires_grad=True)
u_bc = torch.autograd.Variable(torch.from_numpy(u_bc).float(), requires_grad=True)
x_range = torch.autograd.Variable(torch.from_numpy(x_range).float(), requires_grad=True)
x_zeros = torch.autograd.Variable(torch.from_numpy(x_zeros).float(), requires_grad=True)
x_ones = torch.autograd.Variable(torch.from_numpy(x_ones).float(), requires_grad=True)

# Collocation points in the domain are 30 uniformly spaced points

nx, ny = (30, 2)
x = np.linspace(0, 10000, 12)[1:-1]
y = np.linspace(0, 4, 5)[1:-1]
xv, yv = np.meshgrid(x, y)
a = np.reshape(xv, (30, 1))
b = np.reshape(yv, (30, 1))
r = np.hstack((a, b))
x_collocation = r[:,0]
y_collocation = r[:,1]
x_collocation = torch.autograd.Variable(torch.from_numpy(x_collocation).float(), requires_grad=True)
y_collocation = torch.autograd.Variable(torch.from_numpy(y_collocation).float(), requires_grad=True)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.neurons_per_layer = 20
        self.fc1 = nn.Linear(2, self.neurons_per_layer)
        self.fc2 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
        self.fc3 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
        self.fc4 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
        self.fc5 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
        self.fc6 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
        self.fc7 = nn.Linear(self.neurons_per_layer, self.neurons_per_layer)
        self.fc8 = nn.Linear(self.neurons_per_layer, 1)
        self.act_func = nn.Sigmoid()

    def forward(self, x, y):
        inputs = torch.cat([x.reshape(-1, 1), y.reshape(-1, 1)], dim=1)
        output = self.act_func(self.fc1(inputs))
        output = self.act_func(self.fc2(output))
        output = self.act_func(self.fc3(output))
        output = self.act_func(self.fc4(output))
        output = self.act_func(self.fc5(output))
        output = self.act_func(self.fc6(output))
        output = self.act_func(self.fc7(output))
        output = self.fc8(output)
        return output

net = Net()
epochs = 1500
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
criterion = torch.nn.MSELoss()

# PDE as a loss function: this function calculates the first and second derivatives, then the PDE-based loss

def f(x, y, net):
    u = net(x, y)
    u_x = torch.autograd.grad(u, x, create_graph=True, retain_graph=True, grad_outputs=torch.ones_like(u))[0]
    u_xx = torch.autograd.grad(u_x, x, create_graph=True, retain_graph=True, grad_outputs=torch.ones_like(u_x))[0]
    u_y = torch.autograd.grad(u, y, create_graph=True, retain_graph=True, grad_outputs=torch.ones_like(u))[0]
    u_yy = torch.autograd.grad(u_y, y, create_graph=True, retain_graph=True, grad_outputs=torch.ones_like(u_y))[0]
    loss_f = u_xx + u_yy  # Laplace residual: should be zero everywhere
    return loss_f

def neumann_bc_loss(net, x, y, target_gradient_value):
    u = net(x, y)
    u_y = torch.autograd.grad(u, y, retain_graph=True, grad_outputs=torch.ones_like(u))[0]
    neumann_loss = torch.nn.functional.mse_loss(u_y, target_gradient_value * torch.ones_like(u_y))
    return neumann_loss

neumann_loss_top = neumann_bc_loss(net, x_range, 4*x_ones, 0)
neumann_loss_bottom = neumann_bc_loss(net, x_range, x_zeros, 0)

losses = []

for epoch in range(epochs):
    optimizer.zero_grad()
    predictions_initial_bc = net(x_bc, y_bc)
    mse_u = criterion(predictions_initial_bc.reshape(-1,), u_bc)  # Loss from the Dirichlet boundary conditions
    f_out = f(x_collocation, y_collocation, net)
    mse_f = criterion(torch.zeros_like(f_out), f_out)  # PDE-based loss evaluated at the collocation points

    loss = mse_u + mse_f + neumann_loss_bottom + neumann_loss_top
    losses.append(loss.item())
    loss.backward()
    optimizer.step()
    if epoch % 100 == 0:
        print(f'Epoch {epoch}/{epochs}: Loss = {loss.item()}')
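
I also collect the per-epoch losses; here is a minimal sketch of how I inspect the training curve from the `losses` list built above (plain matplotlib, nothing special):

plt.figure()
plt.semilogy(losses)  # log scale makes it easier to see whether the loss plateaus
plt.xlabel('epoch')
plt.ylabel('total loss')
plt.show()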

# Create meshgrid points for plotting
x_plot, y_plot = np.meshgrid(np.linspace(0, 10000, 100), np.linspace(0, 4, 100))
x_plot = torch.autograd.Variable(torch.from_numpy(np.reshape(x_plot, (-1, 1)).astype(np.float32)))
y_plot = torch.autograd.Variable(torch.from_numpy(np.reshape(y_plot, (-1, 1)).astype(np.float32)))

# Get network predictions for the meshgrid points
net.eval()
u_values = net(x_plot, y_plot).detach().numpy().reshape(100, 100)

# Plotting
plt.figure(figsize=(10, 6))
plt.pcolormesh(x_plot[:, 0].detach().numpy().reshape(100, 100), y_plot[:, 0].detach().numpy().reshape(100, 100), u_values, shading='auto', cmap='viridis')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Surface Plot of u = net(x, y)')
plt.show()
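
Since my goal is contour lines inside the domain, here is a minimal sketch of how I would overlay them once the interior is non-trivial, reusing x_plot, y_plot, and u_values from above (the contour levels are an arbitrary choice on my part):

X = x_plot[:, 0].detach().numpy().reshape(100, 100)
Y = y_plot[:, 0].detach().numpy().reshape(100, 100)
cs = plt.contour(X, Y, u_values, levels=np.linspace(1, 5, 9), colors='k')  # iso-lines of u
plt.clabel(cs, inline=True, fontsize=8)  # label each line with its u value
plt.show()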

This is what I get (the plot shows values only at the left and right boundaries, with nothing in the interior):