Solving the Euler beam equation with PINNs

Hi, I'm new to using PINNs, and I'm attempting to solve the Euler beam equation with them. However, I can't reproduce the analytical solution and I can't see where I've gone wrong, so I'm reaching out for help. I suspect that the issue lies in how I've computed the gradients, or perhaps I need to normalize something for the model to train properly.

The problem is d^4w/dx^4 = q(x) (with EI = 1), subject to the boundary conditions w(0) = 0, w'(0) = 0, w''(L) = 0, w'''(L) = 0 (a cantilever beam, clamped at x = 0 and free at x = L).
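
For reference, the closed-form deflection that the plot at the end is compared against (assuming a uniform load q(x) = q0 and EI = 1; this is the standard cantilever result) is

$$
w(x) = \frac{q_0\,x^2}{24\,EI}\left(6L^2 - 4Lx + x^2\right)
$$
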
Here is my code

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

So we solve the residual form d^4w/dx^4 - q(x) = 0.

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using {device} device")

Define a class that builds the neural network by subclassing torch.nn.Module:

class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_stack = nn.Sequential(
            nn.Linear(1, 10),   # one input: x
            nn.Tanh(),
            nn.Linear(10, 10),
            nn.Tanh(),
            nn.Linear(10, 10),
            nn.Tanh(),
            nn.Linear(10, 10),
            nn.Tanh(),
            nn.Linear(10, 1),   # one output: w(x)
        )

    def forward(self, x):
        x = self.flatten(x)
        return self.linear_stack(x)

model=NeuralNetwork().to(device)

print(model)

Define the loss used to measure the error to minimize:

lossd=nn.MSELoss()

Define the optimizer algorithm:

optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

Define the training routine.

Boundary condition points:

x0 = torch.tensor([[0.0001]], dtype=torch.float32, device=device, requires_grad=True)  # clamped end (x close to 0)
xl = torch.tensor([[1.0]], dtype=torch.float32, device=device, requires_grad=True)     # free end (x = L)
print(x0.size())
print(xl.size())

def pde(x, model):
    u = model(x)  # network prediction w(x)
    u_x    = torch.autograd.grad(u, x, torch.ones_like(u), create_graph=True)[0]
    u_xx   = torch.autograd.grad(u_x, x, torch.ones_like(u_x), create_graph=True)[0]
    u_xxx  = torch.autograd.grad(u_xx, x, torch.ones_like(u_xx), create_graph=True)[0]
    u_xxxx = torch.autograd.grad(u_xxx, x, torch.ones_like(u_xxx), create_graph=True)[0]
    return u, u_x, u_xx, u_xxx, u_xxxx
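
To check that this chain of nested torch.autograd.grad calls behaves as expected, here is a minimal standalone sketch (not part of the script above; the names are illustrative): it differentiates a known polynomial four times, and for u = x^4 the fourth derivative should be 24 everywhere.

import torch

# Standalone check of the nested autograd.grad pattern used in pde():
# for u = x^4 the fourth derivative is 24 everywhere.
x_test = torch.linspace(0.0, 1.0, 5).reshape(-1, 1).requires_grad_(True)
u_test = x_test**4
for _ in range(4):
    u_test = torch.autograd.grad(u_test, x_test, torch.ones_like(u_test), create_graph=True)[0]
print(u_test)  # expect a column of 24.0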

E = 1        # Young's modulus
I = 1        # Moment of inertia (m^4)
q0 = -0.001  # Uniform distributed load

Compute the derivatives to impose the BCs.

# generate collocation points
X = np.linspace(0.0001, 1, num=100)
X = np.reshape(X, (-1, 1))
qxx = np.ones_like(X) * q0
qx = torch.from_numpy(qxx).float().to(device)  # target load values, no gradient needed

X = torch.tensor(X, dtype=torch.float32, device=device, requires_grad=True)
nepoch=20000
stop=True
epoch=1
while stop:
    optimizer.zero_grad()

    # Evaluate the model at the BC points
    bc1, bc2, _, _, _ = pde(x0, model)   # w(0) = 0, w'(0) = 0
    _, _, bc3, bc4, _ = pde(xl, model)   # w''(L) = 0, w'''(L) = 0

    # PDE residual on the collocation points
    _, _, _, _, f = pde(X, model)

    loss2 = torch.mean(bc1**2) + torch.mean(bc2**2) + torch.mean(bc3**2) + torch.mean(bc4**2)
    loss1 = lossd(f, qx)
    loss = loss1 + loss2
    loss.backward()

    optimizer.step()  # equivalent to: theta_new = theta_old - alpha * dJ/dtheta
    with torch.no_grad():
        print(f"Epoch {epoch}, training loss: {loss.item():.3e}")
    epoch += 1
    if loss.item() < 1e-6 or epoch > nepoch:  # stop on convergence or after nepoch epochs
        stop = False

model.eval()

yh = model(X).detach().cpu().numpy().reshape(-1, 1)
x = np.linspace(0.0001, 1, num=100)  # same grid as the collocation points
print(yh)
L = 1

# Analytical cantilever deflection under uniform load
yhat = q0 * x**2 / (24 * E * I) * (6 * L**2 - 4 * L * x + x**2)

plt.figure(figsize=(10, 8))
plt.plot(x, yh, '-k', label='PINN')
plt.plot(x, yhat, '-r', label='analytical')
plt.legend()

plt.show()