# Calculate jacobian on prediction net

Hi everyone, I’m new to PyTorch and neural networks. I’m working on PINNs and I want to calculate the Jacobian of the Energy (E) with respect to the displacement (the output of my network) — or, equivalently, the derivative of the Energy with respect to the displacement.

My code:

``````def StrainEnergy(self):

E=self.E
nu=self.nu

x=self.X_domain[:,0].reshape(-1,1)
y=self.X_domain[:,1].reshape(-1,1)

X=torch.hstack((x,y))
U = self.uPred(X)
u,v=U[:,[0]],U[:,[1]]

def func(u,v):

sxx = self.E / (1 - self.nu**2) * (ex + self.nu * ey)
syy = self.E / (1 - self.nu**2) * (self.nu * ex +  ey)
sxy = self.E / (2 * (1 + self.nu)) * (exy)
En = 1/2*torch.multiply(sxx,ex)+1/2*torch.multiply(syy,ey)+1/2*torch.multiply(sxy,exy)
return En.sum()

#EnD=1/2*torch.multiply(sxx,ex)+torch.multiply(syy,ey)+torch.multiply(sxy,exy)
#IntEnd=torch.sum(EnD)/EnD.shape[0]*self.Area

vE=jacobian(func,tuple([u,v]),create_graph=True)
print(vE)
#------------------------------------
out_inps = grad(outs, inps, torch.ones_like(inps).to(device),allow_unused=True, create_graph=True, retain_graph=True)[0]
return out_inps
``````

Getting gradients that are all zeros usually means you’ve disconnected your graph, so check for any `detach()` calls.

Also, do make sure to share a minimal reproducible example and not just the `StrainEnergy` function, as that’ll make it easier to find the error.

Hi, the code isn’t complete, but here it is:

``````import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
torch.set_default_dtype(torch.float)
torch.manual_seed(12345)
np.random.seed(1234)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
if device == 'cuda':
print(torch.cuda.get_device_name())

class EnergyPinns(nn.Module):
def __init__(self,inps,units,outs,E=None,nu=None,rou=None):

super().__init__() #call __init__ from parent class
"""
Parameters
----------

E        : TYPE     float.
nu       : TYPE     float
rou :      float, optional
"""
self.E=E
self.nu=nu
self.mu = E / (2 * (1 + nu))
self.lam = (E * nu) / ((1 + nu) * (1 - 2 * nu))
self.inps=inps
self.outs=outs
self.units=units
'activation function'
self.activation = nn.Tanh()
'loss function'
self.loss_function = nn.MSELoss(reduction='sum')

'Initialise neural network as a list using nn.Modulelist'

# Create Architecture
self.Sequetial = nn.Sequential(nn.Linear(inps,units),
nn.Tanh(),
nn.Linear(units,units),
nn.Tanh(),
nn.Linear(units,units),
nn.Tanh(),
nn.Linear(units,units),
nn.Tanh(),
nn.Linear(units,units),
nn.Tanh(),
nn.Linear(units,units),
nn.Tanh(),
nn.Linear(units,units),
nn.Tanh(),
nn.Linear(units,outs))

self.iter = 0

'Xavier Normal Initialization'
# std = gain * sqrt(2/(input_dim+output_dim))
for m in self.Sequetial.modules():
if isinstance(m,nn.Linear):
nn.init.normal_(m.weight,mean=0, std=np.sqrt(2/(units+units)))
nn.init.zeros_(m.bias)
nn.init.normal_(self.Sequetial[0].weight,mean=0, std=np.sqrt(2/(inps+units)))
nn.init.normal_(self.Sequetial[-1].weight,mean=0, std=np.sqrt(2/(units+outs)))

'foward pass'
def forward(self,x):

Y=self.Sequetial(x)
#v=self.Sequetial2(x)

return Y

def uPred(self,x):
Y=self.Sequetial(x)

Ux = x[:, 0] * Y[:, 0]
Uy = x[:, 0] * Y[:, 1]

Ux = Ux.reshape(Ux.shape[0], 1)
Uy = Uy.reshape(Uy.shape[0], 1)

#v=self.Sequetial2(x)
u=torch.cat((Ux, Uy), -1)
return u

def DefineDomain(self,x,y,t):
self.Area=torch.tensor([x[1]*y[1]],dtype=torch.float32,device=device)
self.ldc=y[1]
x = np.linspace(x[0],x[1],num=x[2])
y = np.linspace(y[0],y[1],num=y[2])
X,Y = np.meshgrid(x,y)

#-------------------------------------------------------------------------
#                           Neumann Condition
#-------------------------------------------------------------------------

x_Neu = torch.from_numpy(X[X==x.min()]).reshape(-1,1).float().to(device)
y_Neu = torch.from_numpy(Y[X==x.min()]).reshape(-1,1).float().to(device)
self.X_neu  = torch.hstack([x_Neu,y_Neu]).to(device)

#-------------------------------------------------------------------------
#                           Dirichlet Condition
#-------------------------------------------------------------------------
x_dirC = torch.from_numpy(X[X==x.max()]).reshape(-1,1).float().to(device)
y_dirC = torch.from_numpy(Y[X==x.max()]).reshape(-1,1).float().to(device)
self.X_dirC = torch.hstack([x_dirC,y_dirC]).float().to(device)
#print(self.X_dirC)
self.t=t
#self.t=torch.matmul(torch.ones_like(self.X_dirC),t)

#-------------------------------------------------------------------------
#                                 Domain
#-------------------------------------------------------------------------

#pl=plt.scatter(self.X_domain[:,0].cpu().detach().numpy(),self.X_domain[:,1].cpu().detach().numpy(),color='green')
#pl=plt.scatter(self.X_dirC[:,0].cpu().detach().numpy(),self.X_dirC[:,1].cpu().detach().numpy(),color='blue')
#pl=plt.scatter(self.X_neu[:,0].cpu().detach().numpy(),self.X_neu[:,1].cpu().detach().numpy(),color='red')
#plt.show()

def StrainEnergy(self):
# delete just to try
E=self.E
nu=self.nu

x=self.X_domain[:,0].reshape(-1,1)
y=self.X_domain[:,1].reshape(-1,1)

X=torch.hstack((x,y))
U = self.uPred(X)
u,v=U[:,[0]],U[:,[1]]

def func(u,v):

sxx = self.E / (1 - self.nu**2) * (ex + selfxnu * ey)
syy = self.E / (1 - self.nu**2) * (self.nu * ex +  ey)
sxy = self.E / (2 * (1 + self.nu)) * (exy)
En = 1/2*torch.multiply(sxx,ex)+1/2*torch.multiply(syy,ey)+1/2*torch.multiply(sxy,exy)
return En

#EnD=1/2*torch.multiply(sxx,ex)+torch.multiply(syy,ey)+torch.multiply(sxy,exy)
#IntEnd=torch.sum(EnD)/EnD.shape[0]*self.Area

vE=jacobian(func,tuple([u,v]),create_graph=True)
print(vE)
#SEnd=self.My_Jacob(EnD,ex[10])

pass

#strainEnergy = 0.5*lambda/2*(ln(det(F)))-mu/2*ln(det(F))+
#return EnD

def Functional(self,i):
phi=self.StrainEnergy()
PHI = torch.sum(phi)*self.Area/self.X_domain.shape[0]
u_dirc=self.uPred(self.X_dirC)
u_neu=self.uPred(self.X_neu)
#print(self.X_neu)
bc_neu=torch.zeros_like(u_neu)
fbc = torch.matmul(u_dirc,self.t)
#fbc = torch.multiply(u_dirc[:,1],self.t[1][0])
#print(u_dirc)
#print(torch.matmul(u_dirc,self.t))

FBC = torch.sum(fbc)/fbc.shape[0]*self.ldc
Neu=self.loss_function(u_neu.reshape(-1,1),bc_neu.reshape(-1,1))
print(i,(PHI-FBC)**2,PHI,FBC,Neu)
def Montecarlo2D(self,X,Y):
pass

def train(self):
"""
self.optimizer = torch.optim.LBFGS(self.Sequetial.parameters(),
lr=0.01,
max_iter = 1000,
max_eval = None,
tolerance_change = 1e-9,
history_size = 10000,
)
#start_time = time.time()
self.optimizer.step(self.closure)
"""
self.optimizer2 = torch.optim.SGD(self.Sequetial.parameters(),
lr=0.00001,momentum=0.1)
for i in range(6000):
loss=self.Functional(i)
loss.backward()
self.optimizer2.step()
#"""

#start_time = time.time()

out_inps = grad(outs, inps, torch.ones_like(inps).to(device),allow_unused=True, create_graph=True, retain_graph=True)[0]
return out_inps

def My_Jacob(self,outs,inps):
out_inps = grad(outs, inps, torch.ones_like(inps).to(device), create_graph=True, retain_graph=True)[0]

return out_inps

def closure(self):
#lbc=self.loss_BC(xx)
loss=self.Functional()
loss.backward()
self.iter += 1
#print(loss)
return loss

def evalaute(self,r):
u=self.uPred(self.X_domain)
udc=self.uPred(self.X_dirC)
print(self.X_neu)
print(udc)

U,V = u[:,0],u[:,1]
up,vp=U.cpu().detach().numpy().reshape(r,r),V.cpu().detach().numpy().reshape(r,r)

fig, ax = plt.subplots(2,1,figsize=(5, 10))
pl=ax[0].imshow(up,extent=[0, 10, 0, 1], cmap=plt.cm.rainbow,interpolation='nearest')
pl=ax[1].imshow(vp,extent=[0, 10, 0, 1], cmap=plt.cm.rainbow,interpolation='nearest')

plt.show()

train_p = 0
nepoch = 10000 # the float type
gama = 0.2
E = 70
nu = 0.3
nx = 400
ny = 100
t = torch.tensor(np.array([[0],
[1]
]),dtype=torch.float32,device=device)
r=100
m=EnergyPinns(2,50,2,E,nu)
print(m)
m.DefineDomain([0,4,r],[0,1,r],t)
m.train()
m.evalaute(r)
``````

The goal is to calculate the Jacobian of E with respect to u and v (the outputs of the network), so I can minimize the energy and use the network as an interpolation function, similar to the finite-element approach. But, as I said, I get all zeros. I tried using requires_grad_(True) on u and v, but it doesn’t work. I also tried making the prediction directly inside `func`, but then I have to pass x and y as inputs to the jacobian function, so I get the Jacobian with respect to x and y instead.