Dear community,
I'm facing some difficulties again with the autograd.grad calculations. Basically, I have a function f that should return a combination of several first- and second-order derivative terms (an equation of motion). However, the first derivative of u is not being evaluated, since autograd.grad returns a NoneType. I have tried setting the allow_unused flag to True, but I'm still stuck. Could anyone suggest how to proceed?
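For reference, this is the kind of pattern I am trying to reproduce. In this toy, self-contained sketch (not my actual model), u is built from t through differentiable torch operations, and both calls return proper tensors:

import math
import torch

# Minimal standalone sketch (toy example, not my model): u is built from t
# through differentiable torch operations, so autograd can trace u back to t.
t = torch.linspace(0, 1, 100, dtype=torch.float64).view(-1, 1).requires_grad_(True)
u = torch.sin(2 * math.pi * t)  # toy "displacement" u(t)

u_t = torch.autograd.grad(u, t, torch.ones_like(u), create_graph=True)[0]       # du/dt
u_tt = torch.autograd.grad(u_t, t, torch.ones_like(u_t), create_graph=True)[0]  # d^2u/dt^2
print(u_t.shape, u_tt.shape)  # both (100, 1) tensors, no None

In my actual code below, however, the first derivative already comes back as None.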
import numpy as np
import torch
from scipy.integrate import solve_ivp

# Time vector
t_span, dt = (0, 40), 0.01
t_eval = np.arange(t_span[0], t_span[1], dt)

# Calibrated parameters
F0, omega = 1.5, 10
n, uy = 5, 0.04
m, c, k = 3.6e4, 8.05e4, 1.5e4
alpha, A, beta, gamma = 0.1, 1.0, 1/((2*uy)**n), 1/((2*uy)**n)
params_osc = [m, c, k, uy]
params_bw = [alpha, A, n, beta, gamma]
bwmodel_params = params_bw, params_osc

# u, udot, z solutions
sol = u(t_eval, bwmodel_params)

# f = f(u, udot, z)
f_eq = f(t_eval, bwmodel_params)
where I defined:
def f(t, params):
    return f_PDE_true(t, params)

def f_PDE_true(t, params, plotDebug=None):
    params_bw, params_osc = params
    m, c, k, uy = params_osc
    alpha, A, n, beta, gamma = params_bw

    t = ensure_tensor(t)  # equivalent of: t = torch.tensor(t, dtype=torch.float64)
    t = t.view(-1, 1).requires_grad_(True).to('cpu')

    sol = oscillator_bw(t, params)
    u = ensure_tensor(sol.y[0])
    u = u.view(-1, 1).requires_grad_(True).to('cpu')
    print(u.shape, t.shape)

    u_c = u
    print(u, t)
    u_t_c = torch.autograd.grad(u_c, t, torch.ones_like(u_c),
                                create_graph=True, allow_unused=True)[0]   # computes du/dt
    print(u_t_c)
    u_tt_c = torch.autograd.grad(u_t_c, t, torch.ones_like(u_t_c),
                                 create_graph=True, allow_unused=True)[0]  # computes d^2u/dt^2

    return m/9.81*u_tt_c + c*u_t_c + alpha*k*u_c + (1 - alpha)*k*z
def u(t, params):
    return oscillator_bw(t, params)

def oscillator_bw(t, params):
    params_bw, params_osc = params
    m, c, k, uy = params_osc
    alpha, A, n, beta, gamma = params_bw

    # External sinusoidal force at the natural frequency
    F0, omega = 1.5, 10
    f_hz = 1/(2*np.pi) * (k/m)**0.5  # natural frequency in Hz
    omega = 2*np.pi*f_hz             # rad/s
    print(f'fns{f_hz, omega}')
    f = lambda t: F0*m/9.81 * np.sin(omega*t)

    # Initial conditions (displacement, velocity, hysteretic variable z)
    y0 = [0, 0, 0]
    uy = 0.04
    alpha, A, beta, gamma = 0.1, 1, 1/(2*uy)**n, 1/(2*uy)**n
    params_osc = [m/9.81, c, k, uy]
    params_bw = [alpha, A, n, beta, gamma]
    params = [params_bw, params_osc]

    # Solve ODE
    sol = solve_ivp(lambda t, y: bouc_wen(t, y, f(t), params), t_span, y0, t_eval=t_eval)
    return sol
def bouc_wen(t, y, f, params):
    # Extract parameters
    params_bw, params_osc = params[0], params[1]
    m, c, k, uy = params_osc
    alpha, A, n, beta, gamma = params_bw

    # Convert inputs to PyTorch tensors if they aren't already
    y = torch.tensor(y, dtype=torch.float32)
    f = torch.tensor(f, dtype=torch.float32)
    x, v, z = y[0], y[1], y[2]

    # Compute derivatives
    dxdt = v
    dvdt = (1/m) * (f - c*v - alpha*k*x - (1 - alpha)*k*z)
    dzdt = (A - torch.abs(z)**n * (beta + torch.sign(v*z) * gamma)) * v

    return torch.stack([dxdt, dvdt, dzdt])
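For clarity, these routines integrate a Bouc-Wen hysteretic oscillator (reading the equations straight off bouc_wen above), and f_PDE_true is meant to return the left-hand side of the equation of motion:

$$ m\,\ddot{u} + c\,\dot{u} + \alpha k\,u + (1-\alpha)\,k\,z = F(t), \qquad \dot{z} = \big(A - |z|^{n}(\beta + \gamma\,\operatorname{sign}(\dot{u}\,z))\big)\,\dot{u} $$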
Running this, it gives me the following error:
Cell In[16], line 94, in f_PDE_true(t, params, plotDebug)
92 u_t_c = torch.autograd.grad(u_c, t, torch.ones_like(u_c ) , create_graph=True,allow_unused=True)[0]# computes du/dt
93 print(u_t_c)
---> 94 u_tt_c = torch.autograd.grad(u_t_c, t, torch.ones_like(u_t_c), create_graph=True, allow_unused=True)[0]# computes d^2u/dt^2
96 return m/9.81*u_tt_c + c * u_t_c + alpha * k * u_c + (1-alpha)*k*z
TypeError: ones_like(): argument 'input' (position 1) must be Tensor, not NoneType
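My suspicion (which may well be wrong) is that the first autograd.grad call returns None because u is rebuilt from the NumPy output of solve_ivp, so it is no longer connected to t in the autograd graph; with allow_unused=True that silently gives None, and the subsequent torch.ones_like(None) then raises the TypeError above. This toy snippet (not my actual model) reproduces the None:

import numpy as np
import torch

# Toy reproduction (my assumption of what happens in f_PDE_true):
# u_np stands in for the solve_ivp output, i.e. it is produced outside autograd.
t = torch.linspace(0, 1, 100, dtype=torch.float64).view(-1, 1).requires_grad_(True)
u_np = np.sin(2 * np.pi * t.detach().numpy())
u = torch.tensor(u_np).view(-1, 1).requires_grad_(True)  # new leaf, not a function of t

u_t = torch.autograd.grad(u, t, torch.ones_like(u), create_graph=True, allow_unused=True)[0]
print(u_t)  # prints None, since u does not depend on t through the graph

Is this the right reading of the error, and if so, how should I restructure the computation so that u stays differentiable with respect to t?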