Hi all! How can I solve the following issue: `element 0 of tensors does not require grad and does not have a grad_fn`?
I want to find the solution to a 2nd-order ODE.
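From what I understand, this error normally means that `backward()` is being called on a tensor that is not connected to the autograd graph. A toy example (not my actual code) that reproduces the same message:

```python
import torch

a = torch.tensor([1.0, 2.0])   # note: no requires_grad=True
loss = (a ** 2).mean()         # loss has no grad_fn
loss.backward()                # RuntimeError: element 0 of tensors does not require grad ...
```

But in my code I do set `requires_grad=True` on the tensors involved, so I don't see where the graph gets broken.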
My code is as follows:
```python
import torch
from torch.autograd import Variable


def loss_fun(inputs, var):
    n = inputs.size()[0]
    loss = torch.tensor(0.0, requires_grad=True, dtype=torch.float64)
    loss_fun.outputs = torch.zeros(inputs.size())
    for i in range(n):
        x = Variable(inputs[i], requires_grad=True)
        output = sum_wrapper(x, var)
        loss_fun.outputs[i] = output
        # first derivative dy/dx
        dydx_1 = torch.autograd.grad(output, x, grad_outputs=torch.ones_like(output),
                                     create_graph=True)[0]
        # second derivative d2y/dx2
        dydx_2 = torch.autograd.grad(dydx_1, x, grad_outputs=torch.ones_like(dydx_1),
                                     create_graph=True)[0]
        eq1 = dydx_2 + output  # ODE residual: y'' + y = 0
        loss = loss.add(torch.mean(eq1**2))
    # add the initial condition point y(x=0) = 0.0
    ic = sum_wrapper(torch.tensor([0.], dtype=torch.float64), var)
    loss = loss.add(ic**2)
    return loss
```
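The double call to `torch.autograd.grad` with `create_graph=True` is the part I am least sure about, so here is a minimal standalone sketch of how I understand the second derivative is computed (with a toy function `y = x**3` standing in for `sum_wrapper`):

```python
import torch

# toy stand-in for sum_wrapper: y = x^3, so dy/dx = 3x^2 and d2y/dx2 = 6x
x = torch.tensor([1.0], requires_grad=True)
y = x ** 3
dydx = torch.autograd.grad(y, x, grad_outputs=torch.ones_like(y),
                           create_graph=True)[0]
d2ydx2 = torch.autograd.grad(dydx, x, grad_outputs=torch.ones_like(dydx),
                             create_graph=True)[0]
print(dydx.item(), d2ydx2.item())  # expected: 3.0 and 6.0
```

The optimization loop is: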
```python
var = Variable(inits, requires_grad=True)
opt = torch.optim.Adam([var], lr=lr, eps=0.001)
losses = []
varss = []
for it in range(steps):
    varss.append(var.detach().clone())
    # Feedforward and backpropagation + step
    opt.zero_grad()
    loss = loss_fun(x_data, var)
    losses.append(loss.item())
    loss.backward()
    opt.step()
    print('it {}, loss {}'.format(it, loss.item()))
```
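If it helps with diagnosis, these are the checks I was planning to add right before the `backward()` call (just attribute inspection, not part of the code above), to see whether the loss is still connected to the graph:

```python
# right before loss.backward():
print(loss.requires_grad, loss.grad_fn)   # expecting True and a non-None grad_fn
print(var.requires_grad, var.is_leaf)     # expecting True, True
```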