Thank you again for your comment. I am close to understanding the issue now.
May I ask once more how I can use torch.autograd.gradcheck, for future reference?
You mentioned above:
grad_chk = torch.autograd.gradcheck(
    eval_g, torch.tensor([k], dtype=torch.double, requires_grad=True))
But it gives an error…
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~/Dropbox/tmp_pytorch.py in <module>
156 # grad = eval_jac_g(np.array([k]), False)
157 grad_chk = torch.autograd.gradcheck(
--> 158 eval_g, torch.tensor([k], dtype=torch.double, requires_grad=True))
159 print(grad_chk)
160
~/.pyenv/versions/anaconda3-5.3.0/envs/adapt/lib/python3.7/site-packages/torch/autograd/gradcheck.py in gradcheck(func, inputs, eps, atol, rtol, raise_exception, check_sparse_nnz, nondet_tol)
258
259 func_out = func(*tupled_inputs)
--> 260 output = _differentiable_outputs(func_out)
261
262 if not output:
~/.pyenv/versions/anaconda3-5.3.0/envs/adapt/lib/python3.7/site-packages/torch/autograd/gradcheck.py in _differentiable_outputs(x)
182
183 def _differentiable_outputs(x):
--> 184 return tuple(o for o in _as_tuple(x) if o.requires_grad)
185
186
~/.pyenv/versions/anaconda3-5.3.0/envs/adapt/lib/python3.7/site-packages/torch/autograd/gradcheck.py in <genexpr>(.0)
182
183 def _differentiable_outputs(x):
--> 184 return tuple(o for o in _as_tuple(x) if o.requires_grad)
185
186
AttributeError: 'numpy.ndarray' object has no attribute 'requires_grad'
How should I fix it?
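As far as I can tell, the call pattern itself works when the checked function returns a torch tensor; here is a minimal toy sketch of what I mean (the cube function below is made up purely for illustration and is not part of my model):

import torch

def toy_func(x):
    # The output stays a torch tensor, so gradcheck can inspect requires_grad on it
    return (x ** 3).sum()

inp = torch.tensor([2.0], dtype=torch.double, requires_grad=True)
# Should print True if the numerical and analytical gradients agree
print(torch.autograd.gradcheck(toy_func, (inp,)))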
The function whose gradient I want to check is defined as follows:
def eval_g(x):
    """ The system of non-linear equilibrium conditions
    x[0]: Capital stock in the next period
    """
    assert len(x) == nvar
    # Consumption today
    con = a * k**alpha * ls**(1-alpha) - x[0]
    # Labor supply tomorrow
    # ls_plus = np.empty(x5.shape)
    ls_plus = torch.empty(x5.shape)
    for aplus_idx, aplus_val in enumerate(aplus):
        ls_plus[aplus_idx] = ls_compute(
            k=x[0], A=aplus_val, alpha=alpha, psi=psi, theta=theta)
    # Capital stock tomorrow
    k_plusplus = torch.empty(aplus.shape)
    for aplus_idx, aplus_val in enumerate(aplus):
        # state_plus = torch.tensor([x[0], aplus_val])[None, :].float()
        k_plusplus[aplus_idx] = torch.tensor(k)  # Needs to be modified
    # Consumption tomorrow
    con_plus = aplus * x[0]**alpha * ls_plus**(1-alpha) - k_plusplus
    # ----------------------------------------------------------------------- #
    # Euler equation
    # ----------------------------------------------------------------------- #
    g0 = 1 / con - beta * alpha * torch.sum(omega5 * (
        1 / con_plus * aplus * x[0]**(alpha-1) * ls_plus**(1-alpha)))
    return np.array([g0])
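From the traceback, my guess is that the problem is the final return np.array([g0]): the output is a NumPy array, so gradcheck cannot see requires_grad on it. Would it be enough to return the result as a torch tensor instead, for example (just a sketch of the change I have in mind):

# instead of: return np.array([g0])
return g0.reshape(1)  # keep the output a torch tensor so requires_grad is preserved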