Can't get `backward()` or `torch.autograd` to work

class call_option_MC(torch.nn.Module):
    """Monte-Carlo pricer for a European call option under driftless
    geometric Brownian motion, simulated over `nb_steps` equal time steps.

    Built entirely from differentiable torch ops so that gradients of the
    option price with respect to the input `states` can be obtained via
    autograd (e.g. `torch.autograd.grad` or `.backward()`).
    """

    def __init__(self, nb_steps):
        # NOTE: the original post had `init` / `super().init()` — markdown
        # stripped the dunder underscores; it must be `__init__`.
        super().__init__()
        self.nb_steps = nb_steps
        # dt = 1 / nb_steps.  (The original `1 / torch.randn(nb_steps).shape[0]`
        # evaluated to exactly the same value, just wastefully.)
        self.sqrt_dt = torch.sqrt(torch.tensor(1.0 / nb_steps))

    def train_set(self, states):
        """Simulate one Monte-Carlo path per row of `states` and return
        `(states, option_price)`.

        Parameters
        ----------
        states : Tensor of shape (batch, 5)
            Columns: initial_price, strike, time_to_expiry, implied_vol,
            risk_free_rate.

        Returns
        -------
        (states, option_price) where option_price has shape (batch,).
        """
        strike = states[:, 1]
        time_to_expiry = states[:, 2]
        implied_vol = states[:, 3]
        risk_free_rate = states[:, 4]

        # Brownian increments, one row per sample: shape (batch, nb_steps).
        dW = self.sqrt_dt * torch.randn(states.shape[0], self.nb_steps)

        # Euler scheme for dS = sigma * S * dW.  Out-of-place updates keep
        # the autograd graph intact (in-place `+=` is what typically breaks
        # backward() here).  Bug fix: the original appended nothing to
        # `path_list`, so `path_list[-1]` raised IndexError.
        path = states[:, 0]
        for step in range(self.nb_steps):
            path = path + path * implied_vol * dW[:, step]

        final_price = path
        discount_factor = torch.exp(-risk_free_rate * time_to_expiry)

        # max(S_T - K, 0), discounted back to today.
        option_payoff = torch.clamp(final_price - strike, min=0.0)
        option_price = discount_factor * option_payoff

        return states, option_price

Hi everyone, I am new here and would need some help. Given the code above, I would like to compute a tensor `derivatives` of shape `(states.shape[0], states.shape[1])` whose element `(i, j)` is the derivative of `option_price[i]` with respect to `states[i, j]`.

I tried using backward or autograd but keep getting errors …

Please post a minimum executable code snippet enclosed within ``` that reproduces your error along with the complete error message.