I have this simple nonlinerar function that I want to fit data, but loss function keeps increasing after each iteration. Any idea what I am doing wrong?

```
import torch
# w2=2, w1=3
b = 5
x_data = []
y_data = []
for i in range(20):
i = float(i+1)
x_data.append(i)
# y = w2*x^2 + w1*x
y_data.append(i * i * 2 + i * 3)
w1 = torch.tensor([5.0], requires_grad=True)
w2 = torch.tensor([10.0], requires_grad=True)
# our model forward pass
def forward(x):
return x * x * w2 + x * w1
# Loss function
def loss(y_pred, y_val):
return (y_pred - y_val) ** 2
# Before training
print("Prediction (before training)", 4, forward(4).item())
# Training loop
for epoch in range(100):
for x_val, y_val in zip(x_data, y_data):
y_pred = forward(x_val) # 1) Forward pass
l = loss(y_pred, y_val) # 2) Compute loss
l.backward() # 3) Back propagation to update weights
# print("\tgrad: ", x_val, y_val, w.grad.item())
w1.data = w1.data - 0.01 * w1.grad.item()
w2.data = w2.data - 0.01 * w2.grad.item()
# Manually zero the gradients after updating weights
w1.grad.data.zero_()
w2.grad.data.zero_()
print(l.item())
# After training
print("Prediction (after training)", 4, forward(4).item())
```

result:

625.0

3969.0

4106.24658203125

10951.541015625

494030.0

146011072.0

176458432512.0

690122123116544.0

7.44382236290397e+18

1.967258245916323e+23

1.1615622546788777e+28

1.4223510151775662e+33