Thank you both for your answers, I see it clearly now.
Here I use the -= operator, so I need to zero the gradients (w.grad and b.grad) in order to loop the gradient computation.
import torch

# h_calculate and error come from the earlier posts in this thread;
# the definitions below are minimal stand-ins (my assumption) so the
# snippet runs on its own:
def h_calculate(inputs, w, b):      # linear model: inputs @ w + b
    return inputs @ w + b

def error(y_hat, y):                # simple mean-squared-error loss
    return ((y_hat - y) ** 2).mean()

lr = 0.02
b = torch.tensor([0.7], dtype=torch.float32, requires_grad=True)        # bias
w = torch.tensor([0.3, -0.8], dtype=torch.float32, requires_grad=True)  # weights
inputs = torch.tensor([[0.5, 1.2], [-0.8, 0.6]])
y = torch.tensor([1], dtype=torch.float32)

for i in range(5):
    print("Iteration=", i)
    y_hat = h_calculate(inputs, w, b)
    y_hat_final = torch.sigmoid(y_hat)
    loss = error(y_hat_final, y)
    loss.backward()
    print(loss)
    with torch.no_grad():
        # out-of-place alternative (see the second snippet):
        # w = w - lr * w.grad
        # b = b - lr * b.grad
        w -= lr * w.grad   # in-place update on the leaf tensors
        b -= lr * b.grad
        print(w)
        print(b)
    w.grad.zero_()   # .grad accumulates across backward() calls, so reset it
    b.grad.zero_()
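A side note on the torch.no_grad() context: it is what makes the in-place update legal at all, because autograd forbids in-place operations on leaf tensors that require grad. Here is a minimal standalone sketch (with a made-up one-element tensor, not the model above) showing the error you get without it:

import torch

w = torch.tensor([0.3], requires_grad=True)
loss = (w * 2).sum()
loss.backward()              # w.grad is now tensor([2.])

try:
    w -= 0.02 * w.grad       # in-place update outside no_grad
except RuntimeError as e:
    print(e)                 # "a leaf Variable that requires grad is
                             #  being used in an in-place operation"

with torch.no_grad():
    w -= 0.02 * w.grad       # same update, now allowed
print(w)                     # tensor([0.2600], requires_grad=True)

And the zero_() calls are needed because backward() accumulates into .grad; without them, the second iteration would add the new gradient on top of the old one instead of replacing it.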
But here I no longer need to zero the gradients, since after the update w and b point to brand-new tensors; I just have to set requires_grad back to True so the gradients can be computed in the next loop iteration.
b = torch.tensor([0.7], dtype=torch.float32, requires_grad=True)        # bias
w = torch.tensor([0.3, -0.8], dtype=torch.float32, requires_grad=True)  # weights

for i in range(5):
    print("i=", i)
    y_hat = h_calculate(inputs, w, b)
    y_hat_final = torch.sigmoid(y_hat)
    loss = error(y_hat_final, y)
    loss.backward()
    print(loss)
    with torch.no_grad():
        w = w - lr * w.grad      # rebinds w and b to brand-new tensors
        b = b - lr * b.grad
        w.requires_grad = True   # the new tensors must opt back into autograd
        b.requires_grad = True
        # in-place alternative (see the first snippet):
        # w -= lr * w.grad
        # b -= lr * b.grad
        print(w)
        print(b)
    print(w.grad)   # None: nothing has accumulated on the new tensors yet
    print(b.grad)
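To double-check that the reassignment really produces a fresh tensor (which is why there is nothing left to zero), here is a small standalone sketch (again with made-up values) comparing object identity and the grad attribute before and after the update:

import torch

lr = 0.02
w = torch.tensor([0.3, -0.8], requires_grad=True)
loss = (w ** 2).sum()
loss.backward()              # w.grad is tensor([ 0.6000, -1.6000])

old_id = id(w)
with torch.no_grad():
    w = w - lr * w.grad      # rebinds the name w to a brand-new tensor
    w.requires_grad = True

print(id(w) == old_id)       # False: a different tensor object
print(w.grad)                # None: no gradient accumulated on it yet

For what it's worth, the first pattern (in-place update under no_grad, then zeroing the grads) is essentially what optimizer.step() followed by optimizer.zero_grad() does if you create the optimizer as torch.optim.SGD([w, b], lr=lr).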