I have some basic doubts.
- If loss.backward() is called, shouldn't x_data and y_data have been created with requires_grad=True?
- In the line y_pred = model(x_data), how does this call the forward function? In __init__, x_data is not taken as a parameter and forward is never called explicitly. How is it returning the predicted y?
- What does model(hour_var).data[0][0].item() mean in the last statement?
Thanks
from torch import nn
import torch
from torch import tensor
# Training data: inputs and targets, one sample per row (targets are 2 * input).
x_data = tensor([[float(v)] for v in (1, 2, 3)])
y_data = tensor([[float(v)] for v in (2, 4, 6)])
class Model(nn.Module):
    """Simple linear-regression model: one input feature, one output.

    Subclasses nn.Module so that calling model(x) invokes forward(x)
    via nn.Module.__call__.
    """

    def __init__(self):
        # NOTE: the constructor must be named __init__ (double underscores);
        # a method named `init` would never be called and self.linear would
        # not exist.
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(1, 1)  # One in and one out

    def forward(self, x):
        """Return the model's prediction for input batch x of shape (N, 1)."""
        y_pred = self.linear(x)
        return y_pred
model = Model()
# Sum-of-squared-errors loss; note the quotes must be plain ASCII quotes,
# not typographic ones, or Python raises a SyntaxError.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Standard training loop: forward pass, loss, zero stale gradients,
# backprop, parameter update.
for epoch in range(500):
    y_pred = model(x_data)           # calls Model.forward via nn.Module.__call__
    loss = criterion(y_pred, y_data)
    print(f'Epoch: {epoch} | Loss: {loss.item()}')
    optimizer.zero_grad()            # clear gradients accumulated last step
    loss.backward()                  # compute d(loss)/d(parameter) for all params
    optimizer.step()                 # apply the SGD update
# After training: predict for a new input (x = 4).
hour_var = tensor([[4.0]])
y_pred = model(hour_var)
# .item() extracts the single Python float from a one-element tensor;
# the older .data[0][0] indexing is unnecessary (and .data is discouraged).
print("Prediction (after training)", 4, model(hour_var).item())