【PyTorch】A question about the network output being NaN

Univariate (simple) linear regression.
This is the network:

class LinearRegression(nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        x = self.linear(x)
        return x
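
For reference, a minimal sketch of instantiating this module and running a forward pass on a dummy batch (the dummy tensor here is illustrative, not from the original post):

import torch as t
import torch.nn as nn

net = LinearRegression()        # the class defined above
dummy_x = t.randn(4, 1)         # a batch of 4 scalar inputs
print(net(dummy_x))             # a (4, 1) tensor; the values should be finite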

This is the entire code:

import torch as t
from matplotlib import pyplot as plt
from IPython import display
import torch.nn as nn
from torch.autograd import Variable
t.manual_seed(1000)
def get_fake_data(batch_size = 800):
    """y=2*x+3,生成x,并加入噪声"""
    x = t.rand(batch_size, 1) * 20
    y = 2 * x + 3 + t.randn(batch_size, 1)
    return x, y


x, y = get_fake_data()
plt.scatter(x.numpy(), y.numpy())
#  randomly initialize the parameters
w = t.randn(1, 1)
b = t.zeros(1, 1)
lr = 0.001  # learning rate


class LinearRegression(nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        x = self.linear(x)
        return x


print(x[1], y[1])
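
Note that w, b, and lr are defined above but never used in the code shown. If the missing training step updates them with plain gradient descent, then with inputs as large as 20 the squared-error gradients are big, and a learning rate much larger than lr = 0.001 can push w and b to inf and make every prediction NaN. A minimal sketch of such a manual loop, assuming MSE loss (this is an assumption about the missing step, not the original post's code):

for epoch in range(1000):
    y_pred = x @ w + b                 # (800, 1) @ (1, 1) + (1, 1) -> (800, 1)
    err = y_pred - y
    loss = 0.5 * (err ** 2).mean()     # mean squared error

    dw = x.t() @ err / y.numel()       # d(loss)/dw, shape (1, 1)
    db = err.mean()                    # d(loss)/db

    w -= lr * dw                       # plain gradient-descent updates
    b -= lr * db

    if not t.isfinite(loss):           # stop early if the loss has blown up
        print('loss became inf/nan at epoch', epoch)
        break

print(w.item(), b.item())              # should be drifting toward 2 and 3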

Thank you for your help

Could you describe your problem more clearly? I can run the following code and get the correct result.

import torch as t
import numpy as np
from matplotlib import pyplot as plt
from IPython import display
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim

t.manual_seed(1000)
def get_fake_data(batch_size = 800):
    x = t.rand(batch_size, 1) * 20
    y = 2 * x + 3 + t.randn(batch_size, 1)
    return x, y


x, y = get_fake_data()
w = t.randn(1, 1)
b = t.zeros(1, 1)
lr = 0.001  # learning rate


class LinearRegression(nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        x = self.linear(x)
        return x


net = LinearRegression()
net.train()
loss_function = nn.L1Loss()
opt = optim.Adam(net.parameters())
bs = 20                                   # mini-batch size: 800 samples -> 40 batches
for ep in range(1000):
    ep_loss = list()
    for i in range(x.size(0) // bs):      # iterate over the 40 mini-batches
        opt.zero_grad()
        y_pred = net(x[i*bs:(i+1)*bs, ...])
        loss = loss_function(y_pred, y[i*bs:(i+1)*bs, ...])   # (input, target)
        loss.backward()
        ep_loss.append(loss.item())
        opt.step()
    if ep % 100 == 0:
        print(np.mean(ep_loss))           # mean L1 loss over the epoch

test_x, test_y = get_fake_data(batch_size = 40)
with t.no_grad():
    net.eval()
    y_pred = net(test_x)
    print((y_pred - test_y).abs().mean())   # 0.6565
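
If the outputs on your side do come out as NaN, it also helps to check where the NaN first appears (data, predictions, or parameters) and to enable autograd's anomaly detection; a short debugging sketch, assuming the net, x, and y defined above (these checks are an addition, not part of the reply):

print(t.isnan(x).any(), t.isnan(y).any())     # is the data itself clean?
with t.no_grad():
    print(t.isnan(net(x)).any())              # do the predictions contain NaN?
for name, p in net.named_parameters():
    print(name, t.isnan(p).any().item())      # did a weight or bias become NaN?

t.autograd.set_detect_anomaly(True)           # make backward() raise at the op that produced NaN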