My logistic regression code errors out and I don't know what to do (beginner asking for help)

import torch
import matplotlib.pyplot as plt
import numpy as np
import torch.nn.functional as F

with open('data.txt') as f:
    data_list = [i.split('\n')[0].split(',') for i in f.readlines()]
data = [(float(i[0]),float(i[1]),float(i[2])) for i in data_list]

x0_max = max([i[0] for i in data])
x1_max = max([i[1] for i in data])
data = [(i[0]/x0_max, i[1]/x1_max, i[2]) for i in data]

x0 = list(filter(lambda x:x[-1]==0.0,data))
x1 = list(filter(lambda x:x[-1]==1.0,data))

plot_x0 = [i[0] for i in x0]
plot_y0 = [i[1] for i in x0]
plot_x1 = [i[0] for i in x1]
plot_y1 = [i[1] for i in x1]

#plt.plot(plot_x0,plot_y0,'ro',label='x_0')
#plt.plot(plot_x1,plot_y1,'bo',label='x_1')
#plt.legend(loc='best')

x_data = [(i[0],i[1]) for i in data]
y_data = [(i[2]) for i in data]
x_data = torch.tensor(x_data)
y_data = torch.tensor(y_data)#.unsqueeze(1)

class Net(torch.nn.Module):
    def __init__(self,n_in,n_hide,n_out):
        super(Net,self).__init__()
        self.hide = torch.nn.Linear(n_in,n_hide)
        self.out = torch.nn.Linear(n_hide,n_out)
    def forward(self,x):
        x = F.relu(self.hide(x))
        x = self.out(x)
        return x
net = Net(n_in=2,n_hide=10,n_out=2)

loss_func = torch.nn.BCELoss()
optim = torch.optim.SGD(net.parameters(),lr = 0.2,momentum=0.9)

print(x_data.shape)
print(y_data.shape)

for i in range(1000):
    pre = net(x_data)

    _, predicted = torch.max(pre.data, 1)

    print(pre.shape)
    loss = loss_func(pre,y_data)

    optim.zero_grad()
    loss.backward()
    optim.step()
    #print(loss.item())

"""
data.txt
34.62365962451697,78.0246928153624,0
30.28671076822607,43.89499752400101,0
35.84740876993872,72.90219802708364,0
60.18259938620976,86.30855209546826,1
79.0327360507101,75.3443764369103,1
45.08327747668339,56.3163717815305,0
61.10666453684766,96.51142588489624,1
75.02474556738889,46.55401354116538,1
76.09878670226257,87.42056971926803,1
84.43281996120035,43.53339331072109,1
95.86155507093572,38.22527805795094,0
75.01365838958247,30.60326323428011,0
... (and many more lines)
"""

You need a sigmoid layer as the output of your model for the BCELoss.
Alternatively, you could keep your model as it is currently and use BCEWithLogitsLoss.
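
A minimal, self-contained sketch of both options (the Sequential model, tensor names, and shapes below are illustrative stand-ins, not the code from the post; note the single output unit so that predictions and targets both have shape (N, 1)):

import torch

x = torch.rand(4, 2)                              # 4 samples, 2 features
y = torch.tensor([0., 0., 1., 1.]).unsqueeze(1)   # targets, shape (4, 1)

model = torch.nn.Sequential(
    torch.nn.Linear(2, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1),                       # one output unit
)

# Option 1: apply the sigmoid yourself and use nn.BCELoss.
probs = torch.sigmoid(model(x))                   # probabilities in (0, 1)
loss1 = torch.nn.BCELoss()(probs, y)

# Option 2: feed the raw logits straight to nn.BCEWithLogitsLoss.
loss2 = torch.nn.BCEWithLogitsLoss()(model(x), y)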

Before computing the loss, add pre = F.sigmoid(pre).
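
Dropped into the training loop from the post, that looks roughly like this (torch.sigmoid is used here instead, since F.sigmoid is deprecated in recent PyTorch releases):

for i in range(1000):
    pre = net(x_data)
    pre = torch.sigmoid(pre)      # squash logits to (0, 1) before nn.BCELoss
    loss = loss_func(pre, y_data)

    optim.zero_grad()
    loss.backward()
    optim.step()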

Thanks, I've resolved the issue! But I have a follow-up question: someone said the output layer shouldn't be activated. Isn't the output layer supposed to be left without an activation?

nn.BCELoss expects probabilities from a sigmoid layer, while nn.BCEWithLogitsLoss expects the raw logits.
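
A quick check of that distinction with made-up tensors (illustrative only): the two losses agree once the sigmoid is applied by hand.

import torch

logits = torch.randn(4, 1)                        # raw model outputs
target = torch.tensor([[0.], [1.], [1.], [0.]])

loss_a = torch.nn.BCEWithLogitsLoss()(logits, target)
loss_b = torch.nn.BCELoss()(torch.sigmoid(logits), target)

print(torch.allclose(loss_a, loss_b))             # True

In practice nn.BCEWithLogitsLoss is also the safer choice, since it fuses the sigmoid and the log into a single numerically stable operation.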

Thanks, I got it!