Hi Pytorch community,
I’m new to Pytorch and relatively new to neural networks.
What I want to build is a network simulating a human learning task, where a stimulus of 2 dimensions with different SNRs maps onto a binary response. I have thus created my binary target vector (y) and an input vector (x) with the mean shifted positive/negative depending on the target response.
The network doesn’t seem to learn - the accuracy stays at 50% and the loss also only decreases marginally. I have played with the parameters (learning rate, weight initialisation etc), but nothing changed. I’ve now been stuck at this point for days and couldn’t find any help in the discussion forum so far, so I’d really appreciate any advice on what I’m doing wrong!
Thanks a lot.
Here’s my code:
y = torch.empty(10000,1, dtype=torch.float).random_(2)
batchlen = torch.Tensor.nelement(y)
x = torch.empty(10000,1, dtype=torch.float)
for t in range(batchlen):
if y[t] == 0:
x[t] = torch.randn(1)*(-1)
elif y[t] == 1:
x[t] = torch.randn(1)
sig_m = 1.3
sig_c = .1
x_m = xsig_m
x_c = xsig_c
x = torch.cat((x_m,x_c),1)
x.requires_grad=True
class Network(nn.Module):
def init(self):
super().init()
self.hidden = nn.Linear(2,100)
self.output = nn.Linear(100,1)
self.relu = nn.ReLU()
self.softmax = nn.Sigmoid()
def weights_init(self):
for module in self.modules():
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean = 0, std = 0.1)
def forward(self, x):
x = self.hidden(x)
x = self.relu(x)
x = self.output(x)
x = self.softmax(x)
return(x)
model = Network()
model.weights_init()
criterion = nn.BCELoss()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
def binary_acc(pred, target):
pred_tag = torch.round(torch.sigmoid(pred))
correct_results_sum = (pred_tag == target).sum().float()
acc = correct_results_sum/target.shape[0]
acc = torch.round(acc * 100)
return acc
loss_all = np.zeros((batchlen))
acc_all = np.zeros((batchlen))
for e in range(batchlen):
running_loss = 0
running_acc = 0
out = model(x)
loss = criterion(out,y)
acc = binary_acc(out,y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
running_acc += acc.item()
acc_all[e] = acc.item()
loss_all[e] = loss.item()
else:
print(f"Training loss: {running_loss}")
print(f"Training accuracy: {running_acc}")