I’m not sure if this is the right place to post this question, but I’m trying to do something like this:
class NeuralNet(nn.Module):
    """Siamese-style pairwise scorer: one shared MLP scores both inputs.

    Both branches use the same ``self.layers`` module, so gradients from
    either output update the same weights — which is what makes pairwise
    ranking training work.

    Args:
        input_size: dimensionality of each input vector.
        hidden_size: width of the first hidden layer (second is half of it).
    """

    def __init__(self, input_size, hidden_size):
        super(NeuralNet, self).__init__()
        # Shared scoring network: input -> hidden -> hidden//2 -> 1 scalar score.
        self.layers = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.Linear(hidden_size // 2, 1),
        )

    def forward(self, x1, x2):
        # Score each input with the shared network.
        out1 = self.layers(x1)
        out2 = self.layers(x2)
        # torch.sigmoid replaces the deprecated F.sigmoid (removed in recent
        # PyTorch releases); squashes each score into (0, 1).
        out1 = torch.sigmoid(out1)
        out2 = torch.sigmoid(out2)
        return out1, out2
# Build the pairwise scorer and an Adam optimizer over all of its parameters.
# NOTE(review): input_size, hidden_size and learning_rate are assumed to be
# defined earlier in the script — they are not visible in this snippet.
model = NeuralNet(input_size, hidden_size)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
In the training loop:
# One training step on a pair of inputs (input1, input2 come from the dataset).
output1, output2 = model(input1, input2)
# NOTE(review): the target below is derived from the model's OWN outputs, so
# the "label" always agrees with the current prediction — there is no external
# ground truth here, and the loss carries no real learning signal. The label
# should come from the dataset (which of the two inputs truly ranks higher).
if output1> output2:
    label = torch.ones([1,1])
else:
    label = torch.zeros([1,1])
# criterion is called as criterion(score1, score2, target) — this matches the
# signature of nn.MarginRankingLoss; presumably that (or similar) is intended.
loss = criterion(output1, output2, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
Doing something like below does not seem to work:
criterion = nn.BCELoss()
# NOTE(review): `>` between tensors is a hard, non-differentiable comparison —
# its result is detached from the autograd graph, so no gradient can flow back
# to the network through it. Setting requires_grad afterwards does not
# reconnect it (and raises on bool dtype in recent PyTorch); this is why
# backward() cannot train the model here.
output = output1>output2
output.requires_grad=True
# BCELoss also expects float probabilities in (0, 1), not a hard 0/1 outcome.
# A differentiable alternative is torch.sigmoid(output1 - output2) as the
# predicted probability that input1 outranks input2 (the RankNet formulation).
loss = criterion(output, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
Is there a custom loss function that does something like this? Or is BCELoss not a good choice of loss function in this case? I’m trying to train the model to do something like ranking given two inputs.