This is how I train my model. Where in this code can I subtract from the target tensor?
def train(self, X_train, Y_train, X_test, Y_test):
    # Wrap the numpy arrays as tensors; images become (N, 1, 32, 32)
    tensor_train_x = torch.Tensor(X_train.reshape(-1, 1, 32, 32))  # transform to torch tensor
    tensor_train_y = torch.Tensor(Y_train)
    tensor_train_y = tensor_train_y.long()
    trainset = torch.utils.data.TensorDataset(tensor_train_x, tensor_train_y)
    trainloader = torch.utils.data.DataLoader(trainset)

    tensor_test_x = torch.Tensor(X_test.reshape(-1, 1, 32, 32))  # transform to torch tensor
    tensor_test_y = torch.Tensor(Y_test)
    tensor_test_y = tensor_test_y.long()
    testset = torch.utils.data.TensorDataset(tensor_test_x, tensor_test_y)
    testloader = torch.utils.data.DataLoader(testset)

    model = ConvNet()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=self.learning_rate)

    # Train the model
    total_step = len(trainloader)
    loss_list = []
    acc_list = []
    for epoch in range(self.epoch):
        for i, (images, labels) in enumerate(trainloader):
            # Run the forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss_list.append(loss.item())

            # Backprop and perform Adam optimisation
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Track the accuracy
            total = labels.size(0)
            _, predicted = torch.max(outputs.data, 1)
            correct = (predicted == labels).sum().item()
            acc_list.append(correct / total)

            if (i + 1) % 2000 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'
                      .format(epoch + 1, self.epoch, i + 1, total_step, loss.item(),
                              (correct / total) * 100))