Hi everyone,
I have the following code. The validation accuracy and loss (I am training and validating on the same dataset) are not converging!
idx_l is a list of size 96.
I really appreciate any help you can provide.
class Net(nn.Module):
    """Two-layer MLP: Linear -> activation -> Dropout -> Linear -> sigmoid.

    Args:
        num_input: size of each input sample.
        num_hidden: width of the hidden layer.
        num_output: size of each output sample.
        dropout: dropout probability applied after the hidden activation.
        activation: 'tanh' or 'relu'.

    Raises:
        ValueError: if ``activation`` is not one of 'tanh' / 'relu'.
        (The original silently left ``activation_f`` unset, producing a
        confusing AttributeError only when ``forward`` was first called.)
    """

    def __init__(self, num_input, num_hidden, num_output, dropout,
                 activation='tanh'):
        super(Net, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.fc1 = nn.Linear(num_input, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_output)
        if activation == 'tanh':
            self.activation_f = torch.tanh
        elif activation == 'relu':
            self.activation_f = torch.relu
        else:
            # Fail fast at construction time instead of at first forward().
            raise ValueError(
                "activation must be 'tanh' or 'relu', got %r" % (activation,))

    def forward(self, x):
        x = self.activation_f(self.fc1(x))
        x = self.dropout(x)
        # Sigmoid squashes each output independently into (0, 1).
        # NOTE(review): with num_output=2 and an argmax-based accuracy
        # downstream, softmax + cross-entropy (or a single sigmoid output
        # with BCE) would be the conventional pairing — confirm loss_fn.
        x = torch.sigmoid(self.fc2(x))
        return x
# Instantiate the network. NOTE: the original used Unicode curly quotes
# (‘tanh’), which is a SyntaxError in Python — replaced with ASCII quotes.
model_net = Net(num_input=8, num_hidden=4, num_output=2, dropout=0.0,
                activation='tanh')
def model(data, label, idx_l, model_net):
    """Run one training pass over ``data``/``label`` and return the
    gradient of the loss w.r.t. the inputs of the last batch.

    Args:
        data: input tensor, one row per sample.
        label: target tensor aligned with ``data``.
        idx_l: list whose length sets the batch size (96 here), so with
            drop_last=True at most len(data) // len(idx_l) batches run.
        model_net: the network to train (mutated in place).

    Returns:
        (grad, model_net) where ``grad`` is the tuple returned by
        ``torch.autograd.grad`` for the final batch (None if the loader
        yielded no batches), and ``model_net`` is the trained network.

    NOTE(review): a fresh SGD optimizer (momentum=0.9) is created on every
    call, so momentum state is discarded between calls — if this function
    is invoked repeatedly as "epochs", that resets momentum each time and
    can hurt convergence; consider constructing the optimizer once outside.
    """
    dataset = TensorDataset(data, label)
    data_loader = DataLoader(dataset, batch_size=len(idx_l), shuffle=False,
                             drop_last=True)
    # Move the model to the target device BEFORE the forward pass; the
    # original called .to(device) after computing the output, which fails
    # with a device mismatch when device is a GPU.
    model_net.to(device)
    model_net.train()
    optimizer = torch.optim.SGD(model_net.parameters(), lr=0.0001,
                                momentum=0.9)
    grad = None
    for batch_data, batch_label in data_loader:
        batch_label = batch_label.to(device)
        # Detach from any prior graph and track gradients w.r.t. the input.
        batch_data = batch_data.to(device).clone().detach().requires_grad_(True)
        optimizer.zero_grad()
        output = model_net(batch_data)
        loss = loss_fn(pred=output, target=batch_label)
        # Input gradient first (retain_graph so the same graph can then be
        # back-propagated once for the parameters). The original did
        # backward(retain_graph=True) followed by a second full autograd
        # pass — same result, twice the work.
        grad = torch.autograd.grad(outputs=loss, inputs=batch_data,
                                   retain_graph=True)
        loss.backward()
        optimizer.step()
    return grad, model_net
def validation(data, label, idx_l, model_net):
    """Evaluate ``model_net`` on up to ``max_nr_batches + 1`` batches.

    Args:
        data: input tensor, one row per sample.
        label: target tensor aligned with ``data``.
        idx_l: unused here (kept for signature parity with ``model``).
        model_net: network to evaluate (switched to eval mode).

    Returns:
        (val_loss, correct, evaluated, accuracy) — mean per-sample loss,
        number of correct argmax predictions, number of samples actually
        evaluated, and accuracy in percent over those samples.

    Bug fixes vs. original:
    - ``iteration_count`` was never incremented, so the early break at
      ``max_nr_batches`` never fired and every batch was evaluated.
    - Loss and accuracy were divided by ``len(data_loader.dataset)`` even
      though drop_last=True (and the intended break) means fewer samples
      are seen — accuracy was systematically underestimated. Both now use
      the count of samples actually evaluated, which is also what is
      returned in place of the dataset length.
    """
    dataset = TensorDataset(data, label)
    data_loader = DataLoader(dataset, batch_size=32, shuffle=False,
                             drop_last=True)
    model_net.eval()
    val_loss = 0.0
    correct = 0
    evaluated = 0          # samples actually scored (denominator below)
    max_nr_batches = 3
    with torch.no_grad():
        for batch_idx, (batch_data, batch_label) in enumerate(data_loader):
            batch_data = batch_data.to(device)
            batch_label = batch_label.to(device)
            output = model_net(batch_data)
            val_loss += loss_fn(output, batch_label).item()  # sum batch losses
            # Index of the per-sample maximum output = predicted class.
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(batch_label.view_as(pred)).sum().item()
            evaluated += batch_data.size(0)
            # batch_idx tracks progress directly; the original's separate
            # iteration_count was never incremented, so this cap was dead.
            if batch_idx >= max_nr_batches:
                break
    # Guard against an empty loader (e.g. fewer than 32 samples with
    # drop_last=True would yield zero batches).
    if evaluated == 0:
        return 0.0, 0, 0, 0.0
    val_loss /= evaluated
    accuracy = 100.0 * correct / evaluated
    return val_loss, correct, evaluated, accuracy