import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt

def train(epoch):
    model.train()
    # wrap the training set (Variable is deprecated; plain tensors
    # behave identically in modern PyTorch)
    x_train, y_train = Variable(train_x), Variable(train_y)
    # wrap the validation set
    x_val, y_val = Variable(val_x), Variable(val_y)
    # move the model and data to the GPU if one is available
    if torch.cuda.is_available():
        model.cuda()
        x_train = x_train.cuda()
        y_train = y_train.cuda()
        x_val = x_val.cuda()
        y_val = y_val.cuda()
    # clear the gradients of the model parameters
    optimizer.zero_grad()
    # predictions for the training and validation sets
    output_train = model(x_train.float())
    output_val = model(x_val.float())
    # compute the training and validation loss
    loss_train = criterion(output_train, y_train.long())
    loss_val = criterion(output_val, y_val.long())
    # store plain Python floats, not graph-attached tensors, so the
    # losses can be plotted later without detaching
    train_losses.append(loss_train.item())
    val_losses.append(loss_val.item())
    # backpropagate, then update the model parameters; backward() must
    # run before optimizer.step(), otherwise there are no gradients
    loss_train.backward()
    optimizer.step()
    if epoch % 2 == 0:
        # print the training and validation loss
        print('Epoch :', epoch + 1, '\t',
              'train loss :', loss_train.item(),
              '\t', 'val loss :', loss_val.item())
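On recent PyTorch versions, a device-agnostic setup is usually cleaner than scattering .cuda() calls. A minimal sketch of the same step, reusing the model, optimizer, and criterion names from above (everything else here is an assumption, not part of the original script):

# minimal device-agnostic sketch; assumes the same model, optimizer
# and criterion objects as above
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)

def train_step(x, y):
    model.train()
    x, y = x.float().to(device), y.long().to(device)
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()
    return loss.item()  # detached Python float, safe to store and plot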
# empty list to store training losses
train_losses = []
# empty list to store validation losses
val_losses = []
# counters and lists used later for evaluation (see the sketch after
# the plotting code below)
correct = 0
total = 0
predictions = []
actuals = []
predictiontest = []
m1 = 0
m2 = 0
# training the model
for epoch in range(n_epochs):
    train(epoch)

# plotting the training and validation loss; the lists now hold plain
# floats, so Matplotlib can plot them directly
plt.plot(train_losses, label='Training loss')
plt.plot(val_losses, label='Validation loss')
plt.legend()
plt.show()
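The correct/total counters and the predictions/actuals lists declared above suggest an evaluation pass later in the script. A hedged sketch of the usual pattern; test_x and test_y are hypothetical names, not from the original:

# hypothetical evaluation pass showing how the counters above are
# typically used; test_x/test_y are assumed names
model.eval()
with torch.no_grad():
    outputs = model(test_x.float())
    _, preds = torch.max(outputs, 1)  # predicted class per sample
    predictions = preds.tolist()
    actuals = test_y.tolist()
    correct = (preds == test_y.long()).sum().item()
    total = test_y.size(0)
print('Test accuracy:', correct / total)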
As for the error the original code produced when plotting:

RuntimeError: Can't call numpy() on Tensor that requires grad. Use tensor.detach().numpy() instead.

The cause was the two append calls: train_losses.append(loss_train) stored the full loss tensor, which is still attached to the autograd graph (and, on a GPU run, still lives on the GPU). When plt.plot later tried to convert those tensors to NumPy arrays, PyTorch refused. Appending loss_train.item() as above fixes it; loss_train.detach().cpu().numpy() also works if you need the tensor form. Separately, the commented-out loss_train.backward() had to be restored: without it, optimizer.step() has no gradients to apply and the model never learns.
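A self-contained repro of the error and both fixes (the tensor below is made up purely for illustration):

import torch

# a scalar that is still attached to the autograd graph
loss = (torch.ones(3, requires_grad=True) ** 2).sum()

# loss.numpy()                # RuntimeError: Can't call numpy() on
#                             # Tensor that requires grad
print(loss.item())            # fix 1: plain Python float, best for logging
print(loss.detach().numpy())  # fix 2: detach from the graph, then convert
                              # (add .cpu() first if the tensor is on GPU)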