Hello , I want to train a CNN for regression, because the output is a continuous variable, but I got this message: element 0 of tensors does not require grad and does not have a grad_fn

I saw Forums where appears the same problem, but I still don’t find the solution

Can anyone help me??

Here is my code

```
#class SimpleNet(nn.Module):
class CNN(nn.Module):
def __init__(self):
#super(SimpleNet, self).__init__()
super(CNN, self).__init__()
self.conv1= nn.Conv2d(in_channels=3, out_channels=96, kernel_size=5, stride=1)
self.relu1= nn.ReLU()
self.norm1= nn.BatchNorm2d(96)
self.conv2 = nn.Conv2d(in_channels=96, out_channels=256, kernel_size=5, stride=1, padding=2)
self.relu2 = nn.ReLU()
self.norm2= nn.BatchNorm2d(256)
self.conv3 = nn.Conv2d(in_channels=256, out_channels=384, kernel_size=3, stride=1, padding=1)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(in_channels=384, out_channels=384, kernel_size=3, stride=1, padding=1)
self.relu4 = nn.ReLU()
self.conv5 = nn.Conv2d(in_channels=384, out_channels=64, kernel_size=3, stride=1, padding=1)
self.relu5 = nn.ReLU()
self.pool1= nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(in_features=4096, out_features=4096)
self.fc2 = nn.Linear(in_features=4096, out_features=4096)
#self.fc3 = nn.Linear(in_features=4096, out_features=238)
self.fc3 = nn.Linear(in_features=4096, out_features=1)
def forward(self, x):
x=x.float()
out = self.conv1(x)
out = self.relu1(out)
out = self.norm1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.norm2(out)
out = self.conv3(out)
out = self.relu3(out)
out = self.conv4(out)
out = self.relu4(out)
out = self.conv5(out)
out = self.relu5(out)
out = self.pool1(out)
out = out.view(-1, 4096)
out = self.fc1(out)
out = self.fc2(out)
out = self.fc3(out)
return out
# Model
model = CNN()
CUDA = torch.cuda.is_available()
if CUDA:
model = model.cuda()
loss_fn = nn.MSELoss()
#optimizer = Adam(model.parameters(), lr=0.001, weight_decay=0.0001)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, weight_decay=0.0001)
#Training CNN
import time
num_epochs = 2
#Define the lists to store the results of loss and accuracy
train_loss = []
test_loss = []
train_accuracy = []
test_accuracy = []
for epoch in range(num_epochs):
#Reset s variables to cero in each epoch
start = time.time()
correct = 0
iterations = 0
iter_loss = 0.0
model.train() # Training Mode
for i, (input, lab) in enumerate(train_load):
# Convert torch tensor to Variable
input = Variable(input)
lab = Variable(lab)
lab = torch.DoubleTensor(25)
lab = lab.type(torch.cuda.FloatTensor)
# GPU
CUDA = torch.cuda.is_available()
if CUDA:
input = input.cuda()
lab = lab.cuda()
optimizer.zero_grad() # clean gradient
outputs = model(input)
outputs = torch.DoubleTensor(25)
outputs = outputs.type(torch.cuda.FloatTensor)
loss = loss_fn(outputs, lab)
iter_loss += loss.item()
loss.backward() # Backpropagation
optimizer.step() # update weights
torch.cuda.empty_cache()
iterations += 1
# Save function loss
train_loss.append(iter_loss/iterations)
#Test
loss = 0.0
correct = 0
iterations = 0
model.eval()
for i, (input, lab) in enumerate(test_load):
# Convert torch tensor to Variable
input = Variable(input)
lab = Variable(lab, requires)
lab = torch.DoubleTensor(25)
lab = lab.type(torch.cuda.FloatTensor)
CUDA = torch.cuda.is_available()
if CUDA:
input = input.cuda()
lab = lab.cuda()
outputs = model(input)
outputs = torch.DoubleTensor(25)
outputs = outputs.type(torch.cuda.FloatTensor)
loss = loss_fn(outputs, lab) # Calculate the loss
loss += loss.item()
torch.cuda.empty_cache()
iterations += 1
# Record the Testing loss
test_loss.append(loss/iterations)
stop = time.time()
```