loss.backward(): RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

Hi everybody,

I’ve been trying to debug this error but can’t figure out what’s wrong.
Please let me know if you need more info.


import torch
import torch.nn as nn
from torch.utils.data import DataLoader


class AutoEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        # compress the 90 input features down to an 8-dimensional code
        self.encoder = nn.Sequential(nn.Linear(90, 40),
                                     nn.ReLU(),
                                     nn.Linear(40, 20),
                                     nn.ReLU(),
                                     nn.Linear(20, 8),
                                     )

        # reconstruct the 90 features from the 8-dimensional code
        self.decoder = nn.Sequential(nn.Linear(8, 20),
                                     nn.ReLU(),
                                     nn.Linear(20, 40),
                                     nn.ReLU(),
                                     nn.Linear(40, 90),
                                     nn.Tanh()
                                     )

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

_data = torch.tensor(newData, requires_grad=True)
labels = torch.tensor(y_train, requires_grad=True)
_data = torch.utils.data.TensorDataset(_data, labels)
trainDataLoader = DataLoader(dataset=_data, batch_size=15, shuffle=True)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = AutoEncoder()
model = model.to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)

epochs = 10
outputs = []
for epoch in range(epochs):
    for (event, _) in trainDataLoader:
        print(event)
        # event = torch.tensor(event, requires_grad=True)
        recon = model(event.float())
        loss = criterion(recon, event)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        break
    break
    print(f'Epoch:{epoch+1}, Loss:{loss.item():.4f}')
    outputs.append((epoch, event, recon))

Below is the error I get.

------------------------------------------------------------------------
RuntimeError                           Traceback (most recent call last)
<ipython-input-249-8d5a9bb6eb61> in <module>
     10         loss = criterion(recon, event)
     11         optimizer.zero_grad();
---> 12         loss.backward();
     13         optimizer.step();
     14         break

~/.local/lib/python3.8/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
    243                 create_graph=create_graph,
    244                 inputs=inputs)
--> 245         torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
    246 
    247     def register_hook(self, hook):

~/.local/lib/python3.8/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
    143         retain_graph = create_graph
    144 
--> 145     Variable._execution_engine.run_backward(
    146         tensors, grad_tensors_, retain_graph, create_graph, inputs,
    147         allow_unreachable=True, accumulate_grad=True)  # allow_unreachable flag

RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

Your code runs fine in my setup:

_data = torch.utils.data.TensorDataset(torch.randn(15, 90), torch.randn(15, 90))
trainDataLoader = torch.utils.data.DataLoader(dataset=_data, batch_size=15, shuffle=True)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = AutoEncoder()
model = model.to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)

epochs=10
outputs=[]
for epoch in range(epochs):
    for (event , _) in trainDataLoader:
        event = event.to(device)        
        recon = model(event)
        loss = criterion(recon, event)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(f'Epoch:{epoch+1}, Loss:{loss.item():.4f}')
    outputs.append((epoch, event, recon))

Could you check if you’ve globally disabled gradient calculation via torch.set_grad_enabled(False)?
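If it was disabled, that alone would produce exactly this error, since no computation graph is recorded and the loss ends up without a grad_fn. A minimal sketch (using a plain nn.Linear as a stand-in for your model, just to illustrate the failure mode):

import torch
import torch.nn as nn

torch.set_grad_enabled(False)      # e.g. left over from an earlier notebook cell

model = nn.Linear(90, 90)          # stand-in for the AutoEncoder
x = torch.randn(15, 90)
loss = nn.MSELoss()(model(x), x)
print(loss.requires_grad)          # False -> no graph was recorded for the loss

loss.backward()                    # RuntimeError: element 0 of tensors does not
                                   # require grad and does not have a grad_fn

You can check the current state at any time with torch.is_grad_enabled().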


I don’t know what was wrong, but I just restarted my notebook and everything works now. Thanks!
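For anyone hitting this later: if a kernel restart fixes the error, the likely culprit is a global autograd switch flipped in an earlier cell, as suggested above. A quick way to check and undo it without restarting:

import torch

print(torch.is_grad_enabled())   # should print True while training
torch.set_grad_enabled(True)     # re-enable autograd for the rest of the session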