requires_grad is False for loss

Hi,

I have a problem with loss.backward(): requires_grad is False after the computation.
output.shape = [20, 1], the same as the target, and criterion = nn.BCELoss().
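
To illustrate the setup (a minimal sketch, not my actual code):

import torch
import torch.nn as nn

criterion = nn.BCELoss()
output = torch.rand(20, 1)        # stand-in for the model output; requires_grad is False here
target = torch.rand(20, 1)
loss = criterion(output, target)
loss.backward()                    # raises a RuntimeError because loss has no grad_fn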

Any idea?

Thanks

Could you post your forward code?
Maybe you’ve used .data somewhere or otherwise detached your tensor.
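
For illustration (a small sketch using the plain tensor API), this is how .data or .detach() cuts the graph:

import torch

x = torch.randn(4, requires_grad=True)
y = (x * 2).sum()
print(y.requires_grad)        # True: y is still connected to the autograd graph

z = (x * 2).data.sum()        # .data (like .detach()) returns a tensor cut from the graph
print(z.requires_grad)        # False: calling z.backward() would raise an error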

Thank you for the quick response. Here is my code:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

class SimpleNet(nn.Module):
    def __init__(self, num_classes=2):
        super(SimpleNet, self).__init__()

        self.conv1 = nn.Conv3d(in_channels=1, out_channels=8, kernel_size=3, stride=1, padding=1)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv3d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.ReLU()
        self.pool = nn.MaxPool3d(kernel_size=2)
        self.conv3 = nn.Conv3d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.relu3 = nn.ReLU()
        self.conv4 = nn.Conv3d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.relu4 = nn.ReLU()
        self.fc = nn.Linear(in_features= 16 * 16 * 16 * 32, out_features=num_classes)
        self.softmax = nn.LogSoftmax()

    def forward(self, input):
        output = self.conv1(input)
        output = self.relu1(output)
        output = self.conv2(output)
        output = self.relu2(output)
        output = self.pool(output)
        output = self.conv3(output)
        output = self.relu3(output)
        output = self.conv4(output)
        output = self.relu4(output)
        output = output.view(-1, 16 * 16 * 16 * 32)  # flatten to (batch, features) for the fc layer
        output = self.fc(output)
        output = self.softmax(output)
        _, output = output.max(1)  # max(1) returns (values, indices); only the class indices are kept
        return output

net = SimpleNet()
opt = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999))
criterion = nn.BCELoss()

def train_epoch(model, opt, criterion, batch_size=20):
    model.train()
    losses = []
    for i in range(0, X.size(0), batch_size):
        x_batch = X[i:i + batch_size, :]
        y_batch = Y[i:i + batch_size, :]
        x_batch = Variable(x_batch)
        y_batch = Variable(y_batch)
        opt.zero_grad()
        y_hat = model(x_batch).type(torch.FloatTensor)  # cast to float so BCELoss accepts it
        y_hat = torch.unsqueeze(y_hat, 1)  # shape [20] -> [20, 1] to match y_batch
        loss = criterion(y_hat, y_batch) 
        loss.backward()
        opt.step()        
        losses.append(loss.data.numpy())
    return losses
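
As a quick sanity check (a sketch for illustration, assuming an input of shape [20, 1, 32, 32, 32] so that 16 * 16 * 16 * 32 matches the fc layer), you can print requires_grad on the network output to see whether the graph is already cut inside forward():

x_check = torch.randn(20, 1, 32, 32, 32)   # assumed input: 20 single-channel 32x32x32 volumes
out = net(x_check)
print(out.requires_grad)   # False here means the graph was cut inside forward()
print(out.dtype)           # torch.int64: these are the class indices returned by output.max(1)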