Why does my self-defined layer not enter backward?

class layer(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, factors, bias=None):
        ctx.save_for_backward(input, factors, bias)
        print('fwd')
        output = .....
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, factors, bias = ctx.saved_tensors  # retrieve the tensors saved in forward
        grad_input = grad_weight = grad_bias = None
        print('enter here!')  # <<<---- this is never printed!
        grads, dx = bwd(factors, grad_output, input)
        if ctx.needs_input_grad[0]:
            grad_input = dx
        if ctx.needs_input_grad[1]:
            grad_weight = grads
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_output.sum(0)
        return grad_input, grad_weight, grad_bias

As shown above, the print in backward never executes, but the print in forward works. Why?
Thank you!

Your code works for me:

class layer(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, factors, bias=None):
        ctx.save_for_backward(input, factors, bias)
        print('fwd')
        output = input.clone()
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # input, weight, bias = ctx.input, ctx.weight, ctx.bias
        grad_input = grad_weight = grad_bias = None
        print('enter here!')
        return grad_input, grad_weight, grad_bias
    
f = layer.apply

x = torch.randn(1, 1, requires_grad=True)
out = f(x, torch.randn(1, 1)) # prints: fwd
out.mean().backward() # prints: enter here!
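
If your real code still never reaches backward, the usual cause is that autograd never builds a graph through the layer: none of the inputs has requires_grad=True, or the output is detached before the loss is computed. Here is a minimal sketch reusing the layer class above (the shapes are arbitrary, chosen just for illustration):

import torch

f = layer.apply

# Case 1: no input requires grad -> no graph is built, backward never runs
x = torch.randn(1, 1)                      # requires_grad defaults to False
out = f(x, torch.randn(1, 1))              # prints: fwd
print(out.requires_grad)                   # False
# out.mean().backward() would raise "element 0 of tensors does not require grad"

# Case 2: at least one input requires grad -> backward runs
x = torch.randn(1, 1, requires_grad=True)
out = f(x, torch.randn(1, 1))              # prints: fwd
out.mean().backward()                      # prints: enter here!

You can also check out.grad_fn: if it is None, no graph was recorded and backward will never be called for that output.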