Custom filters in conv2d backward

Hi, I want to modify the convolutional filters only in the backward pass to use some fixed random ones (Feedback Alignment), so I wrote a custom module:

class Aconv2d(nn.Module):
    """Conv2d variant for Feedback Alignment: the forward pass uses the
    learned weights, while the backward pass uses fixed random feedback
    weights.

    Usage: call ``switch_mode('forward')`` before the forward pass and
    ``switch_mode('backward')`` before calling ``.backward()``, so autograd
    differentiates through the feedback weights instead of the learned ones.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        super(Aconv2d, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')

        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = False
        self.output_padding = _pair(0)
        self.groups = groups

        # Weight layout mirrors torch.nn.Conv2d / ConvTranspose2d.
        if self.transposed:
            self.weight = nn.Parameter(
                torch.Tensor(in_channels, out_channels // groups, *kernel_size))
        else:
            self.weight = nn.Parameter(
                torch.Tensor(out_channels, in_channels // groups, *kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        # Fixed random feedback weights, used only during the backward pass.
        # Plain tensor (not a Parameter): it must never be updated by the
        # optimizer.
        self.backward_weight = torch.Tensor(self.weight.size())
        # Stash for the learned weights while backward_weight is swapped in.
        self.forward_weight = self.weight.data
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weights, feedback weights and bias uniformly in
        [-stdv, stdv] with stdv = 1/sqrt(fan_in), as nn.Conv2d does."""
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        self.backward_weight.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input):
        return F.conv2d(input, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)

    def switch_mode(self, mode):
        """Swap which weights are held in ``self.weight``.

        'backward': stash the learned weights and swap in the fixed random
        feedback weights (call before ``.backward()``).
        'forward': restore the learned weights (call before the next
        forward pass). Swapping ``.data`` keeps the Parameter object (and
        its grad accumulation) intact.
        """
        if mode == 'backward':
            self.forward_weight = self.weight.data
            self.weight.data = self.backward_weight
        elif mode == 'forward':
            self.weight.data = self.forward_weight

Then I iterate over my model layers and call switch_mode for each of these layers before the forward call and the backward call, to use the right weights.

With this an iteration takes ~45 seconds on my GPU, while with plain backprop it takes ~15 seconds.
I tried writing my custom autograd function using conv2d_input and conv2d_weight but it’s even slower (~240 seconds).

I was wondering if there is a way to do this more efficiently. I was thinking about backward hooks, but my understanding is that they’re executed after the backward of the module, while here I’d need them to be performed before the module backward.

Any ideas/suggestions?