Custom Layer without trainable parameters

I need to modify the pretrained AlexNet model to process a sequence of images. While processing an image from the sequence, the stored input tensor from the previous image is first subtracted from the input tensor to each convolutional layer. The convolution is then performed on the result of that subtraction.

So I need to store the latest input tensor to each convolution layer and update it while processing the sequence. I wrote a new custom layer that sits before each convolution layer: it subtracts the stored tensor from the incoming tensor, passes the result to the conv layer, and then stores the incoming tensor as the new latest tensor. What type of variable can I use to store the latest tensor? Does the following structure make sense?
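To make the operation concrete, this is the per-frame update I have in mind, written out on plain tensors (the variable names and the 1x3x224x224 shape are just placeholders for the first conv layer):

import torch

prev = torch.zeros(1, 3, 224, 224)  # stored input from the previous frame
curr = torch.rand(1, 3, 224, 224)   # input from the current frame
diff = curr - prev                   # this is what the conv layer should receive
prev = curr                          # update the stored tensor for the next frame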

import torch
import torch.nn as nn
from .utils import load_state_dict_from_url


__all__ = ['AlexNet', 'alexnet']


model_urls = {
    'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}


class subtraction_layer(nn.Module):
    def __init__(self):
        super(subtraction_layer, self).__init__()
        # define a variable to store the latest input tensor to the conv layer;
        # note that the size of the latest tensor differs for each conv layer
        self.latest_tensor = None  # what kind of variable should this be?

    def forward(self, input):
        x = input - self.latest_tensor
        self.latest_tensor = input
        return x

class AlexNet(nn.Module):
    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            # put the new custom layer before each conv layer
            # (each conv layer gets its own instance, since the stored tensor size differs)
            subtraction_layer(),
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # put the new custom layer before each conv layer
            subtraction_layer(),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # put the new custom layer before each conv layer
            subtraction_layer(),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            # put the new custom layer before each conv layer
            subtraction_layer(),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            # put the new custom layer before each conv layer
            subtraction_layer(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x


def alexnet(pretrained=False, progress=True, **kwargs):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = AlexNet(**kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls['alexnet'],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model

The code looks generally alright.
Based on the logic you are using, I assume you don’t want to train the latest_tensor?
If that’s correct, you could register it as a buffer (self.register_buffer('latest_tensor', torch.zeros(...))) and store the detached version of the input:

self.latest_tensor = input.detach().clone()

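For instance, here is a minimal sketch of the layer along these lines (the lazy None initialization and the pass-through on the first frame are my additions to handle the per-layer sizes; adjust as needed):

class subtraction_layer(nn.Module):
    def __init__(self):
        super(subtraction_layer, self).__init__()
        # Non-trainable buffer; starts as None because each conv layer sees
        # a different tensor size, which is only known at the first forward pass.
        self.register_buffer('latest_tensor', None)

    def forward(self, input):
        if self.latest_tensor is None:
            # First frame of a sequence: nothing stored yet, pass the input through.
            x = input
        else:
            x = input - self.latest_tensor
        # Store a detached copy so no gradients flow back into the stored frame.
        self.latest_tensor = input.detach().clone()
        return x

Between sequences you could reset the stored tensors by setting each latest_tensor back to None (or by re-instantiating the layers).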
Thanks @ptrblck. You helped a lot.