Swp memory increases very fast

transform = transforms.Compose(
    [transforms.Resize((375,375)), transforms.ToTensor()])
path = #path to folder containing Images
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(path, transform=transform),
    batch_size=100,
    num_workers=0,
    shuffle=True)
class DNN:
    def __init__(self):
        self.outputs = []
        self.model = models.resnet50(pretrained=True)
        self.model.avgpool.register_forward_hook(self.hook)
        self.model.eval()
    
    def hook(self,module, input, output):
        self.outputs.append(output)
    # Loop over 
    def step(self, inputs):
        data, label = inputs # ignore label
        _ = self.model(data)
        return 0

    
    def predict(self, dataloader):
        for i, batch in enumerate(dataloader):
            print(i)
            _ = self.step(batch)

a = DNN()
a.predict(train_loader)

I am trying to get features from the pre-final layer of resnet18 but even after a single batch, the swap memory on my cpu keeps increasing till my laptop runs out of memory.

I am not using a GPU. I’m using a 16gb MacBook pro and have about 25 GB of free space.

I’ve just tested the code on a V100 + CUDA10.2 and after the forward pass with a batch size of 100 approx. 23GB GPU memory will be used, which would explain why your swap is being filled.
You could reduce the batch size to save memory or use e.g. torch.utils.checkpoint to trade compute for memory.