Saving extracted features

Hi,
I want to extract and save the features that enter the fully connected layer, for each image in the test process, together with the corresponding label.

You could use forward hooks as described in this post. Alternatively, you could also just return each activation in the forward method.

Thanks for your answer.
I have put my code below. How can I change it to extract and save, for each image in the test process, the output of the last fully connected layer before the softmax, together with the image's actual label?

import torch
import torchvision
from torchvision import transforms
import torch.nn as nn
# ---------------------------------------------------------------------------
# Hyper-parameters
# ---------------------------------------------------------------------------
batch_size = 256   # samples per mini-batch
n_class = 4        # number of target classes
lr = 0.0001        # Adam learning rate
num_epochs = 5

# Resize every image to 400x400, convert it to a tensor, and map each
# channel from [0, 1] to [-1, 1].
transform = transforms.Compose([
    transforms.Resize((400, 400)),
    transforms.ToTensor(),
    transforms.Normalize((.5, .5, .5), (.5, .5, .5)),
])

# ---------------------------------------------------------------------------
# Custom dataset (one ImageFolder per split) and its DataLoaders
# ---------------------------------------------------------------------------
train_dataset = torchvision.datasets.ImageFolder('/content/drive/My Drive/extracted_motor_imagery/motor imagery/train',
                                                 transform=transform)
valid_dataset = torchvision.datasets.ImageFolder('/content/drive/My Drive/extracted_motor_imagery/motor imagery/validation',
                                                 transform=transform)
test_dataset = torchvision.datasets.ImageFolder('/content/drive/My Drive/extracted_motor_imagery/motor imagery/test',
                                                transform=transform)


def _shuffling_loader(dataset):
    """Wrap *dataset* in a DataLoader using the shared batch size, shuffled."""
    return torch.utils.data.DataLoader(dataset=dataset,
                                       batch_size=batch_size,
                                       shuffle=True)


train_loader = _shuffling_loader(train_dataset)
valid_loader = _shuffling_loader(valid_dataset)
test_loader = _shuffling_loader(test_dataset)
# Convolutional neural network
class convnet(nn.Module):
    """Three conv/BN/ReLU/MaxPool blocks followed by a two-layer classifier.

    Expects input batches of shape (N, 3, 400, 400); each MaxPool halves the
    spatial size (400 -> 200 -> 100 -> 50), so the flattened activation
    entering ``fc1`` has 50*50*16 elements.
    """

    def __init__(self, num_classes=None):
        """Build the network.

        Args:
            num_classes: size of the output layer. Defaults to the
                module-level ``n_class`` so existing ``convnet()`` callers
                keep working unchanged.
        """
        super(convnet, self).__init__()
        if num_classes is None:
            num_classes = n_class
        self.layer1 = nn.Sequential(nn.Conv2d(3, 64, 5, 1, 2),
                                    nn.BatchNorm2d(64),
                                    nn.ReLU(),
                                    nn.MaxPool2d(2, 2))
        self.layer2 = nn.Sequential(nn.Conv2d(64, 32, 5, 1, 2),
                                    nn.BatchNorm2d(32),
                                    nn.ReLU(),
                                    nn.MaxPool2d(2, 2))
        self.layer3 = nn.Sequential(nn.Conv2d(32, 16, 5, 1, 2),
                                    nn.BatchNorm2d(16),
                                    nn.ReLU(),
                                    nn.MaxPool2d(2, 2))
        self.fc1 = nn.Linear(50 * 50 * 16, 1000)
        self.drop_out = nn.Dropout(p=.5)
        self.fc2 = nn.Linear(1000, num_classes)

    def forward(self, x, return_features=False):
        """Run the network and return the pre-softmax logits.

        Args:
            x: input batch of shape (N, 3, 400, 400).
            return_features: when True, also return the 1000-dim output of
                ``fc1`` (before dropout) -- the representation that enters
                the final fully connected layer -- so it can be saved
                alongside the labels during testing.

        Returns:
            logits of shape (N, num_classes), or ``(logits, features)``
            when ``return_features`` is True.
        """
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = out.reshape(out.size(0), -1)  # flatten to (N, 50*50*16)
        features = self.fc1(out)
        out = self.drop_out(features)
        out = self.fc2(out)
        if return_features:
            return out, features
        return out
      
      
      
# Model: CNN classifier defined above (output size taken from n_class).
convmodel = convnet()
# Loss: CrossEntropyLoss expects raw (pre-softmax) logits plus class indices.
loss_fn = nn.CrossEntropyLoss()
# Optimizer: Adam over all model parameters.
optimizer_fn = torch.optim.Adam(convmodel.parameters(), lr=lr)
# LR schedule: halve the learning rate after every epoch (step_size=1).
# NOTE(review): StepLR only has an effect if lr_sch.step() is called once
# per epoch -- verify the training loop actually does so.
lr_sch = torch.optim.lr_scheduler.StepLR(optimizer_fn, 1, gamma=0.5)
# Number of mini-batches per epoch, used only for progress printing.
num_steps = len(train_loader)
valid_num_steps = len(valid_loader)
# Train for num_epochs epochs; after each epoch, step the LR schedule and
# evaluate on the validation split.
for i in range(num_epochs):
    # ---- training ----
    convmodel.train()
    # get_lr() is deprecated and returns misleading values mid-schedule;
    # get_last_lr() reports the lr actually in use this epoch.
    print(lr_sch.get_last_lr())
    for j, (imgs, lbls) in enumerate(train_loader):
        out = convmodel(imgs)
        loss_val = loss_fn(out, lbls)
        optimizer_fn.zero_grad()
        loss_val.backward()
        optimizer_fn.step()
        # Original guarded this with `if (j+1) % 1 == 0`, which is always
        # true, so print every step unconditionally.
        print('Train, Epoch [{}/{}] Step [{}/{}] Loss: {:.4f}'.
              format(i+1, num_epochs, j+1, num_steps, loss_val.item()))
    # BUG FIX: the StepLR scheduler was created but never stepped, so the
    # learning rate never actually decayed.
    lr_sch.step()

    # ---- validation ----
    convmodel.eval()
    corrects = 0
    seen = 0  # samples evaluated so far; correct denominator for accuracy
    with torch.no_grad():  # no gradients needed during evaluation
        for k, (imgs, lbls) in enumerate(valid_loader):
            out = convmodel(imgs)
            loss_val = loss_fn(out, lbls)
            predicted = torch.argmax(out, 1)
            corrects += (predicted == lbls).sum().item()
            # BUG FIX: the original divided by (k+1)*batch_size, which
            # understates accuracy when the last batch is partial.
            seen += lbls.size(0)
            print('Validation, Step [{}/{}] Loss: {:.4f} Acc: {:.4f} '.format(k + 1, valid_num_steps, loss_val.item(), 100. * corrects / seen))
# Final evaluation on the test split: report accuracy and save, for every
# test image, the output of the last fully connected layer before the
# softmax (i.e. the raw logits the model produces -- CrossEntropyLoss
# applies the softmax internally) together with the true label.
convmodel.eval()
corrects = 0
seen = 0  # samples evaluated so far; correct denominator for accuracy
num_steps = len(test_loader)
all_features = []  # per-batch (batch, n_class) pre-softmax outputs
all_labels = []    # per-batch ground-truth label tensors
with torch.no_grad():  # no gradients needed during evaluation
    for j, (imgs, lbls) in enumerate(test_loader):
        out = convmodel(imgs)  # fc2 output: features before softmax
        all_features.append(out.cpu())
        all_labels.append(lbls.cpu())
        predicted = torch.argmax(out, 1)
        corrects += (predicted == lbls).sum().item()
        # BUG FIX: the original divided by (j+1)*batch_size, which
        # understates accuracy when the last batch is partial.
        seen += lbls.size(0)
        print('Step [{}/{}] Acc {:.4f}: '.format(j+1, num_steps, 100.*corrects/seen))
# Persist one (num_test_images, n_class) feature tensor with matching labels.
torch.save({'features': torch.cat(all_features),
            'labels': torch.cat(all_labels)},
           'test_features.pt')

Here is a baseline implementation showing how to change the forward method without using hooks.