How can I get the output of one layer?

Hello. I need to change the last layer after training, in the test phase, but the whole image changed, while I only want to put the second row in place of the first row. I think my code has a problem.

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

#Converting data to torch.FloatTensor
transform = transforms.ToTensor()

# Download the training and test datasets
train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform)

test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform)

#Prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, num_workers=0)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32, num_workers=0)

#Define the Convolutional Autoencoder
class ConvAutoencoder(nn.Module):
    def __init__(self):
        super(ConvAutoencoder, self).__init__()
       
        #Encoder
        self.conv1 = nn.Conv2d(1, 16, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 8, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(8,8,3)
    
        #Decoder
        self.conv4 = nn.ConvTranspose2d(8, 8, 3)
        self.conv5 = nn.ConvTranspose2d(8, 16, 3, stride=2, padding=1, output_padding=1)
        self.conv6 = nn.ConvTranspose2d(16, 1, 3, stride=2, padding=1, output_padding=1)

    def forward(self, x):
        x = F.relu(self.conv1(x))      
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))  

        x = F.relu(self.conv4(x))
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        
        return x

#Instantiate the model
model = ConvAutoencoder()
print(model)


def train(model, num_epochs=20, batch_size=64, learning_rate=1e-3):
    torch.manual_seed(42)
    criterion = nn.MSELoss()  # mean squared error loss
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=1e-5)

    outputs = []
    for epoch in range(num_epochs):
        for data in train_loader:
            img, _ = data
            recon = model(img)
            loss = criterion(recon, img)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        print('Epoch:{}, Loss:{:.4f}'.format(epoch+1, loss.item()))
        outputs.append((epoch, img, recon),)
    return outputs


model = ConvAutoencoder()
max_epochs = 10
outputs = train(model, num_epochs=max_epochs)

for k in range(0, max_epochs, 5):
    plt.figure(figsize=(5, 2))
    imgs = outputs[k][1].detach().numpy()
    recon = outputs[k][2].detach().numpy()
    for i, item in enumerate(imgs):
        if i >= 5: break
        plt.subplot(2, 5, i+1)
        plt.imshow(item[0].reshape(28,28), cmap="gray") 


    for i, item in enumerate(recon):
        if i >= 5: break
        plt.subplot(2, 5, 5+i+1)
        plt.imshow(item[0].reshape(28,28), cmap="gray") 


# Try to put the second row of the conv6 kernel in place of the first row
b = ConvAutoencoder().conv6.weight

a0 = b[0, 0, 0, :]
a1 = b[0, 0, 1, :]

a0 = a1

model.conv6.weight = nn.Parameter(b)

print('--------- b ----------')
print(b)
print('------- conv6 --------')
print(model.conv6.weight)


#test phase

def test(model):
    outputs1 = [] 
    with torch.no_grad():

        for epoch in range(10):  
            for data1 in test_loader:
                img1, _ = data1
                recon1 = model(img1)
            outputs1.append((epoch, img1, recon1),)
    return outputs1

outputs1 = test(model)


for k in range(0, 10, 9):
    plt.figure(figsize=(10, 2))
    imgs1 = outputs1[k][1].detach().numpy()
    recon1 = outputs1[k][2].detach().numpy()
    for i, item in enumerate(imgs1):
        if i >= 10: break
        plt.subplot(2, 10, i+1)
        plt.imshow(item[0].reshape(28,28), cmap="gray") 

    for i, item in enumerate(recon1):
        if i >= 10: break
        plt.subplot(2, 10, 10+i+1)
        plt.imshow(item[0].reshape(28,28), cmap="gray")



![image|594x141](upload://1hM5JuHhOIPvXVv5d4j1D884e29.png)

Please guide me on how to do this.

In general, my goal is to change the output of one layer (it may be a middle layer or the last layer) and then see in the test phase whether the output image also follows this change. For example, swap the first and second rows of the output of a layer and check whether the output image follows this change. Thank you for your help.

I’m unsure what exactly you would like to change, as the code snippet seems to change a filter kernel (but doesn’t reassign it) while the description seems to target the change of an output activation.
In the latter case you could manipulate the activations in the forward method of your model and pass the changed one to the next layer.
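
If the kernel itself is what you want to change, here is a minimal sketch (assuming you want to modify the trained `model`, not a freshly created `ConvAutoencoder()` as in your snippet) that actually writes the second kernel row into the first row:

with torch.no_grad():
    w = model.conv6.weight            # ConvTranspose2d(16, 1, 3, ...) -> weight shape [16, 1, 3, 3]
    w[0, 0, 0, :] = w[0, 0, 1, :]     # copy the second kernel row into the first row of one filter
    # or, for every filter:
    # w[:, :, 0, :] = w[:, :, 1, :]

The torch.no_grad() context is needed because an in-place change to a parameter that requires gradients would otherwise raise an error.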

" In the latter case you could manipulate the activations in the forward method of your model and pass the changed one to the next layer."

thanks, how I can do this with code?

Here is a small example:

class MyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(10, 10)
        self.lin2 = nn.Linear(10, 10)
        
    def forward(self, x):
        out = self.lin1(x)
        # manipulate activation
        out = torch.cat((
            out[:, 0:1] + 1.,
            out[:, 1:] + 2.), dim=1)
        out = self.lin2(out)
        return out

model = MyModel()
x = torch.randn(1, 10)
out = model(x)
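
One note on this example: rebuilding the activation with torch.cat (instead of assigning into `out` in place) sidesteps an in-place modification of a tensor that autograd might still need; cloning `out` and index-assigning into the clone would be another option.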

Do I do this process after training my model?

I’m not familiar with your use case and don’t know when you would like to use it.
If you want to change the forward method after training, you could e.g. pass a flag to it to select the desired code path (with the manipulation), or use forward hooks as described here.
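
A rough sketch of the flag approach, reusing the `MyModel` example from above (the `manipulate` flag name is just an example):

class MyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(10, 10)
        self.lin2 = nn.Linear(10, 10)

    def forward(self, x, manipulate=False):
        out = self.lin1(x)
        if manipulate:
            # manipulate the activation only when the flag is set,
            # e.g. copy the second feature into the first one
            out = out.clone()
            out[:, 0] = out[:, 1]
        out = self.lin2(out)
        return out

model = MyModel()
x = torch.randn(1, 10)

out_train = model(x)                  # normal path (e.g. during training)
out_test = model(x, manipulate=True)  # manipulated path (e.g. in the test phase)

In your ConvAutoencoder you would apply the same idea in its forward method, e.g. after self.conv6, and only call the model with the flag enabled during testing.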

I need to change the output of the last layer after training, in the test phase.

"a flag you could pass to it and select the desired code path (with the manipulation)"

For this part, can you give me PyTorch code?
