Save the output matrix of every layer of autoencoder

Using PyTorch, I am able to create an autoencoder like the one given below. How do I save the output matrix to a .csv file after every layer?

import torch.nn as nn


class Autoencoder(nn.Module):
    def __init__(self, ):
        super(Autoencoder, self).__init__()
        self.fc1 = nn.Linear(10000, 5000)
        self.fc2 = nn.Linear(5000, 2000)
        self.fc3 = nn.Linear(2000, 500)
        self.fc4 = nn.Linear(500, 100)
        self.fc5 = nn.Linear(100, 500)
        self.fc6 = nn.Linear(500, 2000)
        self.fc7 = nn.Linear(2000, 5000)
        self.fc8 = nn.Linear(5000, 10000)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.relu(self.fc3(x))
        x = self.relu(self.fc4(x))
        x = self.relu(self.fc5(x))
        x = self.relu(self.fc6(x))
        x = self.relu(self.fc7(x))
        x = self.relu(self.fc8(x))
        return x

def forward(self, x):
    x1 = self.relu(self.fc1(x))
    x2 = self.relu(self.fc2(x1))
    x3 = self.relu(self.fc3(x2))
    x4 = self.relu(self.fc4(x3))
    x5 = self.relu(self.fc5(x4))
    x6 = self.relu(self.fc6(x5))
    x7 = self.relu(self.fc7(x6))
    x8 = self.relu(self.fc8(x7))
    return x1, x2, x3, x4, x5, x6, x7, x8

Then, when you call the model, you can save all the outputs from x1 to x8 in whatever format you want.


Can you please write the full code? I am not sure how to save the outputs after calling the forward() function.

@ptrblck sorry to disturb you… Can you please help?

You can use @Supreet’s code, which returns every intermediate activation from forward.
Once you grab the outputs, you could save them to a .csv.
Here is a small example:

import torch
import torch.nn as nn
import numpy as np


class Autoencoder(nn.Module):
    def __init__(self, ):
        super(Autoencoder, self).__init__()
        self.fc1 = nn.Linear(100, 50)
        self.fc2 = nn.Linear(50, 20)
        self.fc3 = nn.Linear(20, 5)
        self.fc4 = nn.Linear(5, 1)
        self.fc5 = nn.Linear(1, 5)
        self.fc6 = nn.Linear(5, 20)
        self.fc7 = nn.Linear(20, 50)
        self.fc8 = nn.Linear(50, 100)
        self.relu = nn.ReLU()

    def forward(self, x):
        x1 = self.relu(self.fc1(x))
        x2 = self.relu(self.fc2(x1))
        x3 = self.relu(self.fc3(x2))
        x4 = self.relu(self.fc4(x3))
        x5 = self.relu(self.fc5(x4))
        x6 = self.relu(self.fc6(x5))
        x7 = self.relu(self.fc7(x6))
        x8 = self.relu(self.fc8(x7))
        return x1, x2, x3, x4, x5, x6, x7, x8


model = Autoencoder()
x = torch.randn(1, 100)
outputs = model(x)
for i, output in enumerate(outputs):
    np.savetxt(
        'output{}.csv'.format(i),
        output.detach().numpy(),
        delimiter=',')
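
As an alternative, if you would rather not change the return signature of forward, a forward hook on each Linear layer can capture and save its output. This is just a sketch, assuming the small Autoencoder defined above; the hooks fire whenever the layers run, regardless of what forward returns:

model = Autoencoder()

def save_output(name):
    # Hook signature is (module, input, output); save the layer output as .csv.
    def hook(module, input, output):
        np.savetxt('{}_output.csv'.format(name),
                   output.detach().numpy(),
                   delimiter=',')
    return hook

# Register a hook on every Linear layer so a forward pass writes
# fc1_output.csv ... fc8_output.csv.
for name, module in model.named_modules():
    if isinstance(module, nn.Linear):
        module.register_forward_hook(save_output(name))

x = torch.randn(1, 100)
out = model(x)  # the hooks run here and write the files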

Thanks a lot, it works… I just made one change: I converted my model to double instead of float.
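
For anyone finding this later, a minimal sketch of that change (assuming the input data is float64, e.g. coming from a NumPy array of doubles):

model = Autoencoder().double()                # cast all parameters to float64
x = torch.randn(1, 100, dtype=torch.float64)  # input dtype must match the model
outputs = model(x)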

Hey, one more question. The code above works when I want to get the representation of the original data in the middle layers. What if I want to know the weights of the middle layers?

For example, as per @ptrblck’s code, can I get the 50x20 and 20x5 matrices containing the weights?

You can access them directly as attributes:

weights = model.fc1.weight
bias = model.fc1.bias
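
Note that nn.Linear stores its weight as an [out_features, in_features] tensor, so model.fc2.weight has shape [20, 50] and model.fc3.weight has shape [5, 20] (the transposes of the 50x20 and 20x5 views). A small sketch of saving them to .csv, reusing np.savetxt from the example above:

w2 = model.fc2.weight.detach().numpy()  # shape (20, 50)
w3 = model.fc3.weight.detach().numpy()  # shape (5, 20)

np.savetxt('fc2_weight.csv', w2, delimiter=',')
np.savetxt('fc3_weight.csv', w3, delimiter=',')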