Hi all

I am trying to build an autoencoder. Actually I got it working, but I was wondering if there is any way of plotting the latent space? (My network shrinks down to two numbers in the latent space, so I assume no PCA is required.)

Below you can find my code:

# Autoencoder: convolutional encoder and decoder layers

class Autoencoder(nn.Module):
    """1-D convolutional autoencoder.

    The encoder compresses a (batch, 1, length) signal through four
    Conv1d stages (interleaved with MaxPool1d and ReLU); the decoder
    mirrors it with four ConvTranspose1d stages.

    NOTE(review): with no padding, decoder output length generally does
    not equal the input length (e.g. length 100 in -> 158 out), so the
    MSE reconstruction loss will fail unless input sizes are chosen so
    the shapes match — verify against your data length.
    NOTE(review): the final ReLU clamps reconstructions to >= 0, which
    is only appropriate if the input data is non-negative — confirm.
    """

    def __init__(self):
        # `**init**` in the original post is Markdown mangling of the
        # dunder name __init__; without it the class has no constructor.
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv1d(1, 5, kernel_size=5, stride=2),
            nn.MaxPool1d(3, stride=1),
            nn.ReLU(True),
            nn.Conv1d(5, 10, kernel_size=5, stride=2),
            nn.MaxPool1d(3, stride=1),
            nn.ReLU(True),
            nn.Conv1d(10, 15, kernel_size=5, stride=2),
            nn.MaxPool1d(3, stride=1),
            nn.ReLU(True),
            nn.Conv1d(15, 20, kernel_size=4, stride=1),
            nn.ReLU(True),
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose1d(20, 15, kernel_size=1, stride=4),
            nn.ReLU(True),
            nn.ConvTranspose1d(15, 10, kernel_size=2, stride=4),
            nn.ReLU(True),
            nn.ConvTranspose1d(10, 5, kernel_size=9, stride=2),
            nn.ReLU(True),
            nn.ConvTranspose1d(5, 1, kernel_size=10, stride=2),
            nn.ReLU(True),
        )

    def forward(self, x):
        """Encode then decode *x*; returns the reconstruction.

        To plot the latent space, call ``self.encoder(x)`` alone and
        scatter-plot its two values per sample.
        """
        x = self.encoder(x)
        x = self.decoder(x)
        return x

# Model / loss / optimizer setup.
# Device-agnostic: falls back to CPU when no GPU is present (the bare
# `.cuda()` call in the original crashes on CPU-only machines).
# NOTE(review): `learning_rate` must be defined earlier in the script.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Autoencoder().to(device)

criterion = nn.MSELoss()

optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
                             weight_decay=1e-5)

# Training loop.
# Train on whatever device the model already lives on, so this block
# works whether the model was created with .cuda() or .to(device).
device = next(model.parameters()).device
error = []  # per-batch loss history, plotted after training

for epoch in range(num_epochs):
    for data in train_loader:
        # `Variable` is deprecated since PyTorch 0.4 — tensors are
        # autograd-aware; just move the batch to the model's device.
        img = data.to(device)
        output = model(img)
        loss = criterion(output, img)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        error.append(loss.item())
    # Report once every 10 epochs (the original printed on every batch
    # of those epochs, and `epoch_num += epoch_num` never left 0).
    # nn.MSELoss already averages over the batch, so the original
    # `loss/32` divided by the batch size a second time.
    if epoch % 10 == 9:
        print('\r Train Epoch : {}/{} \tLoss : {:.4f}'.format(
            epoch + 1, num_epochs, loss.item()))

# Persist the trained weights (state_dict only — the recommended
# format) and plot the per-batch loss curve.
# The original used curly quotes (‘…’), which are a SyntaxError.
model_save_name = 'autoencoder2.pt'
path = f"/content/gdrive/My Drive/{model_save_name}"
torch.save(model.state_dict(), path)

plt.plot(error)

/////////////////

thank you