Visualize a t-SNE plot for the last feature layer of a CNN_LSTM network

I have this CNN_LSTM classifier, and I want to plot a t-SNE visualization of its last feature layer (the layer just before the classification layer, not the classification layer itself). Something like this, but in PyTorch rather than Keras. How can I do that?

import torch
import torch.nn as nn
import torch.nn.functional as F


class CNN_LSTM(nn.Module):

    def __init__(self, vocab_vectors, padding_idx, batch_size):
        super(CNN_LSTM, self).__init__()  # was super(Sentiment, ...), which raises a NameError
        print('Vocab vectors size:', vocab_vectors.shape)
        self.batch_size = batch_size
        self.hidden_dim = 128

        # Frozen pretrained embeddings
        self.embedding = nn.Embedding.from_pretrained(vocab_vectors)
        self.embedding.weight.requires_grad = False
        self.embedding.padding_idx = padding_idx

        # Parallel 1-D convolutions with kernel sizes 3, 4 and 5
        self.cnns = nn.ModuleList([
            nn.Conv1d(in_channels=vocab_vectors.shape[1], out_channels=32, kernel_size=k)
            for k in [3, 4, 5]
        ])

        self.lstm = nn.LSTM(input_size=32,
                            hidden_size=self.hidden_dim,
                            bidirectional=True,
                            batch_first=True)
        self.linear1 = nn.Linear(2 * self.hidden_dim, self.hidden_dim)
        self.linear2 = nn.Linear(self.hidden_dim, 1)
        self.dropout = nn.Dropout(0.2)

    @staticmethod
    def conv_and_max_pool(x, conv):
        """Convolution followed by max pooling.

        Note: pooling with kernel_size=1 is a no-op, so the full convolved
        sequence is kept; the permute returns (batch, seq_len, channels)
        for the batch_first LSTM.
        """
        return F.max_pool1d(F.elu(conv(x)), 1).permute(0, 2, 1)

    # https://github.com/gaussic/text-classification/blob/master/cnn_pytorch.py
    def forward(self, x):
        # Initial states: (num_layers * num_directions, batch, hidden_dim)
        hidden = (torch.zeros(2, x.shape[0], self.hidden_dim, device=x.device),
                  torch.zeros(2, x.shape[0], self.hidden_dim, device=x.device))
        e = self.embedding(x)
        # Conv1d expects (batch, channels, seq_len); embeddings are (batch, seq_len, channels)
        e = e.permute(0, 2, 1)
        cnn_outs = [self.conv_and_max_pool(e, conv) for conv in self.cnns]
        # Concatenate the conv branches along the sequence dimension
        out = torch.cat(cnn_outs, dim=1)
        _, hidden = self.lstm(out, hidden)
        # Concatenate the final forward and backward hidden states
        out = torch.cat((hidden[0][-2, :, :], hidden[0][-1, :, :]), dim=1)

        return self.linear2(self.dropout(F.relu(self.linear1(out))))
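
Here is a minimal sketch of the kind of thing I have in mind, assuming a trained `model` and a `DataLoader` named `loader` yielding `(token_ids, labels)` batches (both hypothetical names), and taking "last feature layer" to mean the 128-d post-ReLU output of `linear1`. It captures the features with a forward hook, then projects them with scikit-learn's TSNE and plots with matplotlib:

import numpy as np
import torch
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

features, labels = [], []

def hook(module, inputs, output):
    # output of linear1 is (batch, 128); apply ReLU to match the
    # post-activation features used in forward()
    features.append(torch.relu(output).detach().cpu())

handle = model.linear1.register_forward_hook(hook)

model.eval()
with torch.no_grad():
    for x, y in loader:  # hypothetical DataLoader of (token_ids, labels)
        x = x.to(next(model.parameters()).device)
        model(x)         # the forward pass fills `features` via the hook
        labels.append(y.cpu())

handle.remove()

feats = torch.cat(features).numpy()  # (N, 128)
labs = torch.cat(labels).numpy()     # (N,)

# Project the 128-d features down to 2-D with t-SNE
emb = TSNE(n_components=2, perplexity=30, init='pca').fit_transform(feats)

plt.figure(figsize=(8, 6))
scatter = plt.scatter(emb[:, 0], emb[:, 1], c=labs, cmap='coolwarm', s=5)
plt.colorbar(scatter)
plt.title('t-SNE of CNN_LSTM penultimate features')
plt.show()

An alternative to the hook would be adding a flag to forward() so it returns the penultimate activations directly. Also note t-SNE gets slow beyond a few thousand points, so a subsample of the dataset is usually enough for the plot.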