Here is my network definition. I am not using the sigmoid layer because cross entropy takes care of it, so I pass the raw logits to the loss function.
import torch
import torch.nn as nn

class Sentiment_LSTM(nn.Module):
    """
    We are training the embedding layer along with the LSTM for sentiment analysis.
    """
    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):
        """
        Setting up the parameters.
        """
        super(Sentiment_LSTM, self).__init__()
        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        # embedding layer and LSTM layers
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            dropout=drop_prob, batch_first=True)
        # dropout layer to avoid overfitting
        self.dropout = nn.Dropout(drop_prob)
        # linear output layer (sigmoid is declared but not used; raw logits go to the loss)
        self.fc = nn.Linear(hidden_dim, output_size)
        self.sig = nn.Sigmoid()
    def forward(self, x):
        """
        Perform a forward pass.
        """
        batch_size = x.size(0)
        x = x.long()
        embeds = self.embedding(x)
        lstm_out, hidden = self.lstm(embeds)
        # stack up LSTM outputs: (batch_size * seq_len, hidden_dim)
        lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
        out = self.dropout(lstm_out)
        out = self.fc(out)
        # no sigmoid here -- these are raw logits for the cross-entropy-based loss
        # reshape to be batch_size first: (batch_size, seq_len, output_size)
        out = out.view(batch_size, -1, self.output_size)
        # keep only the output at the last time step: (batch_size, output_size)
        out = out[:, -1, :]
        return out
    def init_hidden(self, batch_size):
        # initializing the hidden state (train_on_gpu is a global flag defined elsewhere in my script)
        weight = next(self.parameters()).data
        if train_on_gpu:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
        return hidden
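For illustration, here is a minimal sketch of how I instantiate the model and check the output shape (the vocab size, dimensions, and batch below are just placeholder numbers, not my real config):

import torch

# placeholder hyperparameters, only to illustrate the expected shapes
vocab_size = 5000
model = Sentiment_LSTM(vocab_size=vocab_size, output_size=3,
                       embedding_dim=400, hidden_dim=256, n_layers=2)

dummy_batch = torch.randint(0, vocab_size, (4, 30))  # (batch_size=4, seq_len=30) token ids
logits = model(dummy_batch)
print(logits.shape)  # torch.Size([4, 3]) -- raw logits, no sigmoid/softmax applied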
My loss function:
class FocalLoss(nn.Module):
    def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.logits = logits
        self.reduce = reduce

    def forward(self, inputs, targets):
        # per-sample cross entropy on the raw logits (no reduction yet)
        ce_loss = nn.CrossEntropyLoss(reduction='none')(inputs, targets)
        pt = torch.exp(-ce_loss)
        F_loss = self.alpha * (1 - pt) ** self.gamma * ce_loss
        if self.reduce:
            return torch.mean(F_loss)
        else:
            return F_loss
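Continuing the sketch above, this is how I call the loss, passing the raw logits and integer class targets (the 0/1/2 label values below are just an example batch, not my real encoding):

criterion = FocalLoss(alpha=1, gamma=2)

logits = model(dummy_batch)            # raw logits, shape (batch_size, 3)
targets = torch.tensor([0, 2, 1, 2])   # example class indices for a batch of 4
loss = criterion(logits, targets)
loss.backward()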
My output has a size of 3: it has to predict whether the sentiment is positive, negative, or neutral.
My data is imbalanced: neutral is around 7000 samples, positive around 250, and negative around 800. Do my understanding and implementation make sense?
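In case it helps, this is roughly what the imbalance looks like if I turn the approximate counts above into inverse-frequency weights (whether I should actually feed something like this into the loss as alpha/weight is part of what I am asking):

import torch

# approximate class counts: positive ~250, negative ~800, neutral ~7000 (the order is just how I listed them)
counts = torch.tensor([250.0, 800.0, 7000.0])
class_weights = counts.sum() / (len(counts) * counts)  # inverse-frequency weighting
print(class_weights)  # roughly tensor([10.73, 3.35, 0.38])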