Same output always

Hi,

My NN always predicts the same class with the same output value. I feed the inputs in one by one, although I have tried different batch sizes with the same result. I cannot figure out why… any help would be much appreciated. My code is below:

import pandas as pd
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn.model_selection import train_test_split

Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.1, random_state=RANDOM_STATE)

# Convert the training split to tensors
df = pd.DataFrame(Xtrain)
de = pd.DataFrame(Ytrain)
dataset_x = df.values
dataset_y = de.values

x_data = torch.from_numpy(dataset_x)
y_data = torch.from_numpy(dataset_y)


class Model(torch.nn.Module):
    def __init__(self, vocab_size, EMBED_SIZE):
        super(Model, self).__init__()
        self.embeddings = torch.nn.Embedding(vocab_size, EMBED_SIZE)
        self.l1 = torch.nn.Linear(39 * EMBED_SIZE, 50)
        self.l2 = torch.nn.Linear(50, 40)
        self.l3 = torch.nn.Linear(40, 30)
        self.l4 = torch.nn.Linear(30, 20)
        self.l5 = torch.nn.Linear(20, 14)

    def forward(self, x):
        # Flatten the 39 embedded tokens into one vector.
        # Note: view(1, -1) hard-codes a batch size of 1.
        x = self.embeddings(x).view(1, -1)
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return self.l5(x)

model = Model(VOCAB_SIZE, EMBED_SIZE)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)


for epoch in range(100):
    for train, labels in zip(x_data, y_data):
        train = Variable(train).long()
        labels = Variable(labels)
        labels = labels.view(-1)

        model.zero_grad()
        y_pred = model(train)
        print(y_pred)

        loss = criterion(y_pred, labels)
        loss.backward()
        optimizer.step()

        if epoch % 5 == 0:
            print("loss: ", loss)
  
# Prepare the test split the same way
df = pd.DataFrame(Xtest)
de = pd.DataFrame(Ytest)
dataset_x = df.values
dataset_y = de.values

x_data = torch.from_numpy(dataset_x)
y_data = torch.from_numpy(dataset_y)

model.eval()
for test, ans in zip(x_data, y_data):
    test = Variable(test).long()
    ans = Variable(ans).view(-1)
    output = model(test)
    loss = criterion(output, ans)
    print(torch.max(output.data, 1))

""PART OF THE RESULT IS BELOW:

torch.Size([1, 3900])
(tensor([1.3063]), tensor([11]))
tensor([   0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
           0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
           0,    0,    0,    0,    0, 1124, 1212, 1510,  679,  468,  846,   75,
         355,  373,  468])
torch.Size([1, 3900])
(tensor([1.3063]), tensor([11]))
tensor([  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
          0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
          0,   0,   0,   0,   0,   0,   0, 321, 113,  43,  69])
torch.Size([1, 3900])
(tensor([1.3063]), tensor([11]))
tensor([  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
          0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
          0,   0,   0,   0,   0,   0,   0, 557, 985, 157,   0])
torch.Size([1, 3900])
(tensor([1.3063]), tensor([11]))
tensor([   0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
           0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
           0,    0,    0,    0,    0,  586, 2130,  247, 6303,  103,  264,   19,
        1020,   21, 8826])
torch.Size([1, 3900])

Can you also print the loss? Is the loss decreasing, increasing, or staying the same?
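
Something like this would show the per-epoch trend (a sketch reusing the model, criterion, optimizer, x_data and y_data from your post):

# Sketch: average the loss over each epoch to see whether it is falling
for epoch in range(100):
    epoch_loss = 0.0
    for train, labels in zip(x_data, y_data):
        train = train.long()
        labels = labels.view(-1)

        model.zero_grad()
        y_pred = model(train)
        loss = criterion(y_pred, labels)
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()

    print("epoch %d  mean loss: %.4f" % (epoch, epoch_loss / len(x_data)))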

Also, can you try normalizing your data?
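
If your inputs were continuous features, standardization with sklearn would look roughly like this (a sketch; note that token indices feeding an Embedding layer should stay as raw integers, so this only applies to real-valued columns):

# Sketch: standardize real-valued features to zero mean / unit variance
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
Xtrain_scaled = scaler.fit_transform(Xtrain)
Xtest_scaled = scaler.transform(Xtest)  # reuse the training statistics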

For readability, I would suggest putting your source code under </> (the Preformatted text option).

Hi bhusan23,

I figured out the problem: the embedding layer was throwing an error partway through training, so the model was never fully trained and always gave the same prediction.
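
For anyone hitting the same thing: the classic cause of an Embedding error mid-training is an input index outside [0, vocab_size). A quick sanity check like this (a sketch, assuming x_data holds the token indices) catches it before training starts:

# Sketch: every index must fit the Embedding table, i.e. lie in [0, vocab_size)
assert x_data.min() >= 0, "negative token index found"
assert x_data.max() < VOCAB_SIZE, "index >= VOCAB_SIZE found; grow the vocab or remap"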

Thanks