Word Embedding using CBOW

Hi, I am new to deep learning and PyTorch. I have written a CBOW model in PyTorch, but when I try to run the code it throws a KeyError on class torch.LongTensor. Can anyone help me debug it, please?

code:

import torch
from torch.autograd import Variable     
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class CBOW(nn.Module):
    def __init__(self,vocab_size,embedding_size,context_size):
        super(CBOW,self).__init__()
        self.fc1 = nn.Linear(vocab_size,embedding_size)
        self.fc2 = nn.Linear(embedding_size,vocab_size)

    def forward(self,x):
        y = []
        for i in xrange(0,174,29):
            y.append(self.fc1(x[:,i:i+29]))
        
        embedding = Variable(torch.zeros(1,128))
        for i in xrange(len(y)):
            embedding = embedding + y[i]
            
        embedding = embedding/len(y)
        x = self.fc2(embedding)
        return [F.softmax(x),embedding]

def make_corpa(data):
    vocab = ""
    for i in data:
        vocab = vocab + " " + i
    vocab = vocab.strip(" ")
    corpa = {}
    all_words = list(set(vocab.split(" ")))
    for i in xrange(len(all_words)):
        corpa[all_words[i]] = i
    
    return [corpa,len(corpa),corpa.keys()]

def conv_vect(word,corpa):
    temp = torch.FloatTensor(1,len(corpa)).zero_()
    temp[0][corpa[word]] = 1.0
    return temp

def train_word2vec(vocab_size,embedding_dim,number_of_epochs,data):
    model = CBOW(vocab_size,embedding_dim,6)
    loss = nn.CrossEntropyLoss()
    context,word = make_training_data(data,3)
    corpa = make_corpa(data)[0]
    optimizer = optim.SGD(model.parameters(),lr= 0.01)
    for epoch in xrange(number_of_epochs):
        for i in xrange(len(context)):
            context_vec_tmp = [conv_vect(j,corpa) for j in context[i]]
            context_vec = Variable(torch.cat(tuple([context_vec_tmp[j] for j in xrange(len(context_vec_tmp))]),1))
            word_vec = Variable(conv_vect(word[i],corpa))
            predict = model(context_vec)[0]
            predicted = torch.LongTensor(predict.size()[0],predict.size()[1]).zero_()
            for i in xrange(predict.size()[1]):
                predicted[0][i] = int(predict[0][i].data[0]/torch.max(predict.data[0]))
            
            word_vec.data = torch.Tensor.long(word_vec.data)
            predicted = Variable(predicted)
            print predicted.data
            print word_vec.data
            model.zero_grad()
            l = loss(predicted,word_vec)
            l.backward()
            optimizer.step()
    
    return model

def make_training_data(data,context_size):
    context = []
    word = []
    for i in data:
        temp = i.split(" ")
        for j in xrange(context_size,len(temp)-context_size,1):
            context.append([temp[j - context_size],temp[j - context_size + 1],temp[j - context_size + 2],temp[j + context_size - 2],temp[j + context_size - 1],temp[j + context_size]])
            word.append(temp[j])
    
    return context,word

train_word2vec(make_corpa(po)[1],128,10000,po)

The error is:

  KeyError                                  Traceback (most recent call last)    
<ipython-input-12-c4d942812d63> in <module>()
  ----> 1 train_word2vec(make_corpa(po)[1],128,10000,po)
<ipython-input-10-aa65a56267f9> in train_word2vec(vocab_size, embedding_dim, number_of_epochs, data)
     20             print word_vec.data
     21             model.zero_grad()
---> 22             l = loss(predicted,word_vec)
     23             l.backward()
     24             optimizer.step()

/usr/local/lib/python2.7/dist-packages/torch/nn/modules/module.pyc in __call__(self, *input, **kwargs)
    204 
    205     def __call__(self, *input, **kwargs):
--> 206         result = self.forward(*input, **kwargs)
    207         for hook in self._forward_hooks.values():
    208             hook_result = hook(self, input, result)

/usr/local/lib/python2.7/dist-packages/torch/nn/modules/loss.pyc in forward(self, input, target)
    319         _assert_no_grad(target)
    320         return F.cross_entropy(input, target,
--> 321                                self.weight, self.size_average)
    322 
    323 

/usr/local/lib/python2.7/dist-packages/torch/nn/functional.pyc in cross_entropy(input, target, weight, size_average)
    531                 for each minibatch.
    532     """
--> 533     return nll_loss(log_softmax(input), target, weight, size_average)
    534 
    535 

/usr/local/lib/python2.7/dist-packages/torch/nn/functional.pyc in log_softmax(input)
    432 
    433 def log_softmax(input):
--> 434     return _functions.thnn.LogSoftmax()(input)
    435 
    436 

/usr/local/lib/python2.7/dist-packages/torch/nn/_functions/thnn/auto.pyc in forward(self, input, *params)
    108 
    109     def forward(self, input, *params):
--> 110         self._backend = type2backend[type(input)]
    111 
    112         for param in params:

/usr/local/lib/python2.7/dist-packages/torch/_thnn/__init__.pyc in __getitem__(self, name)
     13 
     14     def __getitem__(self, name):
---> 15         return self.backends[name].load()
     16 
     17 

KeyError: <class 'torch.LongTensor'> 

Thank you

The input (predicted) of CrossEntropyLoss should be a FloatTensor of scores, but predicted in your code has been converted to a LongTensor, which is why the backend lookup fails with KeyError: <class 'torch.LongTensor'>. The target, on the other hand, must be a LongTensor holding the index of the correct word, not a one-hot vector.
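
Here is a minimal sketch of a corrected inner training step, reusing the model, loss, optimizer, corpa, context and word objects from your code (untested against your data, and written in the same old Variable API your traceback shows):

predict = model(context_vec)[0]  # FloatTensor of scores, shape (1, vocab_size)

# CrossEntropyLoss expects the index of the correct class as a
# LongTensor target, not a one-hot FloatTensor
target = Variable(torch.LongTensor([corpa[word[i]]]))

model.zero_grad()
l = loss(predict,target)  # the input stays a FloatTensor
l.backward()
optimizer.step()

Also note that CrossEntropyLoss combines LogSoftmax and NLLLoss internally, so your forward method should ideally return the raw fc2 output instead of F.softmax(x).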
