Backpropagation for WordEmbedding

Hi, I need the backpropagation (the backward method) for my WordEmbedding layer. The code is below:

import torch


class WordEmbedding:
    def __init__(self, vocab_size, embedding_dim, init_wt=0.01, weight=None):
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.bias = torch.zeros(0)            # unused placeholder, the layer has no bias
        if weight is not None:
            self.weight = weight
        else:
            self.weight = torch.randn(vocab_size, embedding_dim) * init_wt
        self.grad_bias = torch.zeros(0)
        self.grad_weight = torch.zeros(vocab_size, embedding_dim)
        self.is_trainable = True

    def get_indicator_matrix(self, indices):
        # One-hot (indicator) matrix: row b has a 1 in column indices[b]
        batch_size = indices.size(0)
        self.indicator_matrix = torch.zeros(batch_size, self.vocab_size)
        for i in range(batch_size):
            self.indicator_matrix[i, indices[i]] = 1.0

    def forward(self, input):
        # input: (batch_size, context_size) integer word indices
        # output: (batch_size, context_size * embedding_dim), one embedding per position
        self.input = input
        self.batch_size = self.input.size(0)
        output = torch.zeros(self.input.size(0), self.input.size(1) * self.embedding_dim)
        for i in range(self.input.size(1)):
            self.get_indicator_matrix(self.input[:, i].long())
            output[:, i * self.embedding_dim:(i + 1) * self.embedding_dim] = \
                torch.mm(self.indicator_matrix, self.weight)
        return output

    def backward(self, grad_output):
        # This is the part I am stuck on: how do I compute self.grad_weight here,
        # and what (if anything) should be returned for the integer input?
        output = None
        return output

    def zero_grad(self):
        self.grad_weight.fill_(0.0)
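
For context, here is a sketch of what I think backward could look like, based on the forward pass above: each output slice is indicator_matrix @ weight, so I assume the weight gradient should accumulate indicator_matrix.T @ grad_output_slice over the context positions, and the integer input gets no gradient. I have not verified this, which is why I am asking. Is this the right idea?

    def backward(self, grad_output):
        # Sketch only (untested): mirror the loop in forward.
        # Per position i the forward computes indicator_matrix @ weight,
        # so the weight gradient accumulates indicator_matrix.T @ grad_slice.
        for i in range(self.input.size(1)):
            self.get_indicator_matrix(self.input[:, i].long())
            grad_slice = grad_output[:, i * self.embedding_dim:(i + 1) * self.embedding_dim]
            self.grad_weight += torch.mm(self.indicator_matrix.t(), grad_slice)
        # The input holds integer word indices, so nothing is returned for it.
        return None

With that version plugged in (and zero_grad called between batches so the += accumulation is correct), a quick shape check with made-up sizes would be:

    layer = WordEmbedding(vocab_size=250, embedding_dim=16)
    x = torch.randint(0, 250, (8, 3))        # batch of 8, context of 3 words
    out = layer.forward(x)                   # shape (8, 48)
    layer.backward(torch.ones_like(out))
    print(layer.grad_weight.shape)           # expected: torch.Size([250, 16])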