TensorFlow to PyTorch

How can I convert the following TensorFlow 1.x code to PyTorch? Any help with the conversion would be appreciated.

    self.initializer = tf.random_normal_initializer(mean=0, stddev=0.01)
    self.initializer_param = tf.random_uniform_initializer(minval=-np.sqrt(3 / self.global_dimension),
                                                           maxval=np.sqrt(3 / self.global_dimension))

    self.user_id = tf.placeholder(tf.int32, shape=[None], name='user_id')
    self.item_id = tf.placeholder(tf.int32, shape=[None], name='item_id')
    
    self.current_session = tf.placeholder(tf.int32, shape=[None], name='current_session')
    self.pre_sessions = tf.placeholder(tf.int32, shape=[None], name='pre_sessions')
    self.neg_item_id = tf.placeholder(tf.int32, shape=[None], name='neg_item_id')

    self.user_embedding_matrix = tf.get_variable('user_embedding_matrix', initializer=self.initializer,
                                                 shape=[self.user_number, self.global_dimension])
    self.item_embedding_matrix = tf.get_variable('item_embedding_matrix', initializer=self.initializer,
                                                 shape=[self.item_number, self.global_dimension])
    self.the_first_w = tf.get_variable('the_first_w', initializer=self.initializer_param,
                                       shape=[self.global_dimension, self.global_dimension])
    self.the_second_w = tf.get_variable('the_second_w', initializer=self.initializer_param,
                                        shape=[self.global_dimension, self.global_dimension])
    self.the_first_bias = tf.get_variable('the_first_bias', initializer=self.initializer_param,
                                          shape=[self.global_dimension])
    self.the_second_bias = tf.get_variable('the_second_bias', initializer=self.initializer_param,
                                           shape=[self.global_dimension])
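
For the setup part, here is the rough PyTorch sketch I have in mind. The class name Model and the constructor arguments are my own choices, nn.Embedding stands in for the tf.get_variable embedding matrices, and the placeholders simply disappear because the tensors are passed straight into forward:

import numpy as np
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, user_number, item_number, global_dimension):
        super().__init__()
        self.global_dimension = global_dimension

        # tf.random_normal_initializer(mean=0, stddev=0.01) on the embedding matrices
        self.user_embedding_matrix = nn.Embedding(user_number, global_dimension)
        self.item_embedding_matrix = nn.Embedding(item_number, global_dimension)
        nn.init.normal_(self.user_embedding_matrix.weight, mean=0.0, std=0.01)
        nn.init.normal_(self.item_embedding_matrix.weight, mean=0.0, std=0.01)

        # tf.random_uniform_initializer(-sqrt(3/d), sqrt(3/d)) on the attention parameters
        bound = np.sqrt(3.0 / global_dimension)
        self.the_first_w = nn.Parameter(torch.empty(global_dimension, global_dimension).uniform_(-bound, bound))
        self.the_second_w = nn.Parameter(torch.empty(global_dimension, global_dimension).uniform_(-bound, bound))
        self.the_first_bias = nn.Parameter(torch.empty(global_dimension).uniform_(-bound, bound))
        self.the_second_bias = nn.Parameter(torch.empty(global_dimension).uniform_(-bound, bound))

Is replacing tf.get_variable with nn.Parameter / nn.Embedding like this the right approach?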

def attention_level_one(self, user_embedding, pre_sessions_embedding, the_first_w, the_first_bias):
    # self.weight = tf.nn.softmax(tf.multiply(tf.sigmoid(
    #     tf.add(tf.matmul(pre_sessions_embedding, the_first_w), the_first_bias)), user_embedding))

  
    self.weight = tf.nn.softmax(tf.transpose(tf.matmul(tf.sigmoid(
        tf.add(tf.matmul(pre_sessions_embedding, the_first_w), the_first_bias)), tf.transpose(user_embedding))))

    out = tf.reduce_sum(tf.multiply(pre_sessions_embedding, tf.transpose(self.weight)), axis=0)
    return out
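
For attention_level_one, my best guess at the equivalent is below. I'm assuming a single user per call (user_embedding of shape [1, d]), which is what the broadcasting in the TF version seems to rely on:

# method of the Model module sketched above
def attention_level_one(self, user_embedding, pre_sessions_embedding, the_first_w, the_first_bias):
    # pre_sessions_embedding: [L, d], user_embedding: [1, d]
    scores = torch.sigmoid(pre_sessions_embedding @ the_first_w + the_first_bias)   # [L, d]
    weight = torch.softmax((scores @ user_embedding.t()).t(), dim=-1)               # [1, L]
    out = (pre_sessions_embedding * weight.t()).sum(dim=0)                          # [d]
    return out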

def attention_level_two(self, user_embedding, long_user_embedding, current_session_embedding, the_second_w,
                        the_second_bias):


    self.weight = tf.nn.softmax(tf.transpose(tf.matmul(
        tf.sigmoid(tf.add(
            tf.matmul(tf.concat([current_session_embedding, tf.expand_dims(long_user_embedding, axis=0)], 0),
                      the_second_w),
            the_second_bias)), tf.transpose(user_embedding))))
    out = tf.reduce_sum(
        tf.multiply(tf.concat([current_session_embedding, tf.expand_dims(long_user_embedding, axis=0)], 0),
                    tf.transpose(self.weight)), axis=0)
    return out
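
attention_level_two should then follow the same pattern, again assuming user_embedding has shape [1, d]:

# method of the Model module sketched above
def attention_level_two(self, user_embedding, long_user_embedding, current_session_embedding,
                        the_second_w, the_second_bias):
    # append the long-term user vector to the current session, as tf.concat does above
    combined = torch.cat([current_session_embedding, long_user_embedding.unsqueeze(0)], dim=0)  # [T+1, d]
    scores = torch.sigmoid(combined @ the_second_w + the_second_bias)    # [T+1, d]
    weight = torch.softmax((scores @ user_embedding.t()).t(), dim=-1)    # [1, T+1]
    out = (combined * weight.t()).sum(dim=0)                             # [d]
    return out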

def build_model(self):
    print('building model ... ')
    self.user_embedding = tf.nn.embedding_lookup(self.user_embedding_matrix, self.user_id)
    self.item_embedding = tf.nn.embedding_lookup(self.item_embedding_matrix, self.item_id)
    self.current_session_embedding = tf.nn.embedding_lookup(self.item_embedding_matrix, self.current_session)
    self.pre_sessions_embedding = tf.nn.embedding_lookup(self.item_embedding_matrix, self.pre_sessions)
    self.neg_item_embedding = tf.nn.embedding_lookup(self.item_embedding_matrix, self.neg_item_id)

    self.long_user_embedding = self.attention_level_one(self.user_embedding, self.pre_sessions_embedding,
                                                        self.the_first_w, self.the_first_bias)
    self.hybrid_user_embedding = self.attention_level_two(self.user_embedding, self.long_user_embedding,
                                                          self.current_session_embedding,
                                                          self.the_second_w, self.the_second_bias)

    # compute preference
    self.positive_element_wise = tf.matmul(tf.expand_dims(self.hybrid_user_embedding, axis=0),
                                           tf.transpose(self.item_embedding))
    self.negative_element_wise = tf.matmul(tf.expand_dims(self.hybrid_user_embedding, axis=0),
                                           tf.transpose(self.neg_item_embedding))
    self.intention_loss = tf.reduce_mean(
        -tf.log(tf.nn.sigmoid(self.positive_element_wise - self.negative_element_wise)))
    self.regular_loss_u_v = tf.add(self.lamada_u_v * tf.nn.l2_loss(self.user_embedding),
                                   self.lamada_u_v * tf.nn.l2_loss(self.item_embedding))
    self.regular_loss_a = tf.add(self.lamada_a * tf.nn.l2_loss(self.the_first_w),
                                 self.lamada_a * tf.nn.l2_loss(self.the_second_w))
    self.regular_loss = tf.add(self.regular_loss_a, self.regular_loss_u_v)
    self.intention_loss = tf.add(self.intention_loss, self.regular_loss)

  
    self.top_value, self.top_index = tf.nn.top_k(self.positive_element_wise, k=self.K, sorted=True)
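
For build_model, I think the placeholders become arguments of forward and the loss is computed eagerly. This sketch assumes lamada_u_v, lamada_a and K are hyperparameters stored on the module (they come from elsewhere in my class), and it returns the loss together with the top-k scores; note tf.nn.l2_loss(t) is sum(t ** 2) / 2, hence the / 2 below:

# method of the Model module sketched above
def forward(self, user_id, item_id, current_session, pre_sessions, neg_item_id):
    # tf.nn.embedding_lookup -> calling the nn.Embedding modules
    user_embedding = self.user_embedding_matrix(user_id)                 # [1, d]
    item_embedding = self.item_embedding_matrix(item_id)                 # [N, d]
    current_session_embedding = self.item_embedding_matrix(current_session)
    pre_sessions_embedding = self.item_embedding_matrix(pre_sessions)
    neg_item_embedding = self.item_embedding_matrix(neg_item_id)         # [N, d]

    long_user_embedding = self.attention_level_one(
        user_embedding, pre_sessions_embedding, self.the_first_w, self.the_first_bias)
    hybrid_user_embedding = self.attention_level_two(
        user_embedding, long_user_embedding, current_session_embedding,
        self.the_second_w, self.the_second_bias)

    # preference scores for positive and sampled negative items
    positive_element_wise = hybrid_user_embedding.unsqueeze(0) @ item_embedding.t()      # [1, N]
    negative_element_wise = hybrid_user_embedding.unsqueeze(0) @ neg_item_embedding.t()  # [1, N]

    # BPR-style pairwise loss plus L2 regularisation
    intention_loss = -torch.log(torch.sigmoid(positive_element_wise - negative_element_wise)).mean()
    regular_loss_u_v = self.lamada_u_v * (user_embedding.pow(2).sum() / 2
                                          + item_embedding.pow(2).sum() / 2)
    regular_loss_a = self.lamada_a * (self.the_first_w.pow(2).sum() / 2
                                      + self.the_second_w.pow(2).sum() / 2)
    loss = intention_loss + regular_loss_u_v + regular_loss_a

    # tf.nn.top_k -> torch.topk
    top_value, top_index = torch.topk(positive_element_wise, k=self.K, sorted=True)
    return loss, top_value, top_index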

def run(self):
    print('running ... ')
    with tf.Session() as self.sess:
        self.intention_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(
            self.intention_loss)
        init = tf.global_variables_initializer()
        self.sess.run(init)

        for iter in range(self.iteration):
            print('new iteration begin ... ')
            print('iteration: ', str(iter))

            while self.step * self.batch_size < self.dg.records_number:
                
                batch_user, batch_item, batch_session, batch_neg_item, batch_pre_sessions = self.dg.gen_train_batch_data(
                    self.batch_size)

                self.sess.run(self.intention_optimizer,
                              feed_dict={self.user_id: batch_user,
                                         self.item_id: batch_item,
                                         self.current_session: batch_session,
                                         self.neg_item_id: batch_neg_item,
                                         self.pre_sessions: batch_pre_sessions
                                         })

                self.step += 1
                # if self.step * self.batch_size % 5000 == 0:
            print('eval ...')
            self.evolution()
            print(self.step, '/', self.dg.train_batch_id, '/', self.dg.records_number)
            self.step = 0

       
        self.save()
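
Finally, my guess for run(): there is no Session or global_variables_initializer in PyTorch, just an optimizer and a plain loop. Here dg, batch_size, num_iterations, user_number, item_number and global_dimension are assumed to exist exactly as in the TF code, and I have left evolution() out; save() would presumably become torch.save(model.state_dict(), path):

# user_number, item_number, global_dimension, batch_size, num_iterations
# and the data generator dg are assumed to be defined as in the TF version
model = Model(user_number, item_number, global_dimension)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for iteration in range(num_iterations):
    print('iteration:', iteration)
    step = 0
    while step * batch_size < dg.records_number:
        batch_user, batch_item, batch_session, batch_neg_item, batch_pre_sessions = \
            dg.gen_train_batch_data(batch_size)

        loss, _, _ = model(torch.as_tensor(batch_user, dtype=torch.long),
                           torch.as_tensor(batch_item, dtype=torch.long),
                           torch.as_tensor(batch_session, dtype=torch.long),
                           torch.as_tensor(batch_pre_sessions, dtype=torch.long),
                           torch.as_tensor(batch_neg_item, dtype=torch.long))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        step += 1
    # evaluation would go here, like self.evolution() in the TF version

Does this look like a faithful translation, or am I missing something about how the attention weights should be batched?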