Is my TensorFlow-to-PyTorch model conversion correct?

Hi,
I am trying to convert a TensorFlow model to PyTorch. Can anyone please check whether both models are equivalent or whether there is an issue, because the outputs are different and my PyTorch model is giving a constant loss.

TensorFlow

features = tf.placeholder(
    tf.float32, shape=[None, IMAGE_SIZE * IMAGE_SIZE], name='features')
labels = tf.placeholder(tf.int64, shape=[None], name='labels')
input_layer = tf.reshape(features, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
conv1 = tf.layers.conv2d(
    inputs=input_layer,
    filters=32,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=64,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=2048, name='dense1',
                        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001),
                        bias_regularizer=tf.contrib.layers.l2_regularizer(0.001))
act_1 = tf.nn.relu(dense)
logits = tf.layers.dense(inputs=act_1, units=self.num_classes)
predictions = {
    "classes": tf.argmax(input=logits, axis=1),
    "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# TODO: Confirm that opt initialized once is ok?
train_op = self.optimizer.minimize(
    loss=loss,
    global_step=tf.train.get_global_step())
eval_metric_ops = tf.count_nonzero(tf.equal(labels, predictions["classes"]))

Equivalent PyTorch

import torch as th
import torch.nn as nn


class FemnistNet(nn.Module):
    def __init__(self):
        super(FemnistNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2)
        self.pool1 = nn.MaxPool2d(2, stride=2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2)
        self.pool2 = nn.MaxPool2d(2, stride=2)
        self.fc1 = nn.Linear(3136, 2048)  # 7 * 7 * 64 = 3136, matching the TF reshape
        self.fc2 = nn.Linear(2048, 62)

    def forward(self, x):
        x = x.view(-1, 1, 28, 28)
        x = self.conv1(x)
        x = th.nn.functional.relu(x)
        x = self.pool1(x)

        x = self.conv2(x)
        x = th.nn.functional.relu(x)
        x = self.pool2(x)

        x = x.flatten(start_dim=1)

        x = self.fc1(x)
        l1_activations = th.nn.functional.relu(x)

        x = self.fc2(l1_activations)
        x = x.softmax()

        return x, l1_activations

My training accuracy and loss are constant in the PyTorch model. What could be the issue?

I don’t know what your use case and in particular your loss function is, but the last softmax operation is often the problem. If you are working on a multi-class classification use case with nn.CrossEntropyLoss, note that this criterion expects raw logits, so you would have to remove the x = x.softmax() call.
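
For example, a minimal sketch of a training step with raw logits, assuming the softmax line has been removed; the batch tensors, batch size, and learning setup here are just placeholders:

import torch as th
import torch.nn as nn

model = FemnistNet()
criterion = nn.CrossEntropyLoss()   # applies log-softmax + NLL internally, so it expects raw logits

images = th.randn(8, 28 * 28)       # placeholder batch of flattened 28x28 images
targets = th.randint(0, 62, (8,))   # placeholder integer class labels in [0, 62)

logits, _ = model(images)           # forward should return the raw fc2 output, no softmax
loss = criterion(logits, targets)
loss.backward()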
Also, make sure to add the weight decay to the optimizer for the same parameters as was done in TF.
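
For instance, to mimic the l2_regularizer(0.001) that the TF code attaches only to the dense1 layer, you could use parameter groups; SGD and the learning rate below are just placeholders, since the original self.optimizer is not shown:

import torch as th

# Apply weight decay only to fc1, the counterpart of TF's dense1 layer.
decay_params = list(model.fc1.parameters())
other_params = [p for name, p in model.named_parameters() if not name.startswith('fc1')]

optimizer = th.optim.SGD([
    {'params': decay_params, 'weight_decay': 0.001},
    {'params': other_params, 'weight_decay': 0.0},
], lr=0.01)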