Loss is None during training

class SyNet(sy.Module):
def init(self, torch_ref):
super(SyNet, self).init(torch_ref=torch_ref)
self.layer1 = self.torch_ref.nn.Linear(in_dim, 20)
self.layer2 = self.torch_ref.nn.Linear(20, 30)
self.out = self.torch_ref.nn.Linear(30, out_dim)

def forward(self, X):
    X = self.torch_ref.nn.functional.relu(self.layer1(X))
    X = self.torch_ref.nn.functional.relu(self.layer2(X))
    output = self.torch_ref.nn.functional.relu(self.out(X), dim=1)
    return output

local_model = SyNet(torch)
remote_model = local_model.send(duet)
remote_torch =duet.torch
params = remote_model.parameters()
optim = remote_torch.optim.Adam(params=params, lr=0.00000001)
#%%pixie_debugger
start_time = time.time()
#import pdb; pdb.set_trace()
#from IPython.core.debugger import Tracer; Tracer()()
def train(iterations, model, torch_ref, optim, data_ptr, target_ptr):

losses = []

for i in range(iterations):

    optim.zero_grad()

    output = model(data_ptr)

    loss = torch_ref.nn.functional.mse_loss(output, target_ptr.long())

    loss_item = loss.item()

    loss_value = loss_item.get(
        reason="To evaluate training progress", request_block=True, timeout_secs=5
    )

    if i % 10 == 0:
        print("Epoch", i, "loss", loss_value)

    losses.append(loss_value)

    loss.backward()

    optim.step()

return losses

print(“Computation Time:- %s seconds -” % (time.time() - start_time))
iteration = 100
losses = train(iteration, remote_model, remote_torch, optim, data_ptr, target_ptr)
I am getting this from the training
poch 0 loss None
Epoch 10 loss None
Epoch 20 loss None
Epoch 30 loss None
Epoch 40 loss None
Epoch 50 loss None
Epoch 60 loss None
Epoch 70 loss None
Epoch 80 loss None
Epoch 90 loss None