DDP inference issue

Hello! I saved the model (trained with DDP) in the middle of an epoch, and I can load the checkpoint later without any errors. However, the predictions I get after reloading are quite different from the outputs I saved during training. Any suggestions? Thanks!

#training
import torch
import torch.nn as nn

i = 0
running_loss = 0.0
for epoch in range(num_epochs):
	for seqOhe, hicMat in train_iter:
		model.train()
		i += 1
		outputs = model(seqOhe.to(rank))

		# squared error over the 256 x 256 map, normalised by map size and batch size
		loss = (torch.square(hicMat.to(rank) - outputs) / (256 * 256 * bs)).sum()

		# save one checkpoint (rank 0 only) together with the batch it was computed on
		if epoch % 5 == 0 and epoch >= 5 and i == 500:
			if rank == 0:
				torch.save({'model_state_dict': model.state_dict(),            # DDP-wrapped: keys carry a "module." prefix
				            'model': model.module.state_dict(),                # unwrapped module: keys without the prefix
				            'optimizer_state_dict': optimizer.state_dict(),
				            'seq': seqOhe,
				            'pred': outputs.detach().cpu(),                    # detach so no autograd state is pickled
				            'real': hicMat,
				            'loss': loss.item()}, "model.pt")


		optimizer.zero_grad()
		# computing gradients
		loss.backward()
		nn.utils.clip_grad_norm_(model.parameters(), 1.0)
		# accumulating running loss
		running_loss += loss.item()
		# update weights based on the computed gradients
		optimizer.step()
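
For reference, the model and data loader come from a standard DistributedDataParallel setup, roughly like the sketch below (the backend, port, optimizer, and MyModel are simplified placeholders, not my exact script):

import os
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler

def setup(rank, world_size):
    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "12355")
    dist.init_process_group("nccl", rank=rank, world_size=world_size)

def build_training_objects(rank, dataset, bs):
    model = MyModel().to(rank)                          # MyModel stands in for the real network
    model = DDP(model, device_ids=[rank])               # wrapping adds the "module." prefix to state_dict keys
    sampler = DistributedSampler(dataset, rank=rank)
    train_iter = DataLoader(dataset, batch_size=bs, sampler=sampler, pin_memory=True)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)   # illustrative optimizer / learning rate
    return model, train_iter, optimizer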

#inference
import torch
from collections import OrderedDict

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

checkpoint = torch.load("model.pt", map_location=torch.device('cpu'))

# strip the "module." prefix that DDP adds to every parameter name
# (checkpoint['model'] already holds the unwrapped state dict, so it could be loaded directly instead)
new_state_dict = OrderedDict()
for key, val in checkpoint['model_state_dict'].items():
    name = key[7:]                  # drop the leading "module."
    new_state_dict[name] = val

# model here is a freshly constructed (non-DDP) instance of the same architecture
model.load_state_dict(new_state_dict)
model.to(device)
model.eval()

seqOhe = checkpoint['seq']          # the exact batch that was saved alongside the weights
with torch.no_grad():
    outputs = model(seqOhe.to(device, non_blocking=True))
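
A quick check to quantify the mismatch between the reloaded model's outputs and the predictions stored in the checkpoint (the tolerances below are just values I picked, nothing principled):

# both tensors moved to CPU so the comparison is device-independent
saved_pred = checkpoint['pred'].detach().cpu()
new_pred   = outputs.detach().cpu()

print("max abs difference:", (saved_pred - new_pred).abs().max().item())
print("allclose:", torch.allclose(saved_pred, new_pred, rtol=1e-4, atol=1e-5))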