When I print x, it works well for a long time, but when I don't print, it does not work at all. Why?

Here is where I print:

for k in range(3):
    for i in range(3):
        for j in range(3):
            x = f.sigmoid(self.mutation[i][j](objectModelOutput[9*k+3*i+j]))
            x = f.sigmoid(self.fc2[i](x))
            x = f.sigmoid(self.fc3[i](x))
            print(x)  # where I print x
            objectModelOutput.append(nn.Parameter(f.sigmoid(self.fc4[i](x1))))

This is the output right before the crash, followed by the error:

tensor([[0.0034]], device='cuda:0', grad_fn=<AddmmBackward>)
tensor([[-0.0034]], device='cuda:0', grad_fn=<SubBackward0>)
tensor([nan], device='cuda:0', grad_fn=<IndexBackward>)
tensor([nan], device='cuda:0', grad_fn=<DivBackward0>) tensor([0.2443], device='cuda:0')
tensor([[nan]], device='cuda:0', grad_fn=<MinimumBackward>)
[W ..\torch\csrc\autograd\python_anomaly_mode.cpp:104] Warning: Error detected in SoftmaxBackward. Traceback of forward call that caused the error:
  File ".\main.py", line 402, in <module>
    trainer.train()
  File ".\main.py", line 380, in train
    self.update()
  File ".\main.py", line 290, in update
    action_prob = self.ppo.actor(state[index].reshape(1,3,210,160))[0,action[index]] # new policy
  File "C:\Users\afuler\anaconda3\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File ".\main.py", line 87, in forward
    x = self.softmax(x)
  File "C:\Users\afuler\anaconda3\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "C:\Users\afuler\anaconda3\lib\site-packages\torch\nn\modules\activation.py", line 1198, in forward
    return F.softmax(input, self.dim, _stacklevel=5)
  File "C:\Users\afuler\anaconda3\lib\site-packages\torch\nn\functional.py", line 1512, in softmax
    ret = input.softmax(dim)
 (function _print_stack)
  0%|          | 0/167 [00:01<?, ?it/s]
Traceback (most recent call last):
  File ".\main.py", line 402, in <module>
    trainer.train()
  File ".\main.py", line 380, in train
    self.update()
  File ".\main.py", line 305, in update
    action_loss.backward(retain_graph=True)
  File "C:\Users\afuler\anaconda3\lib\site-packages\torch\tensor.py", line 221, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph)
  File "C:\Users\afuler\anaconda3\lib\site-packages\torch\autograd\__init__.py", line 130, in backward
    Variable._execution_engine.run_backward(
RuntimeError: Function 'SoftmaxBackward' returned nan values in its 0th output.
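
The forward-call traceback in the warning above appears because anomaly detection is enabled. A minimal sketch of how it is turned on (the exact place in my main.py is not shown above):

import torch

# record forward-pass stack traces so a backward error points at the
# forward op that produced it (this generates the warning block above)
torch.autograd.set_detect_anomaly(True)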

Here is the full code of the model:

import torch
import torch.nn as nn
import torch.nn.functional as f

class actor(nn.Module):
    def __init__(self,action_num):
        super(actor,self).__init__()
        self.cnn1 = nn.Conv2d(3,12,4,stride = 4,padding=2)
        self.cnn2 = nn.Conv2d(12,16,4,stride = 4,padding=1)
        self.attention = []
        for i in range(3):
            self.attention.append(nn.Conv2d(16,32,4,stride = 2,padding=1))
        self.attention = nn.ModuleList(self.attention)
        self.cnn3 = []
        for i in range(3):
            self.cnn3.append(nn.Conv2d(16,32,4,stride = 2,padding=1))
        self.cnn3 = nn.ModuleList(self.cnn3)
        self.flatten = []
        for i in range(3):
            self.flatten.append(nn.Flatten())
        self.flatten = nn.ModuleList(self.flatten)
        self.fc1 = []
        for i in range(3):
            self.fc1.append(nn.Linear(960,64))
        self.fc1 = nn.ModuleList(self.fc1)

        self.hidden = torch.empty((1,64)).cuda()  # torch.empty returns uninitialized memory

        self.fc2 = []
        for i in range(3):
            self.fc2.append(nn.Linear(128,64))
        self.fc2 = nn.ModuleList(self.fc2)
        self.fc3 = []
        for i in range(3):
            self.fc3.append(nn.Linear(64,64))
        self.fc3 = nn.ModuleList(self.fc3)
        self.fc4 = []
        for i in range(3):
            self.fc4.append(nn.Linear(64,64))
        self.fc4 = nn.ModuleList(self.fc4)
        self.mutation= []
        for i in range(3):
            self.mutation.append(nn.ModuleList([]))
            for k in range(3):
                self.mutation[i].append(nn.Sequential(nn.Linear(64,64),
                                        nn.Sigmoid(),
                                        nn.Linear(64,128)))
        self.mutation = nn.ModuleList(self.mutation)
        self.fc5 = nn.Linear(64*3,64)
        self.fc6 = nn.Linear(64,64)
        self.fc7 = nn.Linear(64,action_num)
        self.softmax = nn.Softmax()

    def forward(self,x):
        x = f.relu(self.cnn1(x))
        x = f.relu(self.cnn2(x))
        
        objectModelOutput = nn.ParameterList([])
        for i in range(3):
            attention = f.relu(self.attention[i](x))
            x1 = f.relu(self.cnn3[i](x))
            x1 = x1.mul(attention)
            x1 = f.sigmoid(self.flatten[i](x1))
            x1 = f.sigmoid(self.fc1[i](x1))
            x1 = torch.cat((x1,self.hidden),-1)
            x1 = f.sigmoid(self.fc2[i](x1))
            x1 = f.sigmoid(self.fc3[i](x1))
            objectModelOutput.append(nn.Parameter(f.sigmoid(self.fc4[i](x1))))

        for k in range(3):
            for i in range(3):
                for j in range(3):
                    x = f.sigmoid(self.mutation[i][j](objectModelOutput[9*k+3*i+j]))
                    x = f.sigmoid(self.fc2[i](x))
                    x = f.sigmoid(self.fc3[i](x))
                    # print(x)
                    # note: this appends fc4(x1) (x1 is left over from the first loop), not fc4(x)
                    objectModelOutput.append(nn.Parameter(f.sigmoid(self.fc4[i](x1))))
        x = torch.cat([objectModelOutput[-3],objectModelOutput[-2],objectModelOutput[-1]],-1)
        # print(x)
        x = f.sigmoid(self.fc5(x))
        self.hidden = x.clone()
        x = f.sigmoid(self.fc6(x))
        x = f.sigmoid(self.fc7(x))
        x = self.softmax(x)
        return x
    
    def initHidden(self):
        self.hidden = torch.empty((1,64)).cuda()
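
Instead of printing every tensor, I could also check for the first NaN explicitly. A minimal sketch (checkNan is a made-up helper, not in my actual script):

def checkNan(name, t):
    # raise as soon as any element of t is NaN, instead of printing the tensor
    if torch.isnan(t).any():
        raise RuntimeError("NaN detected in " + name)

# e.g. inside forward(), after each activation:
# checkNan("fc3 output", x)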

Why does the softmax output NaN?

In case you are using an older PyTorch version, could you update to the latest stable release (1.8.0) or the nightly and rerun your script, please?
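
For example, with pip (pick the wheel that matches your CUDA version):

pip install --upgrade torch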