Visdom vis.line() window disappears

I use Visdom vis.line() to plot the loss during training, but after a while the loss window disappears from the env and is no longer shown in the browser.

The loss-plotting function is as follows:

    def plot_current_losses(self, iter_id, losses, opt):
        """
        Plot losses in one figure current epoch
        Args:
            losses: dict
                {'loss_1': value, 'loss_2': value}
        """
        if not hasattr(self, 'plot_data'):
            self.plot_data = {'x': [], 'y': [], 'legend': list(losses.keys())}
        # @rivergold: this differs from the CycleGAN implementation
        self.plot_data['x'].append(iter_id)
        self.plot_data['y'].append([losses[k]
                                    for k in self.plot_data['legend']])
        try:
            self.vis.line(
                # X has shape [n_points]; Y has shape [n_points, n_losses]
                X=np.array(self.plot_data['x']),
                Y=np.array(self.plot_data['y']),
                opts={
                    'title': opt.name,
                    'legend': self.plot_data['legend'],
                    'xlabel': 'iter_id',
                    'ylabel': 'loss'},
                win=self.win_id,
                env=self.env
            )
        except ConnectionError:
            self._throw_visdom_connection_error()

No error is raised, but the loss window disappears… Why?
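
For reference, here is a minimal, standalone sketch of the same plotting pattern (the window id, env name, and fake loss values are made up), in case it helps to reproduce the behaviour:

    import numpy as np
    import visdom

    # Minimal sketch, assuming a Visdom server running on the default port 8097.
    vis = visdom.Visdom(env='main')

    xs, ys = [], []
    for iter_id in range(100):
        xs.append(iter_id)
        ys.append([1.0 / (iter_id + 1), 2.0 / (iter_id + 1)])  # two fake losses
        vis.line(
            X=np.array(xs),   # shape [n_points]
            Y=np.array(ys),   # shape [n_points, n_losses]
            opts={'title': 'losses',
                  'legend': ['loss_1', 'loss_2'],
                  'xlabel': 'iter_id',
                  'ylabel': 'loss'},
            win='loss_window')  # fixed window id, so the plot is redrawn on every call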

Here is my snippet to display the loss curve in Visdom, which works fine.

    import time
    import torch
    import visdom

    vis = visdom.Visdom()
    loss_window = vis.line(X=torch.zeros((1,)).cpu(),
                           Y=torch.zeros((1,)).cpu(),
                           opts=dict(xlabel='epoch',
                                     ylabel='Loss',
                                     title='Training Loss',
                                     legend=['Loss']))
    accuracy_window = vis.line(X=torch.zeros((1,)).cpu(),
                               Y=torch.zeros((1,)).cpu(),
                               opts=dict(xlabel='epoch',
                                         ylabel='accuracy',
                                         title='Training accuracy',
                                         legend=['accuracy']))
    for epoch in range(200):  # loop over the dataset multiple times
        
        t0 = time.time()
        
        running_loss = 0
        total_train = 0
        correct_train = 0
        
        model.train()
        for i, data in enumerate(train_loader, 0):
            # get the inputs
            t_image, mask = data
            t_image, mask = t_image.to(device), mask.to(device)

            # zeroes the gradient buffers of all parameters
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = model(t_image) # forward
            loss = criterion(outputs, mask) # calculate the loss
            loss.backward() # back propagation
            optimizer.step() # update parameters
            running_loss += loss.item()
            
            # accuracy
            _, predicted = torch.max(outputs.data, 1)
            total_train += mask.nelement()  # number of pixel in the batch
            correct_train += predicted.eq(mask.data).sum().item() # count correctly predicted pixels
            train_accuracy = 100 * correct_train / total_train
           #avg_accuracy = train_accuracy / len(train_loader)                                     
            
            print('Epoch {}, train Loss: {:.3f}'.format(epoch, loss.item()), "Training Accuracy: %d %%" % (train_accuracy), '{:.3f} seconds'.format(time.time() - t0))
            
            
            values = [epoch+1, running_loss, train_accuracy]
            export_history(header, values, save_dir, save_file_name)

            vis.line(
                    X=torch.ones((1, 1)).cpu()*epoch,
                    Y=torch.Tensor([loss.item()]).unsqueeze(0).cpu(),
                    win=loss_window,
                    update='append')
            
            vis.line(
                    X=torch.ones((1, 1)).cpu()*epoch,
                    Y=torch.Tensor([train_accuracy]).unsqueeze(0).cpu(),
                    win=accuracy_window,
                    update='append')

Also, there are some good examples here.
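
One extra sanity check that may help when windows silently stop updating (a sketch, assuming the server runs locally on the default port 8097): verify the connection before plotting, and start the server separately with `python -m visdom.server`.

    import visdom

    # Sketch: confirm the client can reach the Visdom server before plotting.
    # The server is assumed to have been started with `python -m visdom.server`.
    vis = visdom.Visdom(server='http://localhost', port=8097)
    if not vis.check_connection():
        raise RuntimeError('Cannot reach the Visdom server at http://localhost:8097')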