BrokenPipeError: [WinError 232]

After training for a few epochs, this error suddenly appears. I have no idea how to fix it, even after reading the error message below. Does anyone know what happened?
By the way, the error started occurring after I wrapped the training code in if __name__ == '__main__', specified the DataLoader num_workers and pin_memory, and added these lines:

torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
torch.autograd.set_detect_anomaly(False)
torch.autograd.profiler.profile(False)
torch.autograd.profiler.emit_nvtx(False)

Is there an alternative way to code this? Thank you.
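For reference, here is my understanding of those last three lines as a minimal sketch (please correct me if I am wrong): set_detect_anomaly(False) takes effect as a plain call, but profile(...) and emit_nvtx(...) are context managers, so constructing them as bare statements should not change anything, and to keep profiling off it should be enough to simply not enter those contexts:

import torch

torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True        # let cuDNN pick fast kernels
torch.autograd.set_detect_anomaly(False)     # global switch, works as a plain call

# only wrap regions that should actually be profiled/annotated
with torch.autograd.profiler.profile(enabled=False):
    ...  # code to (not) profile
with torch.autograd.profiler.emit_nvtx(enabled=False):
    ...  # code to (not) annotate for nvprof/Nsight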

from resnetmodel2 import UnetLike_Residual_AutoEncoder
import torch
import numpy as np
from dataset import SurDataset
from utils import save_checkpoint, load_checkpoint, save_some_examples, seed_everything
from torch.utils.data import DataLoader
from torchvision.utils import save_image
import torch.nn as nn
import torch.optim as optim
import config
from tqdm import tqdm
if __name__ == '__main__':
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True
    torch.autograd.set_detect_anomaly(False)
    torch.autograd.profiler.profile(False)
    torch.autograd.profiler.emit_nvtx(False)
    seed_everything(42)
    print(config.DEVICE)
    model = UnetLike_Residual_AutoEncoder(in_channels=1, out_channels=1).to(config.DEVICE)
    optimizer = optim.Adam(model.parameters(),
            lr=config.LEARNING_RATE,
            betas=(0.5, 0.999))
    mse = nn.MSELoss()
    scaler = torch.cuda.amp.GradScaler()  # AMP grad scaler (currently unused: no autocast/scaler calls in the loop)
    dataset = SurDataset(csv_dir=config.CSV_DIR, img_dir=config.IMG_DIR)
    train_set, val_set = torch.utils.data.random_split(dataset, [12000, len(dataset)-12000], )
    loader = DataLoader(dataset=train_set, batch_size=config.BATCH_SIZE, shuffle=True, num_workers=config.NUM_WORKER, pin_memory=config.PIN_MEMORY)
    val_loader = DataLoader(dataset=val_set, batch_size=config.BATCH_SIZE, num_workers=config.NUM_WORKER, pin_memory=config.PIN_MEMORY)
    loop = tqdm(loader)  # note: the same tqdm-wrapped loader object is re-iterated every epoch


    if config.LOAD_MODEL:
        load_checkpoint(
            config.CHECKPOINT, model, optimizer, config.LEARNING_RATE,
        )
    model.train()
    for epoch in range(config.NUM_EPOCHS):
        print(f"epoch {epoch+1}/{config.NUM_EPOCHS}:")
        losses = []
        for idx, (csv_, target) in enumerate(loop):
            csv_ = csv_.to(config.DEVICE)
            target = target.to(config.DEVICE)
            predict = model(csv_)
            loss = mse(predict, target)
            losses.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            # gradient descent or adam step
            optimizer.step()
            if idx % 5 == 0:
                save_some_examples(model, val_loader, epoch, folder=config.EVALUATE_FOLDERNAME)
        if config.SAVE_MODEL:# and epoch % 2 == 0:
            save_checkpoint(model, optimizer, filename=config.CHECKPOINT)

        print(f"loss at epoch {epoch+1}/{config.NUM_EPOCHS} is {np.sum(losses):.4f}.")




        
=> Saving checkpoint
loss at epoch 1/30 is 55.9952.
epoch 2/30:
loss at epoch 2/30 is 54.4250.
epoch 3/30:
=> Saving checkpoint
loss at epoch 3/30 is 52.9719.
epoch 4/30:
Traceback (most recent call last):
  File "C:\Users\PML\.conda\envs\floren\lib\multiprocessing\queues.py", line 238, in _feed
    send_bytes(obj)
  File "C:\Users\PML\.conda\envs\floren\lib\multiprocessing\connection.py", line 200, in send_bytes
    self._send_bytes(m[offset:offset + size])
  File "C:\Users\PML\.conda\envs\floren\lib\multiprocessing\connection.py", line 280, in _send_bytes
    ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
BrokenPipeError: [WinError 232]

@ptrblck Could you help me? Thank you a lot.