AttributeError: Can't pickle local object 'get_dataloader.<locals>.train_trans'

Hi all,
I hope everybody reading this is having a great day.

I have a problem with an AttributeError. I searched for a solution to the error but couldn't find one.
I'm using Windows 10, CUDA 11.1, and PyTorch.

baseline.py

import os
import time
import numpy as np
import warnings
import random

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms.functional as TF

from option import get_args
from learning.minicity import MiniCity
from learning.learner import train_epoch, validate_epoch, predict
from learning.utils import get_dataloader, get_lossfunc, get_model
from helpers.helpers import plot_learning_curves

def main():
    args = get_args()
    print("args : ", args)

    # Fix seed
    if args.seed is not None:
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        np.random.seed(args.seed)
        random.seed(args.seed)
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting from checkpoints.')

    assert args.crop_size[0] <= args.train_size[0] and args.crop_size[1] <= args.train_size[1], \
        'Crop size must be <= image size.'

    # Create directory to store run files
    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path + '/images')
    if not os.path.isdir(args.save_path + '/results_color_val'):
        os.makedirs(args.save_path + '/results_color_val')
        os.makedirs(args.save_path + '/results_color_test')

    Dataset = MiniCity
    dataloaders = get_dataloader(Dataset, args)
    criterion = get_lossfunc(Dataset, args)
    model = get_model(Dataset, args)
    print(model)

    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr_init,
                                momentum=args.lr_momentum, weight_decay=args.lr_weight_decay)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)

    # Initialize metrics
    best_miou = 0.0
    metrics = {'train_loss': [],
               'train_acc': [],
               'val_acc': [],
               'val_loss': [],
               'miou': []}
    start_epoch = 0

    # Resume training from checkpoint
    if args.weights:
        print('Resuming training from {}.'.format(args.weights))
        checkpoint = torch.load(args.weights)
        model.load_state_dict(checkpoint['model_state_dict'], strict=True)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        metrics = checkpoint['metrics']
        best_miou = checkpoint['best_miou']
        start_epoch = checkpoint['epoch'] + 1

    # Push model to GPU
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
        print('Model pushed to {} GPU(s), type {}.'.format(torch.cuda.device_count(), torch.cuda.get_device_name(0)))

    # No training, only running prediction on test set
    if args.predict:
        checkpoint = torch.load(args.save_path + '/best_weights.pth.tar')
        model.load_state_dict(checkpoint['model_state_dict'], strict=True)
        print('Loaded model weights from {}'.format(args.save_path + '/best_weights.pth.tar'))
        # Create results directory
        if not os.path.isdir(args.save_path + '/results_val'):
            os.makedirs(args.save_path + '/results_val')
        if not os.path.isdir(args.save_path + '/results_test'):
            os.makedirs(args.save_path + '/results_test')
        predict(dataloaders['test'], model, Dataset.mask_colors, folder=args.save_path, mode='test', args=args)
        predict(dataloaders['val'], model, Dataset.mask_colors, folder=args.save_path, mode='val', args=args)
        return

    # Generate log file
    with open(args.save_path + '/log_epoch.csv', 'a') as epoch_log:
        epoch_log.write('epoch, train loss, val loss, train acc, val acc, miou\n')

    since = time.time()

    for epoch in range(start_epoch, args.epochs):
        # Train
        print('--- Training ---')
        train_loss, train_acc = train_epoch(dataloaders['train'], model, criterion, optimizer, scheduler,
                                            epoch, void=Dataset.voidClass, args=args)
        metrics['train_loss'].append(train_loss)
        metrics['train_acc'].append(train_acc)
        print('Epoch {} train loss: {:.4f}, acc: {:.4f}'.format(epoch, train_loss, train_acc))

        # Validate
        print('--- Validation ---')
        val_acc, val_loss, miou = validate_epoch(dataloaders['val'], model, criterion, epoch,
                                                 Dataset.classLabels, Dataset.validClasses, void=Dataset.voidClass,
                                                 maskColors=Dataset.mask_colors, folder=args.save_path, args=args)
        metrics['val_acc'].append(val_acc)
        metrics['val_loss'].append(val_loss)
        metrics['miou'].append(miou)

        # Write logs
        with open(args.save_path + '/log_epoch.csv', 'a') as epoch_log:
            epoch_log.write('{}, {:.5f}, {:.5f}, {:.5f}, {:.5f}, {:.5f}\n'.format(
                epoch, train_loss, val_loss, train_acc, val_acc, miou))

        # Save checkpoint
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'best_miou': best_miou,
            'metrics': metrics,
        }, args.save_path + '/checkpoint.pth.tar')

        # Save best model to file
        if miou > best_miou:
            print('mIoU improved from {:.4f} to {:.4f}.'.format(best_miou, miou))
            best_miou = miou
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
            }, args.save_path + '/best_weights.pth.tar')

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))

    plot_learning_curves(metrics, args)

    # Load best model
    checkpoint = torch.load(args.save_path + '/best_weights.pth.tar')
    model.load_state_dict(checkpoint['model_state_dict'], strict=True)
    print('Loaded best model weights (epoch {}) from {}/best_weights.pth.tar'.format(checkpoint['epoch'], args.save_path))

    # Create results directory
    if not os.path.isdir(args.save_path + '/results_val'):
        os.makedirs(args.save_path + '/results_val')
    if not os.path.isdir(args.save_path + '/results_test'):
        os.makedirs(args.save_path + '/results_test')

    # Run prediction on the validation set. For the test set, simply replace 'val' with 'test'.
    predict(dataloaders['val'], model, Dataset.mask_colors, folder=args.save_path, mode='val', args=args)


if __name__ == '__main__':
    main()

Result
python baseline.py --save_path baseline_run_deeplabv3_resnet50 --crop_size 576 1152 --batch_size 1
args : Namespace(batch_size=1, colorjitter_factor=0.3, copyblob=False, crop_size=[576, 1152], cutmix=False, dataset_mean=[0.485, 0.456, 0.406], dataset_path='./minicity', dataset_std=[0.229, 0.224, 0.225], epochs=200, focal_gamma=2.0, hflip=True, loss='ce', lr_init=0.01, lr_momentum=0.9, lr_weight_decay=0.0001, model='DeepLabv3_resnet50', mst=False, norm='batch', num_workers=8, pin_memory=True, predict=False, save_path='baseline_run_deeplabv3_resnet50', seed=None, test_size=[1024, 2048], train_size=[1024, 2048], weights=None)
DeepLabV3(
  (backbone): IntermediateLayerGetter(
    (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
    (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (relu): ReLU(inplace=True)
    (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
    (layer1): Sequential(
      (0): Bottleneck(
        (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (downsample): Sequential(
          (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
          (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (1): Bottleneck(
        (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
      (2): Bottleneck(
        (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
    )
    (layer2): Sequential(
      (0): Bottleneck(
        (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (downsample): Sequential(
          (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
          (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (1): Bottleneck(
        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
      (2): Bottleneck(
        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
      (3): Bottleneck(
        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
    )
    (layer3): Sequential(
      (0): Bottleneck(
        (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (downsample): Sequential(
          (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
          (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (1): Bottleneck(
        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
      (2): Bottleneck(
        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
      (3): Bottleneck(
        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
      (4): Bottleneck(
        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
      (5): Bottleneck(
        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
    )
    (layer4): Sequential(
      (0): Bottleneck(
        (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (downsample): Sequential(
          (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
          (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (1): Bottleneck(
        (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False)
        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
      (2): Bottleneck(
        (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False)
        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
      )
    )
  )
  (classifier): DeepLabHead(
    (0): ASPP(
      (convs): ModuleList(
        (0): Sequential(
          (0): Conv2d(2048, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
          (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (2): ReLU()
        )
        (1): ASPPConv(
          (0): Conv2d(2048, 256, kernel_size=(3, 3), stride=(1, 1), padding=(12, 12), dilation=(12, 12), bias=False)
          (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (2): ReLU()
        )
        (2): ASPPConv(
          (0): Conv2d(2048, 256, kernel_size=(3, 3), stride=(1, 1), padding=(24, 24), dilation=(24, 24), bias=False)
          (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (2): ReLU()
        )
        (3): ASPPConv(
          (0): Conv2d(2048, 256, kernel_size=(3, 3), stride=(1, 1), padding=(36, 36), dilation=(36, 36), bias=False)
          (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (2): ReLU()
        )
        (4): ASPPPooling(
          (0): AdaptiveAvgPool2d(output_size=1)
          (1): Conv2d(2048, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
          (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (3): ReLU()
        )
      )
      (project): Sequential(
        (0): Conv2d(1280, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (2): ReLU()
        (3): Dropout(p=0.5, inplace=False)
      )
    )
    (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (3): ReLU()
    (4): Conv2d(256, 20, kernel_size=(1, 1), stride=(1, 1))
  )
)
Model pushed to 1 GPU(s), type NVIDIA GeForce GTX 1660 SUPER.
--- Training ---
Traceback (most recent call last):
  File "baseline.py", line 166, in <module>
    main()
  File "baseline.py", line 106, in main
    train_loss, train_acc = train_epoch(dataloaders['train'], model, criterion, optimizer, scheduler, epoch, void=Dataset.voidClass, args=args)
  File "C:\Users\kaist\Desktop\tutorial\folder\learning\learner.py", line 41, in train_epoch
    for epoch_step, (inputs, labels, _) in enumerate(dataloader):
  File "C:\Users\kaist\anaconda3\lib\site-packages\torch\utils\data\dataloader.py", line 359, in __iter__
    return self._get_iterator()
  File "C:\Users\kaist\anaconda3\lib\site-packages\torch\utils\data\dataloader.py", line 305, in _get_iterator
    return _MultiProcessingDataLoaderIter(self)
  File "C:\Users\kaist\anaconda3\lib\site-packages\torch\utils\data\dataloader.py", line 918, in __init__
    w.start()
  File "C:\Users\kaist\anaconda3\lib\multiprocessing\process.py", line 121, in start
    self._popen = self._Popen(self)
  File "C:\Users\kaist\anaconda3\lib\multiprocessing\context.py", line 224, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\Users\kaist\anaconda3\lib\multiprocessing\context.py", line 327, in _Popen
    return Popen(process_obj)
  File "C:\Users\kaist\anaconda3\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\kaist\anaconda3\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
AttributeError: Can't pickle local object 'get_dataloader.<locals>.train_trans'

(base) C:\Users\kaist\Desktop\tutorial\folder>Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Users\kaist\anaconda3\lib\multiprocessing\spawn.py", line 116, in spawn_main
    exitcode = _main(fd, parent_sentinel)
  File "C:\Users\kaist\anaconda3\lib\multiprocessing\spawn.py", line 126, in _main
    self = reduction.pickle.load(from_parent)
EOFError: Ran out of input

Would anybody be kind enough to tell me what the problem is?

Could you check how train_trans is defined? You might be running into a similar issue to the one described here.
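
On Windows, DataLoader workers are started with the spawn method, so the Dataset (including the transform stored on it) has to be pickled and sent to each worker process. A function defined inside get_dataloader is a local object and can't be pickled, which is exactly what the error message says. The usual fix is to move the transform to module level, for example as a callable class. Here is a minimal sketch, assuming train_trans is currently a nested function in learning/utils.py; the class name, its arguments, and the flip augmentation are hypothetical stand-ins for whatever your version actually does:

# learning/utils.py - sketch only; TrainTransform and its arguments are
# hypothetical placeholders for your local train_trans closure.
import random

import torchvision.transforms.functional as TF


class TrainTransform:
    # Defined at module level, so instances can be pickled and shipped
    # to the spawned DataLoader worker processes on Windows.
    def __init__(self, hflip=True):
        self.hflip = hflip

    def __call__(self, image, mask):
        # Example joint augmentation: flip image and mask together.
        if self.hflip and random.random() > 0.5:
            image = TF.hflip(image)
            mask = TF.hflip(mask)
        return TF.to_tensor(image), mask

Inside get_dataloader you would then create train_trans = TrainTransform(hflip=args.hflip) instead of using a nested def, and pass it to the dataset exactly as before. If you just want training to run first, setting num_workers=0 avoids the worker processes entirely, at the cost of slower data loading. The second traceback (EOFError: Ran out of input) is just the spawned child process failing after the parent crashed; it should disappear once the pickling error is fixed.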

Thank you. I will check right now.