AttributeError: 'DataLoader' object has no attribute 'pin_memory_device'

Hello,

I’m running PyTorch Lightning with Ray.

When I run this function:


def run_ray(metric='val_acc', mode='max', num_samples=3, config_dict=None,
            checkpoint_file_name="penetration_move_to_oracle_ray_ckpt",
            config_file='penetration/best_config.txt',
            local_dir='penetration/runs/'):
    """Run a Ray Tune + HyperOpt search and load the best checkpointed model.

    Args:
        metric: name of the metric reported by ``train_fn`` to optimize.
        mode: ``'max'`` or ``'min'`` for the metric.
        num_samples: number of trials to sample from the search space.
        config_dict: hyperparameter search space passed as ``param_space``.
        checkpoint_file_name: checkpoint file name inside the best trial's
            checkpoint directory.
        config_file: text file path the best config and log dir are appended to.
        local_dir: Ray Tune results directory.

    Returns:
        Tuple of ``(best_result.log_dir, model)`` where ``model`` is the
        ``GraphLevelGNN`` restored from the best trial's checkpoint.
    """
    # Avoid the mutable-default-argument pitfall of `config_dict={}`.
    if config_dict is None:
        config_dict = {}

    hyperopt_search = HyperOptSearch(metric=metric, mode=mode)

    # change from gpu {"gpu": 1}
    tuner = tune.Tuner(
        tune.with_resources(train_fn, {"gpu": 1}),
        tune_config=tune.TuneConfig(num_samples=num_samples,
                                    search_alg=hyperopt_search),
        param_space=config_dict,
        run_config=RunConfig(local_dir=local_dir),
    )
    results = tuner.fit()
    best_result = results.get_best_result(metric=metric, mode=mode)

    best_checkpoint = best_result.checkpoint
    path = os.path.join(str(best_checkpoint.to_directory()), checkpoint_file_name)
    print(path)

    model = GraphLevelGNN.load_from_checkpoint(path)

    # Context manager + distinct name: the original rebound the `config_file`
    # parameter to the open handle (shadowing the path) and leaked the handle
    # if anything in between raised.
    with open(config_file, 'a') as cfg_out:
        cfg_out.write(str(best_result.config) + '\n')
        cfg_out.write(str(best_result.log_dir))

    return best_result.log_dir, model

Like this:

def _load_pickle(path):
    """Load one latin1-encoded pickle file, closing the handle afterwards.

    SECURITY NOTE(review): ``pickle.load`` can execute arbitrary code —
    only load files from a trusted source.
    """
    with open(path, 'rb') as fh:
        return pickle.load(fh, encoding='latin1')


# The original opened each file without ever closing it (nine leaked
# handles); a single helper with a context manager replaces the copy-paste.
train_dataset = _load_pickle('train_dataset.pkl')
train_seqs = _load_pickle('train_seqs.pkl')
test_dataset = _load_pickle('test_dataset.pkl')
test_seqs = _load_pickle('test_seqs.pkl')
val_dataset = _load_pickle('val_dataset.pkl')
val_seqs = _load_pickle('val_seqs.pkl')
graph_train_loader = _load_pickle('graph_train_loader.pkl')
graph_test_loader = _load_pickle('graph_test_loader.pkl')
graph_val_loader = _load_pickle('graph_val_loader.pkl')

# Step 2: define the hyperparameter space for Ray Tune to explore.
config_dict = {
    "c_hidden": tune.choice([32, 64, 128, 256, 512, 1024, 2056]),
    "dp_rate_linear": tune.uniform(0.4, 0.8),
    "num_layers": tune.randint(3, 20),
    "activation_function": tune.choice([
        'nn.ReLU(inplace=True)',
        'nn.LeakyReLU(inplace=True)',
        'nn.Sigmoid()',
        'nn.Tanh()',
    ]),
    "optimizer_name": tune.choice([
        'SGD', 'NAdam', 'Adam', 'RMSProp',
        'ASGD', 'LBFGS', 'AdamW', 'Adadelta',
    ]),
    "learning_rate": tune.uniform(0.0001, 0.1),
}

best_result_log_path, model = run_ray(config_dict=config_dict)

I get the error:

Sanity Checking: 0it [00:00, ?it/s]
2023-01-12 16:24:51,170 ERROR trial_runner.py:980 -- Trial train_fn_9d4a965b: Error processing event.
ray.exceptions.RayTaskError(AttributeError): ray::ImplicitFunc.train() (pid=83179, ip=10.0.0.106, repr=train_fn)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/ray/tune/trainable/trainable.py", line 347, in train
    result = self.step()
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 417, in step
    self._report_thread_runner_error(block=True)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 589, in _report_thread_runner_error
    raise e
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 289, in run
    self._entrypoint()
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 362, in entrypoint
    return self._trainable_func(
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 684, in _trainable_func
    output = fn()
  File "/home/ubuntu/penetration_move_to_oracle2/penetration_v1.py", line 320, in train_fn
    train_graph_classifier(
  File "/home/ubuntu/penetration_move_to_oracle2/penetration_v1.py", line 306, in train_graph_classifier
    model = trainer.fit(model, graph_train_loader, graph_val_loader)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 770, in fit
    self._call_and_handle_interrupt(
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 723, in _call_and_handle_interrupt
    return trainer_fn(*args, **kwargs)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 811, in _fit_impl
    results = self._run(model, ckpt_path=self.ckpt_path)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 1236, in _run
    results = self._run_stage()
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 1323, in _run_stage
    return self._run_train()
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 1345, in _run_train
    self._run_sanity_check()
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 1413, in _run_sanity_check
    val_loop.run()
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/loops/base.py", line 204, in run
    self.advance(*args, **kwargs)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/loops/dataloader/evaluation_loop.py", line 155, in advance
    dl_outputs = self.epoch_loop.run(self._data_fetcher, dl_max_batches, kwargs)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/loops/base.py", line 199, in run
    self.on_run_start(*args, **kwargs)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py", line 88, in on_run_start
    self._data_fetcher = iter(data_fetcher)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/pytorch_lightning/utilities/fetching.py", line 178, in __iter__
    self.dataloader_iter = iter(self.dataloader)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 444, in __iter__
    return self._get_iterator()
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 387, in _get_iterator
    return _SingleProcessDataLoaderIter(self)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 712, in __init__
    super(_SingleProcessDataLoaderIter, self).__init__(loader)
  File "/home/ubuntu/miniconda3/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 638, in __init__
    if (len(loader.pin_memory_device) == 0):
AttributeError: 'DataLoader' object has no attribute 'pin_memory_device'

Could someone explain what this means?

There is no error from merely reading the datasets/loaders back in from the pickle files; the error occurs only once the run_ray function is added.

The versions of the libraries I’m using are:

torch 1.12.1
torch-cluster 1.6.0
torch-geometric 2.1.0.post1
torch-scatter 2.0.9
torch-sparse 0.6.15
torchmetrics 0.10.0
ray 2.0.0
pytorch-lightning 1.6.5

This PR added the pin_memory_device attribute to the DataLoader in April 2022, so your PyTorch release should already contain it since 1.12.1 was released in August 2022.
Could you double check if you are indeed using this version as I can access this attribute in 1.12.1:

>>> torch.__version__
'1.12.1+cu116'
>>> loader = torch.utils.data.DataLoader(torch.randn(1, 1), pin_memory=True, pin_memory_device="cuda")
>>> loader.pin_memory_device
'cuda'
1 Like

What is the effect of pin_memory_device? I expected the DataLoader to copy each batch tensor to pin_memory_device, but it doesn’t actually do so.

From the linked PR:

pin_memory, has optional device parameter to specify
which device you want to pin for. With this above change
the Dataloader will work only for CUDA backend. To add
support for other backend which supports pinned memory,
dataloader is updated with device as optional parameter.

so it seems the argument is an optional argument for other backends.

wow, thanks for your reply.

# Load the pre-split train/test tensors.
# NOTE: the original paste used curly "smart quotes" (‘ ’), which are not
# valid Python string delimiters; they are replaced with plain quotes here.
xtrain = torch.load('./dataSet3/data_train_test/xtrain.pt')
xtest = torch.load('./dataSet3/data_train_test/xtest.pt')

# torch.save(xtrain, './{}/data_train_test/xtrain.pt'.format(name))
# torch.save(xtest, './{}/data_train_test/xtest.pt'.format(name))

lr = 0.001

# Sweep over qubit count (nq) and layer count (L); for each setting run
# 6 independent trainings of each model and save the loss/acc histories.
# NOTE(review): the paste lost the nesting indentation (restored here) and
# `name` is not defined in this snippet — confirm it exists at runtime.
for nq in [4, 5, 6]:
    for L in [2, 4, 6]:
        loss_hist1 = []
        acc_hist1 = []
        for i in range(6):
            net = model1(nq, L)
            loss, acc = train.train(net, xtrain, xtest, 40, lr, i + 1, 1, nq, L)
            loss_hist1.append(loss)
            acc_hist1.append(acc)

        np.savetxt('./{}/loss_model1_NQ_{}_L_{}_lr_{}.txt'.format(name, nq, L, lr), loss_hist1)
        np.savetxt('./{}/acc_model1_NQ_{}_L_{}_lr_{}.txt'.format(name, nq, L, lr), acc_hist1)

        loss_hist2 = []
        acc_hist2 = []
        for i in range(6):
            net = model2(nq, L)
            loss, acc = train.train(net, xtrain, xtest, 40, lr, i + 1, 2, nq, L)
            loss_hist2.append(loss)
            acc_hist2.append(acc)

        np.savetxt('./{}/loss_model2_NQ_{}_L_{}_lr_{}.txt'.format(name, nq, L, lr), loss_hist2)
        np.savetxt('./{}/acc_model2_NQ_{}_L_{}_lr_{}.txt'.format(name, nq, L, lr), acc_hist2)

def train(net, xtrain, xtest, epochs, lr, ind, aa, NQ, LL):
    """Train ``net`` with Adam + MSE on one-hot targets; evaluate each epoch.

    Args:
        net: model to train; its parameters are updated in place.
        xtrain: iterable of ``(x, y)`` training batches.
        xtest: iterable of ``(x, y)`` evaluation batches.
        epochs: number of training epochs.
        lr: Adam learning rate.
        ind, aa, NQ, LL: run index, model id, qubit count and layer count —
            used only in the progress-bar description.

    Returns:
        ``(losses, accuracies)`` as numpy arrays, one entry per epoch.
    """
    # NOTE(review): the forum paste lost the body's first indentation level;
    # it is restored here — confirm against the original file.
    optimizer = optim.Adam(net.parameters(), lr=lr)
    loss_fn = nn.MSELoss()  # renamed from PascalCase local `Loss`

    acc = []
    loss = []

    for epoch in range(epochs):
        soma = 0  # running sum of per-batch loss values for this epoch
        net.train()
        with tqdm(xtrain, unit="batch") as tepoch:
            for x, y in tepoch:
                tepoch.set_description(f" Model{aa}:{ind} lr:{lr} NQ:{NQ} L:{LL} {epoch+1}/{epochs} ")
                out = net(x).float()
                # MSE against one-hot targets; 3 classes are hard-coded.
                target = F.one_hot(y, num_classes=3).float()
                l = loss_fn(out, target)
                soma += l.item()
                optimizer.zero_grad()
                l.backward()
                optimizer.step()
                # (a dead `ss` batch counter in the original was removed)

            # Mean batch loss over the epoch.
            loss.append(soma / len(xtrain))

        # Evaluation: top-1 accuracy over the test set.  no_grad() avoids
        # building the autograd graph; outputs are unchanged.
        net.eval()
        with torch.no_grad():
            soma_acc = 0
            n_samples = 0
            for x, y in xtest:
                out = net(x)
                _, preds = torch.max(out, 1)
                soma_acc += torch.sum(preds == y.data)
                n_samples += len(y.data)

        acc.append(soma_acc / n_samples)

    return np.array(loss), np.array(acc)

why does the problem occur?