I have classes "Aggregator" and "Encoder" whose code is shown below. The statement `self.base_model = base_model` appears to be ineffective, since `self.base_model` is never read anywhere in `Encoder`. But when I remove the statement, an error is raised. Does `base_model` have some special function?
class Encoder(nn.Module):
    """Encode nodes into embeddings by aggregating neighbor features (GraphSAGE-style).

    Parameters
    ----------
    features : callable
        Maps a LongTensor of node ids to their feature vectors
        (e.g. an ``nn.Embedding`` or a lambda wrapping a lower-layer encoder).
    feature_dim : int
        Dimensionality of the input node features.
    embed_dim : int
        Dimensionality of the produced embeddings.
    adj_lists : Mapping[int, set]
        Adjacency lists: node id -> set of neighbor ids.
    aggregator : object
        Neighbor aggregator exposing ``forward(nodes, neighbor_lists, num_sample)``.
    device : torch.device
        Device that input index tensors are moved to in ``forward``.
    num_sample : int, optional
        Number of neighbors to sample per node (default 10).
    base_model : nn.Module, optional
        Lower-layer encoder for stacked layers. It is never *called* here, but
        assigning it to an attribute of an ``nn.Module`` registers it as a
        submodule: its parameters then show up in ``self.parameters()`` (so the
        optimizer trains the lower layer) and follow ``.to(device)`` / ``.cuda()``
        calls. That is why removing the assignment breaks training even though
        the attribute is never read.
    gcn : bool, optional
        If True, use only aggregated neighbor features (GCN-style mean);
        if False, concatenate the node's own features with the neighbor features.
    """

    def __init__(self, features, feature_dim, embed_dim, adj_lists, aggregator, device,
                 num_sample=10, base_model=None, gcn=False):
        super(Encoder, self).__init__()
        self.features = features
        self.feat_dim = feature_dim
        self.embed_dim = embed_dim
        self.adj_lists = adj_lists
        self.aggregator = aggregator
        self.num_sample = num_sample
        # NOTE: this assignment is what registers base_model as a submodule
        # (see class docstring) — do not remove it for stacked encoders.
        if base_model is not None:
            self.base_model = base_model
        self.gcn = gcn
        # BUG FIX: original had `self.deivce = device` (typo), but forward()
        # reads `self.device`, raising AttributeError on the gcn=False path.
        self.device = device
        # Weight is (feat_dim, embed_dim) in GCN mode, (2*feat_dim, embed_dim)
        # otherwise, because the non-GCN path concatenates self + neighbor feats.
        self.weight = nn.Parameter(torch.FloatTensor(
            self.feat_dim if self.gcn else 2 * self.feat_dim, embed_dim))
        init.xavier_uniform_(self.weight)

    def forward(self, nodes):
        """Return embeddings of shape (len(nodes), embed_dim) for the given node ids."""
        neigh_feats = self.aggregator.forward(
            nodes, [self.adj_lists[int(node)] for node in nodes], self.num_sample)
        if not self.gcn:
            self_feats = self.features(torch.LongTensor(nodes).to(self.device))
            combined = torch.cat([self_feats, neigh_feats], dim=1)  # (batch, 2 * feat_dim)
        else:
            combined = neigh_feats
        # (batch, feat_dim) x (feat_dim, embed_dim) -> (batch, embed_dim)
        combined = F.relu(combined.mm(self.weight))
        return combined
In main(), I construct the encoders like this:
agg1 = MeanAggregator(features, device)
enc1 = Encoder(features, num_feat, num_embed, adj_lists, agg1, device, gcn=True)
agg2 = MeanAggregator(lambda nodes: enc1(nodes), device)
enc2 = Encoder(lambda nodes: enc1(nodes), num_embed, num_embed, adj_lists, agg2, device, base_model=enc1, gcn=True)
If I remove the `base_model` argument in main() and the corresponding assignment in `Encoder`, the following runtime error occurs:
Traceback (most recent call last):
File "C:/Users/Administrator/Desktop/mycode/deep-learning/graphsage-simple-master/graphsage/model.py", line 223, in <module>
run_cora()
File "C:/Users/Administrator/Desktop/mycode/deep-learning/graphsage-simple-master/graphsage/model.py", line 119, in run_cora
loss.backward()
File "C:\Users\Administrator\Anaconda3\envs\pytorch\lib\site-packages\torch\tensor.py", line 185, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "C:\Users\Administrator\Anaconda3\envs\pytorch\lib\site-packages\torch\autograd\__init__.py", line 125, in backward
Variable._execution_engine.run_backward(
RuntimeError: CUDA error: CUBLAS_STATUS_ALLOC_FAILED when calling `cublasCreate(handle)`
Exception raised from createCublasHandle at ..\aten\src\ATen\cuda\CublasHandlePool.cpp:8 (most recent call first):
00007FFA8DED75A200007FFA8DED7540 c10.dll!c10::Error::Error [<unknown file> @ <unknown line number>]
00007FFA2CA6AEA800007FFA2CA69E70 torch_cuda.dll!at::cuda::getCurrentCUDASparseHandle [<unknown file> @ <unknown line number>]
00007FFA2CA6A7D800007FFA2CA69E70 torch_cuda.dll!at::cuda::getCurrentCUDASparseHandle [<unknown file> @ <unknown line number>]
00007FFA2CA6B66700007FFA2CA6B1A0 torch_cuda.dll!at::cuda::getCurrentCUDABlasHandle [<unknown file> @ <unknown line number>]
00007FFA2CA6B24700007FFA2CA6B1A0 torch_cuda.dll!at::cuda::getCurrentCUDABlasHandle [<unknown file> @ <unknown line number>]
00007FFA2CA6320700007FFA2CA624B0 torch_cuda.dll!at::native::sparse_mask_cuda [<unknown file> @ <unknown line number>]
00007FFA2BF6CA9700007FFA2BF6B990 torch_cuda.dll!at::native::lerp_cuda_tensor_out [<unknown file> @ <unknown line number>]
00007FFA2BF6E4D200007FFA2BF6DF60 torch_cuda.dll!at::native::addmm_out_cuda [<unknown file> @ <unknown line number>]
00007FFA2BF6F64300007FFA2BF6F560 torch_cuda.dll!at::native::mm_cuda [<unknown file> @ <unknown line number>]
00007FFA2CAD1B0F00007FFA2CA6E0A0 torch_cuda.dll!at::native::set_storage_cuda_ [<unknown file> @ <unknown line number>]
00007FFA2CAC1B2200007FFA2CA6E0A0 torch_cuda.dll!at::native::set_storage_cuda_ [<unknown file> @ <unknown line number>]
00007FFA24B8D94900007FFA24B88FA0 torch_cpu.dll!at::bucketize_out [<unknown file> @ <unknown line number>]
00007FFA24BC057700007FFA24BC0520 torch_cpu.dll!at::mm [<unknown file> @ <unknown line number>]
00007FFA25F1EC7900007FFA25E2E010 torch_cpu.dll!torch::autograd::GraphRoot::apply [<unknown file> @ <unknown line number>]
00007FFA246D715700007FFA246D6290 torch_cpu.dll!at::indexing::TensorIndex::boolean [<unknown file> @ <unknown line number>]
00007FFA24B8D94900007FFA24B88FA0 torch_cpu.dll!at::bucketize_out [<unknown file> @ <unknown line number>]
00007FFA24CA210700007FFA24CA20B0 torch_cpu.dll!at::Tensor::mm [<unknown file> @ <unknown line number>]
00007FFA25DBB9BD00007FFA25DBA760 torch_cpu.dll!torch::autograd::profiler::Event::kind [<unknown file> @ <unknown line number>]
00007FFA25D91CF000007FFA25D91B30 torch_cpu.dll!torch::autograd::generated::MmBackward::apply [<unknown file> @ <unknown line number>]
00007FFA25D67E9100007FFA25D67B50 torch_cpu.dll!torch::autograd::Node::operator() [<unknown file> @ <unknown line number>]
00007FFA262CF9BA00007FFA262CF300 torch_cpu.dll!torch::autograd::Engine::add_thread_pool_task [<unknown file> @ <unknown line number>]
00007FFA262D03AD00007FFA262CFFD0 torch_cpu.dll!torch::autograd::Engine::evaluate_function [<unknown file> @ <unknown line number>]
00007FFA262D4FE200007FFA262D4CA0 torch_cpu.dll!torch::autograd::Engine::thread_main [<unknown file> @ <unknown line number>]
00007FFA262D4C4100007FFA262D4BC0 torch_cpu.dll!torch::autograd::Engine::thread_init [<unknown file> @ <unknown line number>]
00007FF9EE2708F700007FF9EE249F80 torch_python.dll!THPShortStorage_New [<unknown file> @ <unknown line number>]
00007FFA262CBF1400007FFA262CB780 torch_cpu.dll!torch::autograd::Engine::get_base_engine [<unknown file> @ <unknown line number>]
00007FFA96F7FA9500007FFA96F7F9F0 ucrtbase.dll!iswascii [<unknown file> @ <unknown line number>]
00007FFA996D37E400007FFA996D37D0 KERNEL32.DLL!BaseThreadInitThunk [<unknown file> @ <unknown line number>]
00007FFA99DCCB6100007FFA99DCCB40 ntdll.dll!RtlUserThreadStart [<unknown file> @ <unknown line number>]
Please tell me if there is any other information I can provide that would be helpful. Thanks for any suggestions.