I am trying to run a SAGEConv model with `torch.compile`, i.e., after defining the model I call
`model = torch_geometric.compile(model)`
but I receive the following error (a minimal sketch of my setup is included after the traceback):
```
BackendCompilerFailed                     Traceback (most recent call last)
Cell In[20], line 5
      3 for epoch in range(1, 5):
      4     train_start = time.time()
----> 5     loss, acc = train(epoch)
      6     print("--- %s train seconds ---" % (time.time() - train_start))
      7     train_times.append(time.time() - train_start)

Cell In[16], line 10, in train(epoch)
      8 for batch in train_loader:
      9     optimizer.zero_grad()
---> 10     out = model(batch.x, batch.edge_index.to(device))[:batch.batch_size]
     11     y = batch.y[:batch.batch_size].squeeze()
     12     loss = F.cross_entropy(out, y)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/nn/modules/module.py:1518, in Module._wrapped_call_impl(self, *args, **kwargs)
   1516     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1517 else:
-> 1518     return self._call_impl(*args, **kwargs)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/nn/modules/module.py:1527, in Module._call_impl(self, *args, **kwargs)
   1522 # If we don't have any hooks, we want to skip the rest of the logic in
   1523 # this function, and just call forward.
   1524 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1525         or _global_backward_pre_hooks or _global_backward_hooks
   1526         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1527     return forward_call(*args, **kwargs)
   1529 try:
   1530     result = None

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py:328, in _TorchDynamoContext.__call__.<locals>._fn(*args, **kwargs)
    326 dynamic_ctx.__enter__()
    327 try:
--> 328     return fn(*args, **kwargs)
    329 finally:
    330     set_eval_frame(prior)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/nn/modules/module.py:1518, in Module._wrapped_call_impl(self, *args, **kwargs)
   1516     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1517 else:
-> 1518     return self._call_impl(*args, **kwargs)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/nn/modules/module.py:1527, in Module._call_impl(self, *args, **kwargs)
   1522 # If we don't have any hooks, we want to skip the rest of the logic in
   1523 # this function, and just call forward.
   1524 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1525         or _global_backward_pre_hooks or _global_backward_hooks
   1526         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1527     return forward_call(*args, **kwargs)
   1529 try:
   1530     result = None

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py:490, in catch_errors_wrapper.<locals>.catch_errors(frame, cache_entry, frame_state)
    487     return hijacked_callback(frame, cache_entry, hooks, frame_state)
    489 with compile_lock, _disable_current_modes():
--> 490     return callback(frame, cache_entry, hooks, frame_state)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py:641, in convert_frame.<locals>._convert_frame(frame, cache_size, hooks, frame_state)
    639 counters["frames"]["total"] += 1
    640 try:
--> 641     result = inner_convert(frame, cache_size, hooks, frame_state)
    642     counters["frames"]["ok"] += 1
    643     return result

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py:133, in wrap_convert_context.<locals>._fn(*args, **kwargs)
    131 cleanup = setup_compile_debug()
    132 try:
--> 133     return fn(*args, **kwargs)
    134 finally:
    135     cleanup.close()

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py:389, in convert_frame_assert.<locals>._convert_frame_assert(frame, cache_entry, hooks, frame_state)
    376 compile_id = CompileId(frame_id, frame_compile_id)
    378 signpost_event(
    379     "dynamo",
    380     "_convert_frame_assert._compile",
   (...)
    386     },
    387 )
--> 389 return _compile(
    390     frame.f_code,
    391     frame.f_globals,
    392     frame.f_locals,
    393     frame.f_builtins,
    394     compiler_fn,
    395     one_graph,
    396     export,
    397     export_constraints,
    398     hooks,
    399     cache_size,
    400     frame,
    401     frame_state=frame_state,
    402     compile_id=compile_id,
    403 )

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py:569, in _compile(code, globals, locals, builtins, compiler_fn, one_graph, export, export_constraints, hooks, cache_size, frame, frame_state, compile_id)
    567 with compile_context(CompileContext(compile_id)):
    568     try:
--> 569         guarded_code = compile_inner(code, one_graph, hooks, transform)
    570         return guarded_code
    571     except (
    572         Unsupported,
    573         TorchRuntimeError,
   (...)
    578         ValidationException,
    579     ) as e:

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/utils.py:189, in dynamo_timed.<locals>.dynamo_timed_inner.<locals>.time_wrapper(*args, **kwargs)
    187 with torch.profiler.record_function(f"{key} (dynamo_timed)"):
    188     t0 = time.time()
--> 189     r = func(*args, **kwargs)
    190     time_spent = time.time() - t0
    191 compilation_time_metrics[key].append(time_spent)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py:491, in _compile.<locals>.compile_inner(code, one_graph, hooks, transform)
    489 for attempt in itertools.count():
    490     try:
--> 491         out_code = transform_code_object(code, transform)
    492         orig_code_map[out_code] = code
    493         break

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py:1028, in transform_code_object(code, transformations, safe)
   1025 instructions = cleaned_instructions(code, safe)
   1026 propagate_line_nums(instructions)
-> 1028 transformations(instructions, code_options)
   1029 return clean_and_assemble_instructions(instructions, keys, code_options)[1]

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py:458, in _compile.<locals>.transform(instructions, code_options)
    456 try:
    457     with tracing(tracer.output.tracing_context):
--> 458         tracer.run()
    459 except (exc.RestartAnalysis, exc.SkipFrame):
    460     raise

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/symbolic_convert.py:2069, in InstructionTranslator.run(self)
   2068 def run(self):
-> 2069     super().run()

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/symbolic_convert.py:719, in InstructionTranslatorBase.run(self)
    714 try:
    715     self.output.push_tx(self)
    716     while (
    717         self.instruction_pointer is not None
    718         and not self.output.should_exit
--> 719         and self.step()
    720     ):
    721         pass
    722 except BackendCompilerFailed:

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/symbolic_convert.py:683, in InstructionTranslatorBase.step(self)
    679     unimplemented(f"missing: {inst.opname}")
    680 TracingContext.set_current_loc(
    681     self.f_code.co_filename, self.lineno, self.f_code.co_name
    682 )
--> 683 getattr(self, inst.opname)(inst)
    685 return inst.opname != "RETURN_VALUE"
    686 except Unsupported:

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/symbolic_convert.py:2157, in InstructionTranslator.RETURN_VALUE(self, inst)
   2152 _step_logger()(
   2153     logging.INFO,
   2154     f"torchdynamo done tracing {self.f_code.co_name} (RETURN_VALUE)",
   2155 )
   2156 log.debug("RETURN_VALUE triggered compile")
-> 2157 self.output.compile_subgraph(
   2158     self,
   2159     reason=GraphCompileReason(
   2160         "return_value", [self.frame_summary()], graph_break=False
   2161     ),
   2162 )
   2163 self.output.add_output_instructions([create_instruction("RETURN_VALUE")])

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/output_graph.py:833, in OutputGraph.compile_subgraph(self, tx, partial_convert, reason)
    830     append_prefix_insts()
    831     # optimization to generate better code in a common case
    832     self.add_output_instructions(
--> 833         self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root)
    834         + [create_instruction("UNPACK_SEQUENCE", arg=len(stack_values))]
    835     )
    836 else:
    837     graph_output_var = self.new_var("graph_out")

File ~/anaconda3/envs/intel_pyg/lib/python3.11/contextlib.py:81, in ContextDecorator.__call__.<locals>.inner(*args, **kwds)
     78 @wraps(func)
     79 def inner(*args, **kwds):
     80     with self._recreate_cm():
---> 81         return func(*args, **kwds)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/output_graph.py:957, in OutputGraph.compile_and_call_fx_graph(self, tx, rv, root)
    952 graph_tabular_log.debug("%s", lazy_format_graph_tabular(name, gm))
    953 graph_sizes_log.debug(
    954     "%s", LazyString(lambda: self.get_graph_sizes_log_str(name))
    955 )
--> 957 compiled_fn = self.call_user_compiler(gm)
    958 compiled_fn = disable(compiled_fn)
    960 counters["stats"]["unique_graphs"] += 1

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/utils.py:189, in dynamo_timed.<locals>.dynamo_timed_inner.<locals>.time_wrapper(*args, **kwargs)
    187 with torch.profiler.record_function(f"{key} (dynamo_timed)"):
    188     t0 = time.time()
--> 189     r = func(*args, **kwargs)
    190     time_spent = time.time() - t0
    191 compilation_time_metrics[key].append(time_spent)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/output_graph.py:1024, in OutputGraph.call_user_compiler(self, gm)
   1022     unimplemented_with_warning(e, self.root_tx.f_code, msg)
   1023 except Exception as e:
-> 1024     raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
   1025         e.__traceback__
   1026     ) from None
   1028 signpost_event(
   1029     "dynamo",
   1030     "OutputGraph.call_user_compiler",
   (...)
   1036     },
   1037 )
   1039 return compiled_fn

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/output_graph.py:1009, in OutputGraph.call_user_compiler(self, gm)
   1007     if config.verify_correctness:
   1008         compiler_fn = WrapperBackend(compiler_fn)
-> 1009     compiled_fn = compiler_fn(gm, self.example_inputs())
   1010     _step_logger()(logging.INFO, f"done compiler function {name}")
   1011     assert callable(compiled_fn), "compiler_fn did not return callable"

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/repro/after_dynamo.py:117, in wrap_backend_debug.<locals>.debug_wrapper(gm, example_inputs, **kwargs)
    115     raise
    116 else:
--> 117     compiled_gm = compiler_fn(gm, example_inputs)
    119 return compiled_gm

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/__init__.py:1568, in TorchCompileInductorWrapper.__call__(self, model_, inputs_)
   1565 def __call__(self, model_, inputs_):
   1566     from torch._inductor.compile_fx import compile_fx
-> 1568     return compile_fx(model_, inputs_, config_patches=self.config)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/compile_fx.py:1150, in compile_fx(model_, example_inputs_, inner_compile, config_patches, decompositions)
   1143 tracing_context = (
   1144     torch._guards.TracingContext.get() or torch._guards.TracingContext(fake_mode)
   1145 )
   1147 with V.set_fake_mode(fake_mode), torch._guards.tracing(  # type: ignore[call-arg]
   1148     tracing_context
   1149 ), compiled_autograd.disable():
-> 1150     return aot_autograd(
   1151         fw_compiler=fw_compiler,
   1152         bw_compiler=bw_compiler,
   1153         inference_compiler=inference_compiler,
   1154         decompositions=decompositions,
   1155         partition_fn=partition_fn,
   1156         keep_inference_input_mutations=True,
   1157     )(model_, example_inputs_)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/backends/common.py:55, in aot_autograd.<locals>.compiler_fn(gm, example_inputs)
     52 try:
     53     # NB: NOT cloned!
     54     with enable_aot_logging(), patch_config:
---> 55         cg = aot_module_simplified(gm, example_inputs, **kwargs)
     56     counters["aot_autograd"]["ok"] += 1
     57     return disable(cg)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_functorch/aot_autograd.py:3891, in aot_module_simplified(mod, args, fw_compiler, bw_compiler, partition_fn, decompositions, keep_inference_input_mutations, inference_compiler)
   3875 aot_config = AOTConfig(
   3876     fw_compiler=fw_compiler,
   3877     bw_compiler=bw_compiler,
   (...)
   3887     no_tangents=False,
   3888 )
   3890 with compiled_autograd.disable():
-> 3891     compiled_fn = create_aot_dispatcher_function(
   3892         functional_call,
   3893         full_args,
   3894         aot_config,
   3895     )
   3897 # TODO: There is something deeply wrong here; compiled_fn running with
   3898 # the boxed calling convention, but aot_module_simplified somehow
   3899 # historically returned a function that was not the boxed calling
   3900 # convention. This should get fixed...
   3901 def forward(*runtime_args):

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/utils.py:189, in dynamo_timed.<locals>.dynamo_timed_inner.<locals>.time_wrapper(*args, **kwargs)
    187 with torch.profiler.record_function(f"{key} (dynamo_timed)"):
    188     t0 = time.time()
--> 189     r = func(*args, **kwargs)
    190     time_spent = time.time() - t0
    191 compilation_time_metrics[key].append(time_spent)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_functorch/aot_autograd.py:3429, in create_aot_dispatcher_function(flat_fn, flat_args, aot_config)
   3426 compiler_fn = partial(aot_wrapper_dedupe, compiler_fn=compiler_fn)
   3427 # You can put more passes here
-> 3429 compiled_fn = compiler_fn(flat_fn, fake_flat_args, aot_config, fw_metadata=fw_metadata)
   3430 if aot_config.is_export:
   3432     mutated_user_inp_locs = [
   3433         idx - aot_config.num_params_buffers
   3434         for idx in fw_metadata.mutated_inp_indices
   3435         if idx >= aot_config.num_params_buffers
   3436     ]

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_functorch/aot_autograd.py:2212, in aot_wrapper_dedupe(flat_fn, flat_args, aot_config, compiler_fn, fw_metadata)
   2209         break
   2211 if ok:
-> 2212     return compiler_fn(flat_fn, leaf_flat_args, aot_config, fw_metadata=fw_metadata)
   2214 # export path: ban duplicate inputs for now, add later if requested.
   2215 if aot_config.is_export:

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_functorch/aot_autograd.py:2392, in aot_wrapper_synthetic_base(flat_fn, flat_args, aot_config, fw_metadata, needs_autograd, compiler_fn)
   2390 # Happy path: we don't need synthetic bases
   2391 if synthetic_base_info is None:
-> 2392     return compiler_fn(flat_fn, flat_args, aot_config, fw_metadata=fw_metadata)
   2394 # export path: ban synthetic bases for now, add later if requested.
   2395 if aot_config.is_export:

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_functorch/aot_autograd.py:2917, in aot_dispatch_autograd(flat_fn, flat_args, aot_config, fw_metadata)
   2914 torch._guards.TracingContext.get().fw_metadata = fw_metadata
   2916 with TracingContext.report_output_strides() as fwd_output_strides:
-> 2917     compiled_fw_func = aot_config.fw_compiler(
   2918         fw_module, adjusted_flat_args
   2919     )
   2921 # NB: It's important to compile backwards ahead of time, as this may
   2922 # add extra guards which we need to apply to the Dynamo cache at
   2923 # forwards
   2924 with track_graph_compiling(aot_config, "backward"):

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/utils.py:189, in dynamo_timed.<locals>.dynamo_timed_inner.<locals>.time_wrapper(*args, **kwargs)
    187 with torch.profiler.record_function(f"{key} (dynamo_timed)"):
    188     t0 = time.time()
--> 189     r = func(*args, **kwargs)
    190     time_spent = time.time() - t0
    191 compilation_time_metrics[key].append(time_spent)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/compile_fx.py:1092, in compile_fx.<locals>.fw_compiler_base(model, example_inputs, is_inference)
   1070 # We makes the following assumption
   1071 # For inference
   1072 #   len(orig_model_outputs) == len(model_outputs)
   (...)
   1080 # To make things safe, we'll use original_output_start_index field
   1081 # set by AOTAutograd to decide where the original module outputs start.
   1083 user_visible_outputs = {
   1084     n.name
   1085     for n in model_outputs[
   (...)
   1089     if isinstance(n, torch.fx.Node)
   1090 }
-> 1092 return inner_compile(
   1093     model,
   1094     example_inputs,
   1095     num_fixed=fixed,
   1096     cudagraphs=cudagraphs,
   1097     graph_id=graph_id,
   1098     is_inference=is_inference,
   1099     boxed_forward_device_index=forward_device,
   1100     user_visible_outputs=user_visible_outputs,
   1101 )

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/repro/after_aot.py:80, in wrap_compiler_debug.<locals>.debug_wrapper(gm, example_inputs, **kwargs)
     75 assert config.repro_after in ("dynamo", "aot", None)
     77 try:
     78     # Call the compiler_fn - which is either aot_autograd or inductor
     79     # with fake inputs
---> 80     inner_compiled_fn = compiler_fn(gm, example_inputs)
     81 except Exception as e:
     82     # TODO: Failures here are troublesome because no real inputs,
     83     # need a different serialization strategy
     84     if config.repro_after == "aot":

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/debug.py:228, in DebugContext.wrap.<locals>.inner(*args, **kwargs)
    225 @functools.wraps(fn)
    226 def inner(*args, **kwargs):
    227     with DebugContext():
--> 228         return fn(*args, **kwargs)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/contextlib.py:81, in ContextDecorator.__call__.<locals>.inner(*args, **kwds)
     78 @wraps(func)
     79 def inner(*args, **kwds):
     80     with self._recreate_cm():
---> 81         return func(*args, **kwds)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/compile_fx.py:54, in time_and_log.<locals>.wrap.<locals>.newFunction(*args, **kwargs)
     52 @wraps(old_func)
     53 def newFunction(*args, **kwargs):
---> 54     return old_func(*args, **kwargs)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/compile_fx.py:341, in compile_fx_inner(gm, example_inputs, cudagraphs, num_fixed, is_backward, graph_id, cpp_wrapper, aot_mode, is_inference, boxed_forward_device_index, user_visible_outputs, layout_opt)
    328 graph_args = [gm, example_inputs]
    329 graph_kwargs = {
    330     "cudagraphs": cudagraphs,
    331     "num_fixed": num_fixed,
   (...)
    338     "layout_opt": layout_opt,
    339 }
--> 341 compiled_graph: CompiledFxGraph = fx_codegen_and_compile(
    342     *graph_args, **graph_kwargs  # type: ignore[arg-type]
    343 )
    345 if aot_mode:
    346     return compiled_graph

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/compile_fx.py:565, in fx_codegen_and_compile(gm, example_inputs, cudagraphs, num_fixed, is_backward, graph_id, cpp_wrapper, aot_mode, is_inference, user_visible_outputs, layout_opt)
    563 else:
    564     context.output_strides.append(None)
--> 565 compiled_fn = graph.compile_to_fn()
    567 if graph.disable_cudagraphs:
    568     BoxedBool.disable(cudagraphs)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/graph.py:970, in GraphLowering.compile_to_fn(self)
    968     return AotCodeCache.compile(self, code, cuda=self.cuda)
    969 else:
--> 970     return self.compile_to_module().call

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/utils.py:189, in dynamo_timed.<locals>.dynamo_timed_inner.<locals>.time_wrapper(*args, **kwargs)
    187 with torch.profiler.record_function(f"{key} (dynamo_timed)"):
    188     t0 = time.time()
--> 189     r = func(*args, **kwargs)
    190     time_spent = time.time() - t0
    191 compilation_time_metrics[key].append(time_spent)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/graph.py:938, in GraphLowering.compile_to_module(self)
    934 @dynamo_timed
    935 def compile_to_module(self):
    936     from .codecache import PyCodeCache
--> 938     code, linemap = self.codegen()
    939     linemap = [(line_no, node.stack_trace) for line_no, node in linemap]
    940     key, path = PyCodeCache.write(code)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/graph.py:915, in GraphLowering.codegen(self)
    913 self.scheduler = Scheduler(self.buffers)
    914 assert self.scheduler is not None  # mypy can't figure this out
--> 915 self.scheduler.codegen()
    916 assert self.wrapper_code is not None
    917 return self.wrapper_code.generate()

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_dynamo/utils.py:189, in dynamo_timed.<locals>.dynamo_timed_inner.<locals>.time_wrapper(*args, **kwargs)
    187 with torch.profiler.record_function(f"{key} (dynamo_timed)"):
    188     t0 = time.time()
--> 189     r = func(*args, **kwargs)
    190     time_spent = time.time() - t0
    191 compilation_time_metrics[key].append(time_spent)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/scheduler.py:1690, in Scheduler.codegen(self)
   1688     self.get_backend(device).codegen_foreach(node)
   1689 elif isinstance(node, (FusedSchedulerNode, SchedulerNode)):
-> 1690     self.get_backend(device).codegen_nodes(node.get_nodes())
   1691 else:
   1692     assert isinstance(node, NopKernelSchedulerNode)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/codegen/cpp.py:2805, in CppScheduling.codegen_nodes(self, nodes)
   2800 """
   2801 Turn an set of pre-fused nodes into a C++ kernel.
   2802 """
   2803 kernel_group = self.kernel_group
-> 2805 cpp_kernel_proxy = CppKernelProxy(kernel_group)
   2806 cpp_kernel_proxy.codegen_nodes(nodes)
   2808 kernel_group.finalize_kernel(cpp_kernel_proxy, nodes)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/codegen/cpp.py:2352, in CppKernelProxy.__init__(self, kernel_group)
   2350 self.loop_nest = None
   2351 self.call_ranges = None
-> 2352 self.picked_vec_isa: codecache.VecISA = codecache.pick_vec_isa()

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/codecache.py:633, in pick_vec_isa()
    632 def pick_vec_isa():
--> 633     _valid_vec_isa_list: List[VecISA] = valid_vec_isa_list()
    634     if not _valid_vec_isa_list:
    635         return invalid_vec_isa

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/codecache.py:627, in valid_vec_isa_list()
    625     _cpu_info_content = _cpu_info.read()
    626 for isa in supported_vec_isa_list:
--> 627     if str(isa) in _cpu_info_content and isa:
    628         isa_list.append(isa)
    629 return isa_list

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/codecache.py:548, in VecISA.__bool__(self)
    545 with lock:
    546     output_path = input_path[:-3] + "so"
    547     build_cmd = shlex.split(
--> 548         cpp_compile_command(
    549             input_path, output_path, warning_all=False, vec_isa=self
    550         )
    551     )
    552     try:
    553         # Check build result
    554         compile_file(input_path, output_path, build_cmd)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/codecache.py:899, in cpp_compile_command(input, output, warning_all, shared, include_pytorch, vec_isa, cuda, aot_mode)
    893     out_name = output
    894     linker_paths = ""  # let the compiler pick
    895 return re.sub(
    896     r"[ \n]+",
    897     " ",
    898     f"""
--> 899         {cpp_compiler()} {inp_name} {get_shared(shared)}
    900         {get_warning_all_flag(warning_all)} {cpp_flags()}
    901         {ipaths} {lpaths} {libs} {macros} {linker_paths}
    902         {optimization_flags()}
    903         {use_custom_generated_macros()}
    904         {use_fb_internal_macros()}
    905         {use_standard_sys_dir_headers()}
    906         -o {out_name}
    907     """,
    908 ).strip()

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/codecache.py:411, in cpp_compiler()
    409 else:
    410     search = (config.cpp.cxx,)
--> 411 return cpp_compiler_search(search)

File ~/anaconda3/envs/intel_pyg/lib/python3.11/site-packages/torch/_inductor/codecache.py:438, in cpp_compiler_search(search)
    436     except (subprocess.SubprocessError, FileNotFoundError, ImportError):
    437         continue
--> 438 raise exc.InvalidCxxCompiler()

BackendCompilerFailed: backend='inductor' raised:
InvalidCxxCompiler: No working C++ compiler found in torch._inductor.config.cpp.cxx: (None, 'g++')

Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information
```
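For context, here is a minimal sketch of my setup, reconstructed from the cells shown in the traceback. The toy graph, model sizes, and loader parameters are placeholders; the `torch_geometric.compile` call and the body of `train` match my actual code:

```python
import torch
import torch.nn.functional as F
import torch_geometric
from torch_geometric.data import Data
from torch_geometric.loader import NeighborLoader
from torch_geometric.nn import SAGEConv

device = torch.device('cpu')

# Placeholder graph standing in for my real dataset:
# 100 nodes, 16 features, 4 classes, random edges.
data = Data(
    x=torch.randn(100, 16),
    edge_index=torch.randint(0, 100, (2, 400)),
    y=torch.randint(0, 4, (100,)),
)
train_loader = NeighborLoader(data, num_neighbors=[10, 10], batch_size=32)

class SAGE(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels):
        super().__init__()
        self.conv1 = SAGEConv(in_channels, hidden_channels)
        self.conv2 = SAGEConv(hidden_channels, out_channels)

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index).relu()
        return self.conv2(x, edge_index)

model = SAGE(16, 64, 4).to(device)
model = torch_geometric.compile(model)  # the call that triggers the failure
optimizer = torch.optim.Adam(model.parameters())

def train(epoch):
    model.train()
    total_loss = total_correct = total_examples = 0
    for batch in train_loader:
        optimizer.zero_grad()
        # Only the seed nodes of each mini-batch contribute to the loss.
        out = model(batch.x, batch.edge_index.to(device))[:batch.batch_size]
        y = batch.y[:batch.batch_size].squeeze()
        loss = F.cross_entropy(out, y)
        loss.backward()
        optimizer.step()
        total_loss += float(loss) * batch.batch_size
        total_correct += int((out.argmax(dim=-1) == y).sum())
        total_examples += batch.batch_size
    return total_loss / total_examples, total_correct / total_examples
```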
Dependencies:
I have installed pytorch==2.0.1+cpu and pyg==2.4
System:
AWS t2.2xlarge
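Since the traceback bottoms out in `InvalidCxxCompiler`, here is a quick sanity check (a sketch using only the names that appear in the error message) to confirm whether a C++ compiler is actually visible to the interpreter:

```python
import shutil

import torch
from torch._inductor import config as inductor_config

print("torch:", torch.__version__)                          # 2.0.1+cpu here
print("g++ on PATH:", shutil.which("g++"))                  # None would explain the error
print("inductor cxx candidates:", inductor_config.cpp.cxx)  # (None, 'g++') per the error
```

If `shutil.which("g++")` returns None, I assume installing a C++ compiler on the instance (e.g., via the system package manager) would resolve this, but I would like to confirm that this is the expected requirement for the inductor backend on CPU.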