I tried to build knn-pytorch package but i met this errors for many hours.
I’m currently using ubuntu 20.04.06 LTS and pytorch 2.2.2, cuda 12.1, cudnn 8.9.06
I need your help
Processing /home/lee/ros1_noetic/src/graspnet-baseline/knn
Building wheels for collected packages: knn-pytorch
Building wheel for knn-pytorch (setup.py) ... error
ERROR: Command errored out with exit status 1:
command: /usr/bin/python3 -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-req-build-iuiz8qyv/setup.py'"'"'; __file__='"'"'/tmp/pip-req-build-iuiz8qyv/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' bdist_wheel -d /tmp/pip-wheel-r3y563j5
cwd: /tmp/pip-req-build-iuiz8qyv/
Complete output (111 lines):
running bdist_wheel
running build
running build_ext
/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py:425: UserWarning: There are no x86_64-linux-gnu-g++ version bounds defined for CUDA version 12.1
warnings.warn(f'There are no {compiler_name} version bounds defined for CUDA version {cuda_str_version}')
building 'knn_pytorch.knn_pytorch' extension
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cpu
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cuda
Emitting ninja build file /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/build.ninja...
Compiling objects...
Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)
[1/3] /usr/local/cuda-12.1/bin/nvcc --generate-dependencies-with-compile --dependency-output /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cuda/knn.o.d -DWITH_CUDA -I/tmp/pip-req-build-iuiz8qyv/src -I/home/lee/.local/lib/python3.8/site-packages/torch/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/TH -I/home/lee/.local/lib/python3.8/site-packages/torch/include/THC -I/usr/local/cuda-12.1/include -I/usr/include/python3.8 -c -c /tmp/pip-req-build-iuiz8qyv/src/cuda/knn.cu -o /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cuda/knn.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options ''"'"'-fPIC'"'"'' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1011"' -DTORCH_EXTENSION_NAME=knn_pytorch -D_GLIBCXX_USE_CXX11_ABI=0 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_86,code=sm_86 -std=c++17
/tmp/pip-req-build-iuiz8qyv/src/cuda/knn.cu(231): warning #177-D: variable "t_k_16x16" was declared but never referenced
dim3 t_k_16x16(16, 16, 1);
^
Remark: The warnings can be suppressed with "-diag-suppress <warning-number>"
[2/3] c++ -MMD -MF /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/vision.o.d -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/tmp/pip-req-build-iuiz8qyv/src -I/home/lee/.local/lib/python3.8/site-packages/torch/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/TH -I/home/lee/.local/lib/python3.8/site-packages/torch/include/THC -I/usr/local/cuda-12.1/include -I/usr/include/python3.8 -c -c /tmp/pip-req-build-iuiz8qyv/src/vision.cpp -o /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/vision.o -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1011"' -DTORCH_EXTENSION_NAME=knn_pytorch -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++17
FAILED: /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/vision.o
c++ -MMD -MF /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/vision.o.d -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/tmp/pip-req-build-iuiz8qyv/src -I/home/lee/.local/lib/python3.8/site-packages/torch/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/TH -I/home/lee/.local/lib/python3.8/site-packages/torch/include/THC -I/usr/local/cuda-12.1/include -I/usr/include/python3.8 -c -c /tmp/pip-req-build-iuiz8qyv/src/vision.cpp -o /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/vision.o -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1011"' -DTORCH_EXTENSION_NAME=knn_pytorch -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++17
In file included from /tmp/pip-req-build-iuiz8qyv/src/vision.cpp:1:
/tmp/pip-req-build-iuiz8qyv/src/knn.h: In function ‘int knn(at::Tensor&, at::Tensor&, at::Tensor&)’:
/tmp/pip-req-build-iuiz8qyv/src/knn.h:24:17: warning: ‘at::DeprecatedTypeProperties& at::Tensor::type() const’ is deprecated: Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device(). [-Wdeprecated-declarations]
24 | if (ref.type().is_cuda()) {
| ~~~~~~~~^~
In file included from /home/lee/.local/lib/python3.8/site-packages/torch/include/ATen/core/Tensor.h:3,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/ATen/Tensor.h:3,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/autograd/function_hook.h:3,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h:2,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/autograd/variable.h:6,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/autograd/autograd.h:3,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h:3,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include/torch/all.h:7,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/extension.h:5,
from /tmp/pip-req-build-iuiz8qyv/src/cpu/vision.h:2,
from /tmp/pip-req-build-iuiz8qyv/src/knn.h:2,
from /tmp/pip-req-build-iuiz8qyv/src/vision.cpp:1:
/home/lee/.local/lib/python3.8/site-packages/torch/include/ATen/core/TensorBody.h:225:30: note: declared here
225 | DeprecatedTypeProperties & type() const {
| ^~~~
In file included from /tmp/pip-req-build-iuiz8qyv/src/vision.cpp:1:
/tmp/pip-req-build-iuiz8qyv/src/knn.h:27:57: error: ‘at::cuda::CUDAContext’ has not been declared
27 | float *dist_dev = static_cast<float*>(at::cuda::CUDAContext::malloc(ref_nb * query_nb * sizeof(float)));
| ^~~~~~~~~~~
/tmp/pip-req-build-iuiz8qyv/src/knn.h:34:19: error: ‘at::cuda::CUDAContext’ has not been declared
34 | at::cuda::CUDAContext::free(dist_dev);
| ^~~~~~~~~~~
[3/3] c++ -MMD -MF /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cpu/knn_cpu.o.d -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/tmp/pip-req-build-iuiz8qyv/src -I/home/lee/.local/lib/python3.8/site-packages/torch/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/TH -I/home/lee/.local/lib/python3.8/site-packages/torch/include/THC -I/usr/local/cuda-12.1/include -I/usr/include/python3.8 -c -c /tmp/pip-req-build-iuiz8qyv/src/cpu/knn_cpu.cpp -o /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cpu/knn_cpu.o -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1011"' -DTORCH_EXTENSION_NAME=knn_pytorch -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++17
ninja: build stopped: subcommand failed.
Traceback (most recent call last):
File "/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py", line 2096, in _run_ninja_build
subprocess.run(
File "/usr/lib/python3.8/subprocess.py", line 516, in run
raise CalledProcessError(retcode, process.args,
subprocess.CalledProcessError: Command '['ninja', '-v']' returned non-zero exit status 1.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-req-build-iuiz8qyv/setup.py", line 58, in <module>
setup(
File "/usr/lib/python3/dist-packages/setuptools/__init__.py", line 144, in setup
return distutils.core.setup(**attrs)
File "/usr/lib/python3.8/distutils/core.py", line 148, in setup
dist.run_commands()
File "/usr/lib/python3.8/distutils/dist.py", line 966, in run_commands
self.run_command(cmd)
File "/usr/lib/python3.8/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/home/lee/.local/lib/python3.8/site-packages/wheel/bdist_wheel.py", line 368, in run
self.run_command("build")
File "/usr/lib/python3.8/distutils/cmd.py", line 313, in run_command
self.distribution.run_command(command)
File "/usr/lib/python3.8/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/usr/lib/python3.8/distutils/command/build.py", line 135, in run
self.run_command(cmd_name)
File "/usr/lib/python3.8/distutils/cmd.py", line 313, in run_command
self.distribution.run_command(command)
File "/usr/lib/python3.8/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/usr/lib/python3/dist-packages/setuptools/command/build_ext.py", line 87, in run
_build_ext.run(self)
File "/usr/lib/python3/dist-packages/Cython/Distutils/old_build_ext.py", line 186, in run
_build_ext.build_ext.run(self)
File "/usr/lib/python3.8/distutils/command/build_ext.py", line 340, in run
self.build_extensions()
File "/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py", line 871, in build_extensions
build_ext.build_extensions(self)
File "/usr/lib/python3/dist-packages/Cython/Distutils/old_build_ext.py", line 195, in build_extensions
_build_ext.build_ext.build_extensions(self)
File "/usr/lib/python3.8/distutils/command/build_ext.py", line 449, in build_extensions
self._build_extensions_serial()
File "/usr/lib/python3.8/distutils/command/build_ext.py", line 474, in _build_extensions_serial
self.build_extension(ext)
File "/usr/lib/python3/dist-packages/setuptools/command/build_ext.py", line 208, in build_extension
_build_ext.build_extension(self, ext)
File "/usr/lib/python3.8/distutils/command/build_ext.py", line 528, in build_extension
objects = self.compiler.compile(sources,
File "/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py", line 684, in unix_wrap_ninja_compile
_write_ninja_file_and_compile_objects(
File "/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py", line 1774, in _write_ninja_file_and_compile_objects
_run_ninja_build(
File "/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py", line 2112, in _run_ninja_build
raise RuntimeError(message) from e
RuntimeError: Error compiling objects for extension
----------------------------------------
ERROR: Failed building wheel for knn-pytorch
Running setup.py clean for knn-pytorch
Failed to build knn-pytorch
Installing collected packages: knn-pytorch
Running setup.py install for knn-pytorch ... error
ERROR: Command errored out with exit status 1:
command: /usr/bin/python3 -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-req-build-iuiz8qyv/setup.py'"'"'; __file__='"'"'/tmp/pip-req-build-iuiz8qyv/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' install --record /tmp/pip-record-c1i5qtkg/install-record.txt --single-version-externally-managed --user --prefix= --compile --install-headers /home/lee/.local/include/python3.8/knn-pytorch
cwd: /tmp/pip-req-build-iuiz8qyv/
Complete output (115 lines):
running install
running build
running build_ext
/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py:425: UserWarning: There are no x86_64-linux-gnu-g++ version bounds defined for CUDA version 12.1
warnings.warn(f'There are no {compiler_name} version bounds defined for CUDA version {cuda_str_version}')
building 'knn_pytorch.knn_pytorch' extension
creating /tmp/pip-req-build-iuiz8qyv/build
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cpu
creating /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cuda
Emitting ninja build file /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/build.ninja...
Compiling objects...
Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)
[1/3] /usr/local/cuda-12.1/bin/nvcc --generate-dependencies-with-compile --dependency-output /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cuda/knn.o.d -DWITH_CUDA -I/tmp/pip-req-build-iuiz8qyv/src -I/home/lee/.local/lib/python3.8/site-packages/torch/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/TH -I/home/lee/.local/lib/python3.8/site-packages/torch/include/THC -I/usr/local/cuda-12.1/include -I/usr/include/python3.8 -c -c /tmp/pip-req-build-iuiz8qyv/src/cuda/knn.cu -o /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cuda/knn.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options ''"'"'-fPIC'"'"'' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1011"' -DTORCH_EXTENSION_NAME=knn_pytorch -D_GLIBCXX_USE_CXX11_ABI=0 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_86,code=sm_86 -std=c++17
/tmp/pip-req-build-iuiz8qyv/src/cuda/knn.cu(231): warning #177-D: variable "t_k_16x16" was declared but never referenced
dim3 t_k_16x16(16, 16, 1);
^
Remark: The warnings can be suppressed with "-diag-suppress <warning-number>"
[2/3] c++ -MMD -MF /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/vision.o.d -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/tmp/pip-req-build-iuiz8qyv/src -I/home/lee/.local/lib/python3.8/site-packages/torch/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/TH -I/home/lee/.local/lib/python3.8/site-packages/torch/include/THC -I/usr/local/cuda-12.1/include -I/usr/include/python3.8 -c -c /tmp/pip-req-build-iuiz8qyv/src/vision.cpp -o /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/vision.o -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1011"' -DTORCH_EXTENSION_NAME=knn_pytorch -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++17
FAILED: /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/vision.o
c++ -MMD -MF /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/vision.o.d -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/tmp/pip-req-build-iuiz8qyv/src -I/home/lee/.local/lib/python3.8/site-packages/torch/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/TH -I/home/lee/.local/lib/python3.8/site-packages/torch/include/THC -I/usr/local/cuda-12.1/include -I/usr/include/python3.8 -c -c /tmp/pip-req-build-iuiz8qyv/src/vision.cpp -o /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/vision.o -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1011"' -DTORCH_EXTENSION_NAME=knn_pytorch -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++17
In file included from /tmp/pip-req-build-iuiz8qyv/src/vision.cpp:1:
/tmp/pip-req-build-iuiz8qyv/src/knn.h: In function ‘int knn(at::Tensor&, at::Tensor&, at::Tensor&)’:
/tmp/pip-req-build-iuiz8qyv/src/knn.h:24:17: warning: ‘at::DeprecatedTypeProperties& at::Tensor::type() const’ is deprecated: Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device(). [-Wdeprecated-declarations]
24 | if (ref.type().is_cuda()) {
| ~~~~~~~~^~
In file included from /home/lee/.local/lib/python3.8/site-packages/torch/include/ATen/core/Tensor.h:3,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/ATen/Tensor.h:3,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/autograd/function_hook.h:3,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h:2,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/autograd/variable.h:6,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/autograd/autograd.h:3,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h:3,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include/torch/all.h:7,
from /home/lee/.local/lib/python3.8/site-packages/torch/include/torch/extension.h:5,
from /tmp/pip-req-build-iuiz8qyv/src/cpu/vision.h:2,
from /tmp/pip-req-build-iuiz8qyv/src/knn.h:2,
from /tmp/pip-req-build-iuiz8qyv/src/vision.cpp:1:
/home/lee/.local/lib/python3.8/site-packages/torch/include/ATen/core/TensorBody.h:225:30: note: declared here
225 | DeprecatedTypeProperties & type() const {
| ^~~~
In file included from /tmp/pip-req-build-iuiz8qyv/src/vision.cpp:1:
/tmp/pip-req-build-iuiz8qyv/src/knn.h:27:57: error: ‘at::cuda::CUDAContext’ has not been declared
27 | float *dist_dev = static_cast<float*>(at::cuda::CUDAContext::malloc(ref_nb * query_nb * sizeof(float)));
| ^~~~~~~~~~~
/tmp/pip-req-build-iuiz8qyv/src/knn.h:34:19: error: ‘at::cuda::CUDAContext’ has not been declared
34 | at::cuda::CUDAContext::free(dist_dev);
| ^~~~~~~~~~~
[3/3] c++ -MMD -MF /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cpu/knn_cpu.o.d -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DWITH_CUDA -I/tmp/pip-req-build-iuiz8qyv/src -I/home/lee/.local/lib/python3.8/site-packages/torch/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -I/home/lee/.local/lib/python3.8/site-packages/torch/include/TH -I/home/lee/.local/lib/python3.8/site-packages/torch/include/THC -I/usr/local/cuda-12.1/include -I/usr/include/python3.8 -c -c /tmp/pip-req-build-iuiz8qyv/src/cpu/knn_cpu.cpp -o /tmp/pip-req-build-iuiz8qyv/build/temp.linux-x86_64-3.8/tmp/pip-req-build-iuiz8qyv/src/cpu/knn_cpu.o -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1011"' -DTORCH_EXTENSION_NAME=knn_pytorch -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++17
ninja: build stopped: subcommand failed.
Traceback (most recent call last):
File "/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py", line 2096, in _run_ninja_build
subprocess.run(
File "/usr/lib/python3.8/subprocess.py", line 516, in run
raise CalledProcessError(retcode, process.args,
subprocess.CalledProcessError: Command '['ninja', '-v']' returned non-zero exit status 1.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-req-build-iuiz8qyv/setup.py", line 58, in <module>
setup(
File "/usr/lib/python3/dist-packages/setuptools/__init__.py", line 144, in setup
return distutils.core.setup(**attrs)
File "/usr/lib/python3.8/distutils/core.py", line 148, in setup
dist.run_commands()
File "/usr/lib/python3.8/distutils/dist.py", line 966, in run_commands
self.run_command(cmd)
File "/usr/lib/python3.8/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/usr/lib/python3/dist-packages/setuptools/command/install.py", line 61, in run
return orig.install.run(self)
File "/usr/lib/python3.8/distutils/command/install.py", line 589, in run
self.run_command('build')
File "/usr/lib/python3.8/distutils/cmd.py", line 313, in run_command
self.distribution.run_command(command)
File "/usr/lib/python3.8/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/usr/lib/python3.8/distutils/command/build.py", line 135, in run
self.run_command(cmd_name)
File "/usr/lib/python3.8/distutils/cmd.py", line 313, in run_command
self.distribution.run_command(command)
File "/usr/lib/python3.8/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/usr/lib/python3/dist-packages/setuptools/command/build_ext.py", line 87, in run
_build_ext.run(self)
File "/usr/lib/python3/dist-packages/Cython/Distutils/old_build_ext.py", line 186, in run
_build_ext.build_ext.run(self)
File "/usr/lib/python3.8/distutils/command/build_ext.py", line 340, in run
self.build_extensions()
File "/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py", line 871, in build_extensions
build_ext.build_extensions(self)
File "/usr/lib/python3/dist-packages/Cython/Distutils/old_build_ext.py", line 195, in build_extensions
_build_ext.build_ext.build_extensions(self)
File "/usr/lib/python3.8/distutils/command/build_ext.py", line 449, in build_extensions
self._build_extensions_serial()
File "/usr/lib/python3.8/distutils/command/build_ext.py", line 474, in _build_extensions_serial
self.build_extension(ext)
File "/usr/lib/python3/dist-packages/setuptools/command/build_ext.py", line 208, in build_extension
_build_ext.build_extension(self, ext)
File "/usr/lib/python3.8/distutils/command/build_ext.py", line 528, in build_extension
objects = self.compiler.compile(sources,
File "/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py", line 684, in unix_wrap_ninja_compile
_write_ninja_file_and_compile_objects(
File "/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py", line 1774, in _write_ninja_file_and_compile_objects
_run_ninja_build(
File "/home/lee/.local/lib/python3.8/site-packages/torch/utils/cpp_extension.py", line 2112, in _run_ninja_build
raise RuntimeError(message) from e
RuntimeError: Error compiling objects for extension
----------------------------------------
ERROR: Command errored out with exit status 1: /usr/bin/python3 -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-req-build-iuiz8qyv/setup.py'"'"'; __file__='"'"'/tmp/pip-req-build-iuiz8qyv/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' install --record /tmp/pip-record-c1i5qtkg/install-record.txt --single-version-externally-managed --user --prefix= --compile --install-headers /home/lee/.local/include/python3.8/knn-pytorch Check the logs for full command output.
I’ve tried to upgrade GCC version and this is the result
lee@lee-Robot-Dev:~$ gcc --version
gcc (Ubuntu 11.4.0-2ubuntu1~20.04) 11.4.0
Copyright (C) 2021 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
lee@lee-Robot-Dev:~$ python3 -c "import torch; print(torch.__version__, torch.version.cuda, torch.cuda.is_available(), torch.backends.cudnn.version(), torch.backends.cudnn.is_available())"
2.2.2+cu121 12.1 True 8906 True
How should I handle it if I don’t see any issues with my thoughts?
ps. I don’t know how to use ATen namespace so i just used chatgpt to modify the sourcecode. I think that’s why error occors, but i can’t fix it .
here’s the code
#pragma once
#include "cpu/vision.h"
#ifdef WITH_CUDA
#include "cuda/vision.h"
#include <ATen/cuda/CUDAContext.h>
#endif
int knn(at::Tensor& ref, at::Tensor& query, at::Tensor& idx)
{
// TODO check dimensions
long batch, ref_nb, query_nb, dim, k;
batch = ref.size(0);
dim = ref.size(1);
k = idx.size(1);
ref_nb = ref.size(2);
query_nb = query.size(2);
float *ref_dev = ref.data_ptr<float>();
float *query_dev = query.data_ptr<float>();
long *idx_dev = idx.data_ptr<long>();
if (ref.type().is_cuda()) {
#ifdef WITH_CUDA
// TODO raise error if not compiled with CUDA
float *dist_dev = static_cast<float*>(at::cuda::CUDAContext::malloc(ref_nb * query_nb * sizeof(float)));
for (int b = 0; b < batch; b++)
{
knn_device(ref_dev + b * dim * ref_nb, ref_nb, query_dev + b * dim * query_nb, query_nb, dim, k,
dist_dev, idx_dev + b * k * query_nb, at::cuda::getCurrentCUDAStream());
}
at::cuda::CUDAContext::free(dist_dev);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in knn: %s\n", cudaGetErrorString(err));
AT_ERROR("aborting");
}
return 1;
#else
AT_ERROR("Not compiled with GPU support");
#endif
}
float *dist_dev = static_cast<float*>(malloc(ref_nb * query_nb * sizeof(float)));
long *ind_buf = static_cast<long*>(malloc(ref_nb * sizeof(long)));
for (int b = 0; b < batch; b++) {
knn_cpu(ref_dev + b * dim * ref_nb, ref_nb, query_dev + b * dim * query_nb, query_nb, dim, k,
dist_dev, idx_dev + b * k * query_nb, ind_buf);
}
free(dist_dev);
free(ind_buf);
return 1;
}