PyTorch C++ Extension with CMake, Minimal Example

Hey,

does someone have a minimal example of how to compile a PyTorch C++ extension for Python with CMake?
I have something working with pybind11 and NumPy, but a minimal example for PyTorch would be nice.
I think in many cases CMake is preferable to the setuptools build process.

Cheers

Official tutorial: https://pytorch.org/tutorials/advanced/cpp_extension.html
GitHub: https://github.com/pytorch/extension-cpp

Unfortunately, this is not what I meant. I was wondering whether there is an example where the extension is built with CMake rather than via setuptools.

To provide more context: I am referring to what is described in pybind11 here: PyBind11-CMake

So I figure it should be possible with PyTorch extensions as well, and it is often preferable to setuptools because it is more platform-independent.
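
A minimal sketch of what I have in mind, assuming Torch and pybind11 are both discoverable via CMAKE_PREFIX_PATH (the project/target names and the source file are placeholders, and the torch_python lookup at the end may need adjusting depending on the PyTorch version):

cmake_minimum_required(VERSION 3.12)
project(torch_cmake_example LANGUAGES CXX)

set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# TorchConfig.cmake ships inside the torch package; point CMAKE_PREFIX_PATH at
# <site-packages>/torch when configuring.
find_package(Torch REQUIRED)
find_package(pybind11 REQUIRED)  # or add_subdirectory() of a pybind11 checkout

# my_extension.cpp stands for the source file containing PYBIND11_MODULE(...)
pybind11_add_module(my_extension my_extension.cpp)
target_link_libraries(my_extension PRIVATE ${TORCH_LIBRARIES})

# The pybind11 <-> at::Tensor casters live in libtorch_python, which is usually
# not an exported CMake target, so look it up next to the other Torch libraries.
find_library(TORCH_PYTHON_LIBRARY torch_python PATHS "${TORCH_INSTALL_PREFIX}/lib")
target_link_libraries(my_extension PRIVATE ${TORCH_PYTHON_LIBRARY})

The built shared library should then be importable from Python as my_extension, as long as it ends up on the Python path.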

I think @artbataev has done it, am I correct? Maybe you could give me a hint. :)

Cheers,
Lucas

You can see my code at https://github.com/artbataev/end2end, tested with PyTorch 1.3.

I think it is far from ideal, but it works. If you have any questions, I will try to answer them.

https://github.com/artbataev/end2end/blob/master/setup.py - building via setup.py
https://github.com/artbataev/end2end/blob/master/CMakeLists.txt - basic project CMake file
https://github.com/artbataev/end2end/blob/master/pytorch_end2end/src/CMakeLists.txt - compiling the PyTorch plugin

I haven't found a good solution for the cuDNN problem, but it's possible to specify the cuDNN path manually (see the sketch below).
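
Roughly, the idea looks like this (the exact variable names depend on the PyTorch version; the CMake file further down in this thread uses these two, and the paths are placeholders). The important part is to set them before find_package(Torch), so Torch's cuDNN discovery picks them up:

set(CUDNN_INCLUDE_PATH "/path/to/cudnn/include")
set(CUDNN_LIBRARY_PATH "/path/to/cudnn/lib/libcudnn.so")
find_package(Torch REQUIRED)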

I guess my use case is slightly different. I want to run my PyTorch extension in a minimal external C++ program; the purpose is to run profilers and debuggers on the C++ and CUDA code more easily. Therefore I was able to ignore all the pybind11 magic: I read some tensors from files and run the kernel.

This is my CMake file. I'm just posting it so somebody might benefit from it (it took me two days to make it work):

cmake_minimum_required(VERSION 3.18)
# get CMake 3.18 from conda-forge; CMake < 3.18 doesn't support CMAKE_CUDA_ARCHITECTURES
# in my case it chose a very old CUDA architecture by default, because I have an old GeForce for display

project(my_pytorch_extensions LANGUAGES CUDA CXX)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

if(NOT DEFINED CMAKE_CUDA_STANDARD)
    set(CMAKE_CUDA_STANDARD 14)
    set(CMAKE_CUDA_STANDARD_REQUIRED ON)
endif()
set(CMAKE_CUDA_ARCHITECTURES 60)
string(APPEND CMAKE_CUDA_FLAGS " -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options '-fPIC' -O3 --use_fast_math")

# Guard all pybind11 references with this definition, i.e. skip the Python
# bindings for this test build. The C++ code wraps them in:
#   #ifndef CMAKE_TEST_BUILD
#   ... pybind11-related code ...
#   #endif
add_compile_definitions(CMAKE_TEST_BUILD)

# could be taken from $ENV{CONDA_PREFIX}, but I only activate the conda environment on demand and my IDE doesn't have it
set (MY_ANACONDA_PATH "/home/madam/bin/anaconda3")

list(APPEND CMAKE_PREFIX_PATH "${MY_ANACONDA_PATH}/lib/python3.7/site-packages/torch/")


# the CUDA version of this cuDNN build must match the one PyTorch was built with
set(CUDNN_INCLUDE_PATH
   ${MY_ANACONDA_PATH}/pkgs/cudnn-7.6.4-cuda10.1_0/include/
   )
set (CUDNN_LIBRARY_PATH
   ${MY_ANACONDA_PATH}/pkgs/cudnn-7.6.4-cuda10.1_0/lib/libcudnn.so
   )

find_package(OpenMP REQUIRED)
find_package(Torch REQUIRED)

set(MY_INCLUDE_PATHS
    ${MY_ANACONDA_PATH}/include/python3.7m/
    ../glm
    .
)

# SYSTEM hides warnings from external headers
include_directories(SYSTEM ${MY_INCLUDE_PATHS} ${TORCH_INCLUDE_DIRS})

set(HEADERS
    common.h
)

set(EVALUATE2_SOURCES
    evaluate2/evaluate2_inversed_cpu.cpp
    evaluate2/evaluate2_inversed_cuda.cpp
    evaluate2/evaluate2_inversed_cuda.cu
    evaluate2/test_runner.cpp
)

add_executable(gm_evaluate2 ${HEADERS} ${EVALUATE2_SOURCES})
target_link_libraries(gm_evaluate2 PUBLIC OpenMP::OpenMP_CXX torch)
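
For completeness, a hypothetical companion target (not part of my setup) could build the same sources as an importable Python module. This assumes pybind11 is findable via find_package, and that the global add_compile_definitions(CMAKE_TEST_BUILD) above is replaced with a target-scoped target_compile_definitions(gm_evaluate2 PRIVATE CMAKE_TEST_BUILD), so that only the test runner strips the bindings:

find_package(pybind11 REQUIRED)
# test_runner.cpp only makes sense for the standalone executable, so it is left out here.
pybind11_add_module(gm_evaluate2_py
    evaluate2/evaluate2_inversed_cpu.cpp
    evaluate2/evaluate2_inversed_cuda.cpp
    evaluate2/evaluate2_inversed_cuda.cu
)
# Depending on the PyTorch version, libtorch_python may also be needed for the
# pybind11 <-> at::Tensor casters.
target_link_libraries(gm_evaluate2_py PRIVATE OpenMP::OpenMP_CXX torch)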
