libtorch can't link library with protobuf using cuda

:question: Questions and Help

Please note that this issue tracker is not a help form and this issue will be closed.

We have a set of listed resources available on the website. Our primary means of support is our discussion forum:

How to link correctly using TARGET_LINK_LIBRARY between pytorch-cu111 and google_protobuffer?

I tried to search solutions in forum and followed pytorch tutorials…
But, Most of solutions in forum has been NOT working


I want to use libtorchwith CUDA 11.2

My env (custom desktop)
SW
python 3.8.10, conda 4.10.3 ( ML workspace docker based )
downloaded libtorch : libtorch-cxx11-abi-shared-with-deps-1.9.0+cu111.zip

HW
Ubuntu 20.04
16core intel CPU
V100 CUDA 11.2, Driver Version: 418.67
180GB RAM


remark : python based pytorch can use CUDA and works well,

import torch
device = torch.device('cuda')
# works very well, no error occured .

but libtorch can’t use cuda linking with protobuf

  1. file : CMakeLists txt
CMAKE_MINIMUM_REQUIRED(VERSION 3.8.2 FATAL_ERROR)
PROJECT(target_name)

#SET(CMAKE_CXX_STANDARD 17)
SET(CMAKE_CXX_STANDARD_REQUIRED ON)
SET(CMAKE_POSITION_INDEPENDENT_CODE ON)
#add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=1)
#add_definitions(-D_GLIBCXX_USE_CXX11_ABI=1)

add_definitions(-DPYBIND=1)
ADD_SUBDIRECTORY(pybind11)

INCLUDE(FindProtobuf)
FIND_PACKAGE(Protobuf REQUIRED)
INCLUDE_DIRECTORIES(${Protobuf_INCLUDE_DIRS})
FILE(GLOB PROTO_DEF "src/mmm_api/protobuf/*.proto")
PROTOBUF_GENERATE_CPP(PROTO_SRC PROTO_HEADER ${PROTO_DEF})
ADD_LIBRARY(proto ${PROTO_HEADER} ${PROTO_SRC})

#SET(TORCH_FOLDER "/workspace/work_dir/libtorch_gpu/libtorch")
#ADD_LIBRARY(torch SHARED IMPORTED)
#set_target_properties(
#    torch PROPERTIES IMPORTED_LOCATION ${TORCH_FOLDER}/lib/libtorch.so)
#INCLUDE_DIRECTORIES( "${TORCH_FOLDER}/include" )
#INCLUDE_DIRECTORIES( "${TORCH_FOLDER}/include/torch/csrc/api/include" )
#FILE(GLOB TORCH_DLLS "${TORCH_FOLDER}/lib/*.so")

find_package(Torch REQUIRED)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
SET(TORCH_DIR "/opt/conda/envs/work_env/lib/python3.8/site-packages/torch")
INCLUDE_DIRECTORIES( "${TORCH_DIR}/include" )
INCLUDE_DIRECTORIES( "${TORCH_DIR}/include/torch/csrc/api/include" )
FILE(GLOB TORCH_DLLS "${TORCH_DIR}/lib/*.so")

PYBIND11_ADD_MODULE(target_name
    src/mmm_api/lib.cpp
    src/mmm_api/dataset/lz4.c 
    src/mmm_api/protobuf/midi.pb.cc)

TARGET_LINK_LIBRARIES(target_name PRIVATE proto "${TORCH_LIBRARIES}" "${Protobuf_LIBRARIES}")
  1. file : setup py
import os
import re
import sys
import platform
import subprocess
import torch

from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion


class CMakeExtension(Extension):
    def __init__(self, name, sourcedir=''):
        Extension.__init__(self, name, sources=[])
        self.sourcedir = os.path.abspath(sourcedir)


class CMakeBuild(build_ext):
    def run(self):
        try:
            out = subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError("CMake must be installed to build the following extensions: " +
                               ", ".join(e.name for e in self.extensions))

        if platform.system() == "Windows":
            cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
            if cmake_version < '3.1.0':
                raise RuntimeError("CMake >= 3.1.0 is required on Windows")

        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
                      '-DPYTHON_EXECUTABLE=' + sys.executable
                      # '-DProtobuf_DIR=.../tmp_install/cmake'
        ]

        #if hasattr(torch.utils, "cmake_prefix_path"):
        #    cmake_args.append('-DCMAKE_PREFIX_PATH=' + torch.utils.cmake_prefix_path)
        #    print(f'torch cmake:: {torch.utils.cmake_prefix_path}')
        cmake_args.append('-DCMAKE_PREFIX_PATH=' + '/workspace/work_dir/libtorch_gpu/libtorch')
        # cmake_args.append('-DProtobuf_DIR=' + '/opt/conda/envs/final_01/lib/python3.8/site-packages/torch/lib/tmp_install/cmake')
        # /opt/conda/envs/final_01/lib/python3.8/site-packages/torch/lib/tmp_install
        cfg = 'Debug' if self.debug else 'Release'
        build_args = ['--config', cfg]

        if platform.system() == "Windows":
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
            if sys.maxsize > 2**32:
                cmake_args += ['-A', 'x64']
            build_args += ['--', '/m']
        else:
            cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
            build_args += ['--', '-j2']

        env = os.environ.copy()
        env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', '--std=c++17'),
                                                              self.distribution.get_version())
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)


setup(
    name='some_name',
    version='0.0.4',
    author='some_named',
    description='some library',
    keywords='some keywords',
    long_description='some library',
    ext_modules=[CMakeExtension('target_name')],
    cmdclass=dict(build_ext=CMakeBuild),
    zip_safe=False,
)

Is there solution to solve this problem ( libtorch cuda linking with protobuf ) ?
More Detail Example Link : Google Colab