Jetson Xavier NX Libtorch CUDA Problem

Using Jetpack 4.4 and installed the Pytorch wheel from here PyTorch for Jetson - version 1.6.0 now available.

But torch::cuda::cudnn_is_available() is showing false.

Strangely, when I use Python3 and `import torch`, it says CUDA is available (true).

My makefile is using the Python3 to get location of Libtorch.

While CUDA seems to be available, cudnn doesn’t seem to be shipped in the binary.
I’m not sure, if there is a typo in your post or if you are checking different libs in C++ and Python.

On Python3 it says true, but I am using the C++ Libtorch.
Using C++ on a DGPU machine torch::cuda::cudnn_is_available() says true but on Jetson Xavier NX it says false.
Here is my makefile:

APP := back-to-back-detectors
CC := g++

# First field of the GCC target triplet (e.g. "aarch64" on a Jetson).
# ":=" so the shell command runs once at parse time, not on every expansion.
TARGET_DEVICE := $(shell gcc -dumpmachine | cut -f1 -d -)

# Ask the Python installation where its libtorch headers/libraries live, so
# the C++ build uses the exact same PyTorch that Python imports.
PYTHON_HEADER_DIR := $(shell python3 -c 'from distutils.sysconfig import get_python_inc; print(get_python_inc())')
PYTORCH_INCLUDES := $(shell python3 -c 'from torch.utils.cpp_extension import include_paths; [print(p) for p in include_paths()]')
PYTORCH_LIBRARIES := $(shell python3 -c 'from torch.utils.cpp_extension import library_paths; [print(p) for p in library_paths()]')

INCLUDE_DIRS += $(PYTHON_HEADER_DIR)
INCLUDE_DIRS += $(PYTORCH_INCLUDES)

COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) -DTORCH_API_INCLUDE_EXTENSION_H -D_GLIBCXX_USE_CXX11_ABI=0
CFLAGS := -O3 -fopenmp -march=native -fpermissive $(COMMON_FLAGS)

CUDA_VER := 10.2
NVDS_VERSION := 5.0

LIB_INSTALL_DIR ?= /opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib

ifeq ($(TARGET_DEVICE),aarch64)
  # BUG FIX: this was "CFLAGS :=", which threw away every include path and
  # define assembled above — precisely on the Jetson (aarch64) target.
  # Append the platform define instead of clobbering the whole variable.
  CFLAGS += -DPLATFORM_TEGRA
endif

# Sources, headers, and derived object list (globs expanded once via ":=").
SRCS := $(wildcard *.cpp)
INCS := $(wildcard *.h)
OBJS := $(SRCS:.cpp=.o)

# External packages resolved through pkg-config.
PKGS := gstreamer-1.0 gstreamer-base-1.0 gstreamer-video-1.0 x11 opencv

CFLAGS += -fPIC -DDS_VERSION=\"5.0.0\" \
	-I /usr/local/cuda-$(CUDA_VER)/include \
	-I$(DS_SDK_ROOT)/sources/includes

# $(shell …) instead of backticks so pkg-config runs at parse time rather
# than being re-executed every time CFLAGS/LIBS is expanded.
CFLAGS += $(shell pkg-config --cflags $(PKGS))

LIBS := $(shell pkg-config --libs $(PKGS))

# DeepStream and CUDA runtime libraries (duplicate -lnvdsgst_meta/-lnvds_meta
# entries from the original removed).
LIBS += -Wl,-no-undefined \
	-L$(LIB_INSTALL_DIR) -lnvdsgst_meta -lnvds_meta -lnvdsgst_helper \
	-lnvbufsurface -lnvbufsurftransform \
	-Wl,-rpath,$(LIB_INSTALL_DIR) \
	-L/usr/local/cuda-$(CUDA_VER)/lib64/ -lcudart -ldl \
	-lnppc -lnppig -lnpps -lnppicc -lnppidei

# libtorch libraries. BUG FIX: torch.utils.cpp_extension.library_paths() may
# return MORE than one directory; a single "-L$(PYTORCH_LIBRARIES)" then
# produces a broken "-L<dir1> <dir2>" argument. Emit one -L/-rpath pair per
# directory instead.
LIBS += $(foreach libdir,$(PYTORCH_LIBRARIES),-L$(libdir) -Wl,-rpath,$(libdir)) \
	-ltorch -ltorch_cpu -ltorch_cuda -lc10 -lc10_cuda -lgomp -lnvToolsExt

# "all" and "clean" are commands, not files — declare them phony so a stray
# file with either name cannot silently break the build.
.PHONY: all clean

all: $(APP)

# Recompile an object when its source, any header, or the Makefile changes.
%.o: %.cpp $(INCS) Makefile
	$(CC) -c -o $@ $(CFLAGS) $<

$(APP): $(OBJS) Makefile
	$(CC) -o $(APP) $(OBJS) $(LIBS)

clean:
	$(RM) $(OBJS) $(APP)

Yes there was a typo, it should have been torch::cuda::is_available(). But this too is showing false in .cpp while on Python3 it shows true.