Torch_cuda is never loaded without explicitly doing so

CMakeLists.txt:

cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
project(torchtest)
enable_language(CUDA)

get_filename_component(CMAKE_PREFIX_PATH ../libtorch ABSOLUTE)
get_filename_component(CUDNN_ROOT ../cudnn ABSOLUTE)
find_package(Torch REQUIRED)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

add_executable(test1 test.cpp)
target_link_libraries(test1 PRIVATE "${TORCH_LIBRARIES}")

if (MSVC)
  file(GLOB TORCH_DLLS "${TORCH_INSTALL_PREFIX}/lib/*.dll")
  add_custom_command(TARGET test1
                     POST_BUILD
                     COMMAND ${CMAKE_COMMAND} -E copy_if_different
                     ${TORCH_DLLS}
                     $<TARGET_FILE_DIR:test1>)
endif (MSVC)

test1.cpp:

#include <iostream>
#include <torch/torch.h>

int main() {
    std::cout << torch::cuda::is_available() << std::endl;
    std::cout << torch::cuda::cudnn_is_available() << std::endl;
    return 0;
}

Output:

0
0

If I edit test1.cpp to

#include <iostream>
#include <torch/torch.h>
#include <Windows.h>

int main() {
    LoadLibraryA("torch_cuda.dll");
    std::cout << torch::cuda::is_available() << std::endl;
    std::cout << torch::cuda::cudnn_is_available() << std::endl;
    return 0;
}

It outputs

1
1

as intented. CUDA: 11.8, Libtorch: cu118 latest, OS: Windows 10, Generator: Ninja Multi-Config, Compiler: icx
How should I fix this?

I cannot reproduce the issue in Linux using a nightly release and get:

1
1

for he first code snippet already, so I guess the issue might be Windows-specific.

Will this get fixed in the future? Or should continue to I use LoadLibraryA?

I don’t know if this issue is still there as I wasn’t able to reproduce it on Linux so we might need to wait for someone else using Windows or you could use the latest nightly release or source build to check if you can still see the issue. If so, you could create an issue on GitHub so that code owners could track and fix it.