I am using libtorch (1.2). I can run `std::cout << "cuda is used:" << torch::cuda::is_available()` and `torch::cuda::cudnn_is_available()`, but I don't know why I can't run `torch::cuda::synchronize()`.
type or paste code here
```int main(int agrc, const char *agrv[]) {
torch::Tensor tensor = torch::rand({2, 3});
std::cout << tensor << std::endl;
std::cout << "cuda is used:" << torch::cuda::is_available() << std::endl;
std::cout << "cudnn is used:" << torch::cuda::cudnn_is_available() << std::endl;
torch::cuda::synchronize();
torch::DeviceType device_type;
device_type = torch::kCUDA;
torch::Device device(device_type);
std::cout << "cuda:" << device_type << std::endl;
torch::jit::script::Module module = torch::jit::load("/home/yu/code/model22.pt", device);
// Deserialize the ScriptModule from a file using torch::jit::load().
std::vector<torch::jit::IValue> inputs;
//start = clock();
inputs.push_back(torch::ones({1, 3, 224, 224}).to(device));
//end =clock();
//std::cout << (double)(end-start)/ CLOCKS_PER_SEC << "seconds" << std::endl;
std::cout << "cuda used at:" << torch::cuda::device_count() << std::endl;
// Exectute the model
at::Tensor output = module.forward(inputs).toTensor();
std::cout << output.slice(/*dims=*/1, /*start=*/0, /*end=*/5) << '\n';
std::cout << "ok\n";
}