Additional info:
OS: Windows 10
Libtorch: 1.3.1
CUDA: 10.1
cudnn: 7.6.5
VS: 2017
The memory layout in my code is NHWC. I just found that libtorch only supports the NCHW format, so here is the updated code:
#include <torch/torch.h>
using namespace torch;
// Minimal network: a single 1x1 convolution mapping 2 input channels to 10.
struct ConvNet : nn::Module
{
    ConvNet()
        : conv1(nn::Conv2dOptions(2, 10, { 1, 1 }).stride(1).padding(0).with_bias(true))
    {
        // Register the submodule so its parameters are tracked by the Module
        // (and therefore moved by Module::to()/cuda()).
        register_module("Conv1", conv1);
    }

    // Applies the convolution. Expects an NCHW tensor residing on the same
    // device as this module's parameters.
    Tensor forward(Tensor input)
    {
        return conv1->forward(input); // exception was reported on this call
    }

    nn::Conv2d conv1{ nullptr };
};
int main()
{
Tensor input = torch::randn({ 2, 2, 3, 3 });
input = input.cuda();
std::cout << input << std::endl;
std::shared_ptr<ConvNet> mNet = std::make_shared<ConvNet>();
std::cout << mNet->forward(input) << std::endl;
return 0;
}
The exception still exists.