I have saved the model from Python and successfully loaded it in libtorch (C++), but at inference it always gives me the same output, no matter what the input is. What could be causing this?
Here is my C++ code:
#include <torch/script.h>
#include <opencv2/opencv.hpp>

#include <cassert>
#include <iostream>
#include <memory>
#include <vector>

int main(int argc, const char* argv[]) {
//torch::DeviceType device_type = at::kCUDA;
torch::DeviceType device_type = torch::kCPU;
torch::Device device(device_type);
// Deserialize the ScriptModule from a file using torch::jit::load().
std::shared_ptr<torch::jit::script::Module> module = torch::jit::load("../model.pt");
assert(module != nullptr);
module->to(device);
std::cout << "ok\n";
cv::Mat or_img = cv::imread("../1.png");
assert(!or_img.empty());  // make sure the image was actually read
std::cout << or_img.cols << " " << or_img.rows << " " << or_img.channels() << std::endl;
// Convert BGR -> RGB and resize to the network's 416x416 input size.
cv::Mat res_img;
cv::cvtColor(or_img, or_img, cv::COLOR_BGR2RGB);
cv::resize(or_img, res_img, cv::Size(416, 416));
// Scale pixel values to floats in [0, 1].
cv::Mat img;
res_img.convertTo(img, CV_32F, 1.0 / 255);
// Note: from_blob does not copy; `img` must stay alive and contiguous while
// the tensor is in use (clone() would make an owning copy).
auto img_tensor = torch::from_blob(img.data, {1, 416, 416, 3}, torch::kFloat32);
// NHWC -> NCHW, the layout the model expects.
img_tensor = img_tensor.permute({0, 3, 1, 2});
//at::Tensor tensor_image = torch::from_blob(img.data, {1, 416, 416, 3}, at::kByte);
//tensor_image = tensor_image.permute({0, 3, 1, 2});
//tensor_image = tensor_image.to(at::kFloat);
//auto img_var = torch::autograd::make_variable(tensor_image, false).to(device);
//cout<<img_var<<endl;
// Move the input to the same device as the model and wrap it for forward().
img_tensor = img_tensor.to(device);
std::vector<torch::jit::IValue> inputs;
inputs.push_back(img_tensor);
// Execute the model and turn its output into a tensor.
at::Tensor output = module->forward(inputs).toTensor();
std::cout << output.slice(/*dim=*/1, /*start=*/0, /*end=*/5) << '\n';
}
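
For what it's worth, one way to narrow this down is to check whether the exported model reacts to its input at all, independent of the OpenCV preprocessing. The snippet below is only a sketch under the same assumptions as above (the same ../model.pt, CPU, a 1x3x416x416 float input); it feeds two different random tensors and compares the outputs:

#include <torch/script.h>
#include <iostream>
#include <memory>

int main() {
std::shared_ptr<torch::jit::script::Module> module = torch::jit::load("../model.pt");
at::Tensor a = torch::rand({1, 3, 416, 416});
at::Tensor b = torch::rand({1, 3, 416, 416});
at::Tensor out_a = module->forward({a}).toTensor();
at::Tensor out_b = module->forward({b}).toTensor();
// Prints 1 if the two outputs are effectively the same, i.e. the exported
// model seems to ignore its input; prints 0 if the outputs differ, which
// points at the image preprocessing instead.
std::cout << at::allclose(out_a, out_b) << std::endl;
return 0;
}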