Getting random output in C++

I am getting random output on the same image using this code in C++.
Python:

import torch
import torchvision
import torch.nn as nn

model = torchvision.models.resnet18()

# An example input you would normally provide to your model's forward() method.
model.cuda()
example = torch.rand(1, 3, 224, 224).cuda()

# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
with torch.no_grad():
    traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("resnet18.pt")

C++

#include <torch/script.h>               // One-stop header.
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include <chrono>
#include <iostream>
#include <memory>

using namespace std;
using namespace std::chrono;

int main(int argc, const char* argv[]) {
    if (argc != 3) {
        std::cerr << "usage: resnet_libtorch <path-to-resnet-script-module> <path-to-input-image>\n";
        return -1;
    }
    std::cout << cv::getBuildInformation() << endl;
    // Configuration
    int input_image_size = 224;
    int batch_size = 1;

    // Deserialize the ScriptModule from a file using torch::jit::load().
    //torch::jit::script::Module module
    std::shared_ptr<torch::jit::script::Module> module = torch::jit::load(argv[1]);

    // Read input image
    cv::Mat origin_image = cv::imread(argv[2], CV_LOAD_IMAGE_COLOR);

    // Preprocess image (resize, put on GPU)
    cv::Mat resized_image;
    cv::cvtColor(origin_image, resized_image, cv::COLOR_RGB2BGR);
    cv::resize(resized_image, resized_image, cv::Size(input_image_size, input_image_size));

    cv::Mat img_float;
    resized_image.convertTo(img_float, CV_32F, 1.0 / 255);

    while(true){
        std::vector<at::Tensor> inputs_vec;
        for (int i = 0; i < 8; i++) {
            //auto img_tensor = torch::from_blob(img_float.data, {1, input_image_size, input_image_size, 3}).to(torch::kCUDA)
            auto img_tensor = torch::CPU(torch::kFloat32).tensorFromBlob(img_float.data, {1, input_image_size, input_image_size, 3});
            // HWC -> NCHW, then normalize each channel with the standard ImageNet mean/std
            img_tensor = img_tensor.permute({0, 3, 1, 2});
            img_tensor[0][0] = img_tensor[0][0].sub(0.485).div(0.229);
            img_tensor[0][1] = img_tensor[0][1].sub(0.456).div(0.224);
            img_tensor[0][2] = img_tensor[0][2].sub(0.406).div(0.225);
            auto img_var = torch::autograd::make_variable(img_tensor, false);
            inputs_vec.push_back(img_var);
        }

        // Create a vector of inputs.
        std::vector<torch::jit::IValue> inputs;
        //for (int i = 0; i < 8; i++) {
        //inputs.push_back(img_var.to(at::kCUDA));
        at::Tensor input_ = torch::cat(inputs_vec);
        inputs.push_back(input_.to(at::kCUDA));
        //inputs.push_back(torch::ones({10, 3, 224, 224}).to(at::kCUDA));
        //}

        // Execute the model and turn its output into a tensor.
        torch::Tensor output;
        auto duration = duration_cast<milliseconds>(std::chrono::high_resolution_clock::now() - std::chrono::high_resolution_clock::now());
        auto start = std::chrono::high_resolution_clock::now();
        output = module->forward(inputs).toTensor();
        auto end = std::chrono::high_resolution_clock::now();
        duration = duration_cast<milliseconds>(end - start);
        //std::cout << output[0] << '\n';
        for (int i = 0; i < output.size(0); i++) {
            at::Tensor max_ind = at::argmax(output[i]);
            std::cout << "class_id: " << i << " :" << max_ind.item<int>() << std::endl;
        }
        
        // at::Tensor max_ind = at::argmax(output);
        // std::cout << "class_id: " << max_ind.item<int>() << std::endl;
        // std::cout << "Time take for forward pass: " << duration.count() << " ms" << std::endl;
        break;
    }
    std::cout << "Done\n";
}

It gives a random class ID as output on the same input image.

Hi,

Some of your code still seems to be using obsolete commands.

The following code works on CPU with libtorch 1.3; you might want to modify it to work with CUDA (a rough sketch of that change follows the snippet). I've also hard-coded the model and image for a quick test, so replace them with your own.

		int input_image_size = 224;
		int batch_size = 1;

		// Deserialize the ScriptModule from a file using torch::jit::load().
		//torch::jit::script::Module module
		//std::shared_ptr<torch::jit::script::Module> module = torch::jit::load("resnet18.pt");
		torch::jit::script::Module module = torch::jit::load("resnet18.pt");

		// Read input image
		cv::Mat origin_image = cv::imread("cat.jpg", 1);

		// Preprocess image (resize, put on GPU)
		cv::Mat resized_image;
		cv::cvtColor(origin_image, resized_image, cv::COLOR_RGB2BGR);
		cv::resize(resized_image, resized_image, cv::Size(input_image_size, input_image_size));

		cv::Mat img_float;
		resized_image.convertTo(img_float, CV_32F, 1.0 / 255);

		while (true) {
			std::vector<at::Tensor> inputs_vec;
			for (int i = 0; i < 8; i++) {
				//auto img_tensor = torch::from_blob(img_float.data, {1, input_image_size, input_image_size, 3}).to(torch::kCUDA)
				auto img_tensor = torch::from_blob(img_float.data, { 1, input_image_size, input_image_size, 3 }, torch::kFloat);
				img_tensor = img_tensor.permute({ 0, 3, 1, 2 });
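				// Normalize each channel with the standard ImageNet mean/std that torchvision models were trained with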
				img_tensor[0][0] = img_tensor[0][0].sub(0.485).div(0.229);
				img_tensor[0][1] = img_tensor[0][1].sub(0.456).div(0.224);
				img_tensor[0][2] = img_tensor[0][2].sub(0.406).div(0.225);
				//auto img_var = torch::autograd::make_variable(img_tensor, false);
				inputs_vec.push_back(img_tensor);
			}

			// Create a vector of inputs.
			std::vector<torch::jit::IValue> inputs;
			//for (int i = 0; i < 8; i++) {
			//inputs.push_back(img_var.to(at::kCUDA));
			at::Tensor input_ = torch::cat(inputs_vec);
			//inputs.push_back(input_.to(at::kCUDA));
			inputs.push_back(input_);
			//inputs.push_back(torch::ones({10, 3, 224, 224}).to(at::kCUDA));
			//}

			// Execute the model and turn its output into a tensor.
			torch::Tensor output;
			auto duration = duration_cast<milliseconds>(std::chrono::high_resolution_clock::now() - std::chrono::high_resolution_clock::now());
			auto start = std::chrono::high_resolution_clock::now();
			output = module.forward(inputs).toTensor();
			auto end = std::chrono::high_resolution_clock::now();
			duration = duration_cast<milliseconds>(end - start);
			//std::cout << output[0] << '\n';
			for (int i = 0; i < output.size(0); i++) {
				at::Tensor max_ind = at::argmax(output[i]);
				std::cout << "class_id: " << i << " :" << max_ind.item<int>() << std::endl;
			}

			// at::Tensor max_ind = at::argmax(output);
			// std::cout << "class_id: " << max_ind.item<int>() << std::endl;
			// std::cout << "Time take for forward pass: " << duration.count() << " ms" << std::endl;
			break;
		}
		std::cout << "Done\n";```
class_id: 24
class_id: 7
class_id: 19
class_id: 7
class_id: 7
class_id: 27
class_id: 19
class_id: 7

I am getting output like this: the input image is the same for all of them and the actual class ID is 7, but it randomly gives other values as well. In PyTorch (Python), the same model works perfectly fine on this image.

I have also updated the libtorch version and the code accordingly.

This is weird. My results are quite consistent:

--> int_torch_test()
class_id: 237
class_id: 237
class_id: 237
class_id: 237
class_id: 237
class_id: 237
class_id: 237
class_id: 237
Done

I'm starting to wonder whether the different results could be due to the use of the GPU. Could you repost your new code? I'll see whether I can get access to a GPU and try it out.

Thanks.

rgds,
CL

Thanks a lot for your help. After setting module.eval(), it is giving me the correct results.
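
For anyone hitting the same issue, the change is just one call on the loaded script module (a minimal sketch; assuming a recent libtorch where torch::jit::script::Module exposes eval()):

		// Load the traced model and switch it to inference mode before the first forward pass.
		torch::jit::script::Module module = torch::jit::load("resnet18.pt");
		module.eval();   // BatchNorm/Dropout now use inference behaviour instead of training behaviour

		// ... preprocess the image and build `inputs` exactly as in the snippets above ...
		torch::Tensor output = module.forward(inputs).toTensor();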

Thanks for the update. I thought no_grad() was doing the same job as eval(), since that seemed to be the case on my system. Anyway, good to hear you've solved your problem; I'll keep this in mind for future exploration. Thanks.
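
For future reference (my understanding, not verified on your setup): no_grad only disables gradient tracking, while eval() switches layers such as BatchNorm and Dropout to inference behaviour, which is presumably what made your outputs deterministic. Roughly, in C++:

		torch::jit::script::Module module = torch::jit::load("resnet18.pt");

		// NoGradGuard: skips gradient bookkeeping, but does NOT change the module's training/eval mode.
		{
			torch::NoGradGuard no_grad;
			auto out = module.forward(inputs).toTensor();
		}

		// eval(): BatchNorm uses its running statistics and Dropout is disabled,
		// so repeated runs on the same input produce the same class id.
		module.eval();
		auto out = module.forward(inputs).toTensor();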

rgds,
CL