Python code:
import torch
import torch.nn.functional as F
from torchvision import transforms
from torchvision.models import resnet18
from torch import nn
from PIL import Image

# Run a single image through a TorchScript model on CPU.
#
# Fixes vs. the original:
#  - straight ASCII quotes (curly quotes are a SyntaxError in Python)
#  - raw strings for Windows paths: in "D:\1.png" the "\1" is an octal
#    escape (chr(1)) that silently corrupts the path
#  - model.eval() + torch.no_grad() for correct, cheaper inference
model_path = r"d:\model.pt"
image_path = r"D:\1.png"

image = Image.open(image_path).convert("RGB")

# ImageNet-style preprocessing: shorter side -> 256, center crop 224,
# then per-channel normalization with ImageNet statistics.
transformer = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
image = transformer(image).unsqueeze(0)  # add batch dim: (1, 3, 224, 224)

device1 = torch.device("cpu")
modelll = torch.jit.load(model_path, map_location=device1)
modelll.eval()  # disable dropout / batch-norm training behavior

with torch.no_grad():  # inference only — skip autograd bookkeeping
    outputs = modelll(image.to(device1))

# argmax over the class dimension; .data access is deprecated
_, predicted = torch.max(outputs, 1)
print(predicted)
print("end")
Equivalent C++ (LibTorch + OpenCV) code:
torch::jit::script::Module module;
std::ifstream is("d:\\model.pt", std::ifstream::binary);
module = torch::jit::load(is);
cv::Mat image;
image = cv::imread("D:\\1.png", 1);
cv::cvtColor(image, image, 4);
cv::Mat img_float;
image.convertTo(img_float, CV_32F, 1.0 / 255);
cv::resize(img_float, img_float, cv::Size(224, 224));
auto img_tensor = torch::from_blob(img_float.data, { 1, 224, 224, 3 });// .to(torch::kCUDA);
img_tensor = img_tensor.permute({ 0, 3, 1, 2 });
img_tensor[0][0] = img_tensor[0][0].sub(0.485).div(0.229);
img_tensor[0][1] = img_tensor[0][1].sub(0.456).div(0.224);
img_tensor[0][2] = img_tensor[0][2].sub(0.406).div(0.225);
auto img_var = torch::autograd::make_variable(img_tensor, false);
std::vector<torch::jit::IValue> inputs;
inputs.push_back(img_var);
torch::Tensor output = module.forward(inputs).toTensor().cpu();
auto accessor = output.accessor<float, 2>();
Question: why do the Python and C++ versions produce different predictions for the same model and image?