Hi! I'm working on a Unity project that needs to run my trained model inside the app. I tried converting the .pt file to .onnx so it can run inside Unity (using Barracuda); however, the predictions come out different, so I can't get the correct data.
This is the code I used to export the .pt file:
import torch
import torch.nn as nn
import torchvision.models as models
import numpy as np
import random
import onnxruntime as ort
import json
# Set random seeds for reproducibility
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)
class MyResNet18Model(nn.Module):
    def __init__(self, num_classes=10):
        super(MyResNet18Model, self).__init__()
        self.resnet18 = models.resnet18(pretrained=False)
        self.resnet18.fc = nn.Linear(self.resnet18.fc.in_features, num_classes)

    def forward(self, x):
        return self.resnet18(x)
model = MyResNet18Model(num_classes=10)
# Load checkpoint
checkpoint = torch.load('./Training Model/pf_model_copy.pt')
model.load_state_dict(checkpoint['state_dict'], strict=False)
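# Optional sanity check (my addition, not required for the export itself): with
# strict=False, weights whose names don't match the model are silently skipped,
# which would change the predictions. load_state_dict returns the mismatched keys.
incompatible = model.load_state_dict(checkpoint['state_dict'], strict=False)
print("Missing keys:", incompatible.missing_keys)
print("Unexpected keys:", incompatible.unexpected_keys)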
# Set model to evaluation mode
model.eval()
# Create dummy input
dummy_input = torch.randn(1, 3, 720, 720)
# Get PyTorch model output
with torch.no_grad():
    pytorch_output = model(dummy_input)
# Export model to ONNX format
torch.onnx.export(
    model,                                       # model being run
    dummy_input,                                 # model input (or a tuple for multiple inputs)
    "./Training Model/pf_model_copy_v4.onnx",    # where to save the model (can be a file or file-like object)
    export_params=True,                          # store the trained parameter weights inside the model file
    opset_version=11,                            # the ONNX version to export the model to
    do_constant_folding=True,                    # whether to execute constant folding for optimization
    input_names=['input'],                       # the model's input names
    output_names=['output'],                     # the model's output names
    dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                  'output': {0: 'batch_size'}}
)
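# Optional structural check on the exported file (assumes the separate `onnx`
# package is installed; onnxruntime alone does not provide onnx.checker):
import onnx
onnx_model = onnx.load("./Training Model/pf_model_copy_v4.onnx")
onnx.checker.check_model(onnx_model)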
print("Model has been converted to ONNX format and saved as pf_model_copy_v4.onnx")

# Create ONNX Runtime session
ort_session = ort.InferenceSession("./Training Model/pf_model_copy_v4.onnx")
# Function to convert a PyTorch tensor to a NumPy array
def to_numpy(tensor):
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
# Prepare inputs for ONNX Runtime
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(dummy_input)}
# Run inference
ort_outs = ort_session.run(None, ort_inputs)
# Print outputs
print("PyTorch output:", pytorch_output)
print("ONNX output:", ort_outs[0])
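# Compare the two outputs numerically instead of eyeballing the prints
# (tolerances follow the common PyTorch export examples; adjust if needed).
np.testing.assert_allclose(to_numpy(pytorch_output), ort_outs[0], rtol=1e-03, atol=1e-05)
print("PyTorch and ONNX Runtime outputs match within tolerance")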
# Save labels to JSON
labels = {
    0: 'Grafitti_Gorilla',
    1: 'Hydro_Phantasm',
    2: 'Purple_Fighter',
    3: 'Pyro_Monkey',
    4: 'Red_panda',
    5: 'Smoke_Face',
    6: 'Split_Girl',
    7: 'Swap_Kitty',
    8: 'Taiyaki',
    9: 'UFO_Cat'
}
with open('./Training Model/labels.json', 'w') as f:
    json.dump(labels, f)
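# Illustrative only: map the highest-scoring class of the ONNX output to its label,
# the same lookup the Unity side would need to do with labels.json.
predicted_index = int(np.argmax(ort_outs[0], axis=1)[0])
print("Predicted label for the dummy input:", labels[predicted_index])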
Can anybody help? Thank you!