Hello,
I have a question about how to load a model in Python PyTorch that was previously saved with the PyTorch C++ API (libtorch).
This is the model in C++:
// Simple two-layer fully connected network: Linear -> ReLU -> Linear.
struct Net : torch::nn::Module
{
    // Register both layers so their parameters are tracked and serialized.
    // NOTE: the names "fc1"/"fc2" must be plain ASCII double-quoted string
    // literals — the original used typographic quotes (“fc1”), which is not
    // valid C++ and does not compile. The names should match the attribute
    // names used by the Python module so the saved state lines up.
    Net(int inputSize, int hiddenSize, int numClasses)
        : fc1(register_module("fc1", torch::nn::Linear(inputSize, hiddenSize))),
          fc2(register_module("fc2", torch::nn::Linear(hiddenSize, numClasses))) {}

    torch::nn::Linear fc1;
    torch::nn::Linear fc2;

    // Forward pass: fc1 -> ReLU -> fc2; returns the raw logits.
    torch::Tensor forward(torch::Tensor input)
    {
        auto out = fc1(input);
        out = torch::relu(out);
        out = fc2(out);
        return out;
    }
};
and I saved it like this:
std::string modelPath = "model.pt";
torch::serialize::OutputArchive outputArchive;
model.save(outputArchive);
outputArchive.save_to(modelPath);
On the Python side this is my model:
class NeuralNet(nn.Module):
    """Two-layer fully connected network: Linear -> ReLU -> Linear.

    Submodules are named fc1/fc2, matching the names registered by the
    C++ `Net` module so the serialized state lines up by name.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        # Bug fix: `nn.Relu` does not exist (AttributeError at runtime);
        # use the functional form, mirroring the C++ torch::relu call.
        out = torch.relu(out)
        out = self.fc2(out)
        return out
but I don’t know how to construct a model object in Python and load its weights from the “model.pt” file.
Thanks for any help!
Best,
Ivan