
I trained a U-Net based model in PyTorch. It takes an image as input and returns a mask. After training I exported it to ONNX format, ran it with the onnxruntime Python module, and it worked like a charm.

Now I want to use this model from C++ code on Linux.

Is there a simple (hello world) tutorial that explains:

  • How to integrate the onnxruntime library into a C++ program on Ubuntu (installing the shared libraries, etc.)?
  • How to correctly load an image and pass it to the model?

PS I only found this: https://www.onnxruntime.ai/docs/tutorials/samples_catalog.html#cc but there is no information about loading an image and converting it to an ONNX-compatible format in the C++ code.


1 Answer


For installation on Linux, see https://www.onnxruntime.ai/; the prebuilt Linux release ships the C/C++ headers and libonnxruntime.so, which you add to your include path and link against. You can refer to the code below for help on how to load and run an ONNX model.

#include <algorithm>  // std::generate
#include <assert.h>
#include <iostream>
#include <sstream>
#include <vector>
#include <experimental_onnxruntime_cxx_api.h>

// pretty prints a shape dimension vector
std::string print_shape(const std::vector<int64_t>& v) {
  std::stringstream ss("");
  for (size_t i = 0; i < v.size() - 1; i++)
    ss << v[i] << "x";
  ss << v[v.size() - 1];
  return ss.str();
}

int calculate_product(const std::vector<int64_t>& v) {
  int total = 1;
  for (auto& i : v) total *= i;
  return total;
}

using namespace std;

int main(int argc, char** argv) {
  if (argc != 2) {
    cout << "Usage: ./onnx-api-example <onnx_model.onnx>" << endl;
    return -1;
  }

#ifdef _WIN32
  std::string str = argv[1];
  std::wstring wide_string = std::wstring(str.begin(), str.end());
  std::basic_string<ORTCHAR_T> model_file = std::basic_string<ORTCHAR_T>(wide_string);
#else
  std::string model_file = argv[1];
#endif

  // onnxruntime setup
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "example-model-explorer");
  Ort::SessionOptions session_options;
  Ort::Experimental::Session session = Ort::Experimental::Session(env, model_file, session_options);  // access experimental components via the Experimental namespace

  // print name/shape of inputs
  std::vector<std::string> input_names = session.GetInputNames();
  std::vector<std::vector<int64_t> > input_shapes = session.GetInputShapes();
  cout << "Input Node Name/Shape (" << input_names.size() << "):" << endl;
  for (size_t i = 0; i < input_names.size(); i++) {
    cout << "\t" << input_names[i] << " : " << print_shape(input_shapes[i]) << endl;
  }

  // print name/shape of outputs
  std::vector<std::string> output_names = session.GetOutputNames();
  std::vector<std::vector<int64_t> > output_shapes = session.GetOutputShapes();
  cout << "Output Node Name/Shape (" << output_names.size() << "):" << endl;
  for (size_t i = 0; i < output_names.size(); i++) {
    cout << "\t" << output_names[i] << " : " << print_shape(output_shapes[i]) << endl;
  }

  // Assume model has 1 input node and 1 output node.
  assert(input_names.size() == 1 && output_names.size() == 1);

  // Create a single Ort tensor of random numbers
  auto input_shape = input_shapes[0];
  int total_number_elements = calculate_product(input_shape);
  std::vector<float> input_tensor_values(total_number_elements);
  std::generate(input_tensor_values.begin(), input_tensor_values.end(), [&] { return rand() % 255; });  // fill with random values in the range [0, 255)
  std::vector<Ort::Value> input_tensors;
  input_tensors.push_back(Ort::Experimental::Value::CreateTensor<float>(input_tensor_values.data(), input_tensor_values.size(), input_shape));

  // double-check the dimensions of the input tensor
  assert(input_tensors[0].IsTensor() &&
         input_tensors[0].GetTensorTypeAndShapeInfo().GetShape() == input_shape);
  cout << "\ninput_tensor shape: " << print_shape(input_tensors[0].GetTensorTypeAndShapeInfo().GetShape()) << endl;

  // pass data through model
  cout << "Running model...";
  try {
    auto output_tensors = session.Run(session.GetInputNames(), input_tensors, session.GetOutputNames());
    cout << "done" << endl;

    // double-check the dimensions of the output tensors
    // NOTE: the number of output tensors is equal to the number of output nodes specified in the Run() call
    assert(output_tensors.size() == session.GetOutputNames().size() &&
           output_tensors[0].IsTensor());
    cout << "output_tensor_shape: " << print_shape(output_tensors[0].GetTensorTypeAndShapeInfo().GetShape()) << endl;

  } catch (const Ort::Exception& exception) {
    cout << "ERROR running model inference: " << exception.what() << endl;
    exit(-1);
  }
}
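
The question also asks how to feed a real image instead of the random numbers used above. Below is a minimal sketch of one way to do that, assuming OpenCV is available for image loading and that the U-Net expects a [1, 3, H, W] float input normalized to [0, 1]; adjust the resizing, color order, and normalization to match whatever preprocessing you used during training.

#include <cstring>   // std::memcpy
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>   // assumed dependency, used only for image I/O and resizing

// Reads an image from disk and returns its pixels as a CHW float vector,
// ready to be wrapped in an Ort tensor of shape {1, 3, height, width}.
std::vector<float> load_image_chw(const std::string& path, int height, int width) {
  cv::Mat bgr = cv::imread(path, cv::IMREAD_COLOR);      // HWC, uint8, BGR
  cv::Mat rgb;
  cv::cvtColor(bgr, rgb, cv::COLOR_BGR2RGB);             // PyTorch pipelines usually expect RGB
  cv::resize(rgb, rgb, cv::Size(width, height));
  rgb.convertTo(rgb, CV_32F, 1.0 / 255.0);               // uint8 [0, 255] -> float [0, 1]

  // Re-arrange from HWC (OpenCV layout) to CHW (ONNX/PyTorch layout).
  std::vector<float> chw(3 * height * width);
  std::vector<cv::Mat> channels(3);
  cv::split(rgb, channels);
  for (int c = 0; c < 3; c++)
    std::memcpy(chw.data() + c * height * width,
                channels[c].data, height * width * sizeof(float));
  return chw;
}

Inside main() this would replace the random input above, e.g. (assuming input_shape is {1, 3, H, W}):

  std::vector<float> input_tensor_values =
      load_image_chw("test.png", static_cast<int>(input_shape[2]), static_cast<int>(input_shape[3]));
  std::vector<Ort::Value> input_tensors;
  input_tensors.push_back(Ort::Experimental::Value::CreateTensor<float>(
      input_tensor_values.data(), input_tensor_values.size(), input_shape));

After Run(), the raw mask values can be read back with output_tensors[0].GetTensorMutableData<float>() and reinterpreted using the output shape printed above.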
answered 2020-12-22T06:09:28