
I have a C++ class wrapped with Boost Python.

One of the class methods takes two cv::Mats, like so:

MyClass::do_something(cv::Mat input, cv::Mat output)

The functionality I expose to Python includes the method above, the constructor, and a few print methods.

Initialization and the print methods (used for debugging) work fine in both C++ and the Python wrapper:

obj = MyClass(arg1, arg2, arg3)
obj.print_things()

These calls complete successfully.

It is the do_something() call that gives me trouble (in the Python bindings; it completes successfully in C++):

from libmyclass import *
import cv
rgb = cv.CreateMat(256,256,cv.CV_8UC3)
result = cv.CreateMat(256,256,cv.CV_8UC3)
#...fill "rgb"

obj.do_something(rgb,result)

The error I get when running the Python code above is:

Boost.Python.ArgumentError: Python argument types in
MyClass.do_something(MyClass, cv2.cv.cvmat, cv2.cv.cvmat)
did not match C++ signature:
do_something(MyClass {lvalue}, cv::Mat, cv::Mat)

Is this a mismatch between cv2.cv.Mat and cv::Mat? I have OpenCV 2.3.1 and 2.4, both with Boost Python bindings.

In case it is relevant, this is what my Boost wrapper looks like:

#include <boost/python.hpp>
#include "MyClass.h"
#include <cv.h>
using namespace boost::python;

BOOST_PYTHON_MODULE(libmyclass) { 
  class_<MyClass>("MyClass", init<std::string, std::string, std::string>())
    .def("print_things", &MyClass::print_things)
    .def("do_something", &MyClass::do_something)
  ;
}

1 Answer


Boost Python does not automatically convert your cv2.cv.Mat (in Python) to a cv::Mat (in C++).

You will need to declare your C++ method to take a boost::python::object and add extra code in C++ that converts the object into a cv::Mat.
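
Applied to the MyClass from the question, that could look roughly like the sketch below. The names do_something_wrapper and the free-standing convertObj2Mat helper are hypothetical; convertObj2Mat stands in for a conversion routine like the member function shown further down.

#include <boost/python.hpp>
#include "MyClass.h"
#include <cv.h>
namespace bp = boost::python;

// Hypothetical conversion helper; see Stasm::convertObj2Mat below for one way to implement it.
cv::Mat convertObj2Mat(bp::object image);

// Wrapper that accepts generic Python objects and converts them before calling the real method.
void do_something_wrapper(MyClass &self, bp::object input, bp::object output)
{
  cv::Mat in = convertObj2Mat(input);
  cv::Mat out = convertObj2Mat(output);
  self.do_something(in, out);
}

BOOST_PYTHON_MODULE(libmyclass) {
  bp::class_<MyClass>("MyClass", bp::init<std::string, std::string, std::string>())
    .def("print_things", &MyClass::print_things)
    .def("do_something", &do_something_wrapper)  // free function exposed as a method
  ;
}

Note that the conversion code below builds cv::Mat headers that share the cv2.cv object's data rather than copying it.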

Here is an example from my wrapper for the STASM Active Shape Model library:

#ifndef ASMSearcher_HPP
#define ASMSearcher_HPP

#include <string>
#include <boost/python.hpp>
#include <opencv2/core/core.hpp>

class ASMSearcher;

/*
 * Wrapper around STASM ASMSearcher class so that we don't mix boost python code into the STASM library.
 */

struct memtrack_t {
  PyObject_HEAD
  void *ptr;
  Py_ssize_t size;
};

struct cvmat_t
{
  PyObject_HEAD
  CvMat *a;
  PyObject *data;
  size_t offset;
};

struct iplimage_t {
  PyObject_HEAD
  IplImage *a;
  PyObject *data;
  size_t offset;
};

namespace bp = boost::python;
class Stasm
{
  public:
    Stasm();
    Stasm(const std::string &conf_file0, const std::string &conf_file1);
    ~Stasm();

    bp::list detect(bp::object image, const std::string &conf_file0="",
        const std::string &conf_file1="");

  private:
    ASMSearcher *asmLandmarksSearcher;
    cv::Mat convertObj2Mat(bp::object image);
    cv::Mat convert_from_cviplimage(PyObject *o,const char *name);
    cv::Mat convert_from_cvmat(PyObject *o, const char* name);

};

#endif

And the corresponding implementation:

#include "stasm.hpp"
#include "stasm_ocv.hpp"

#include <opencv2/highgui/highgui.hpp>

Stasm::Stasm() 
{
  asmLandmarksSearcher = NULL;
}

Stasm::~Stasm() 
{
  if (asmLandmarksSearcher != NULL)
    delete asmLandmarksSearcher;
}

Stasm::Stasm(const std::string &conf_file0, const std::string &conf_file1)
{
  asmLandmarksSearcher = new ASMSearcher(conf_file0, conf_file1); 
}

/*Detect asm facial landmarks in image*/
bp::list Stasm::detect(bp::object image, 
    const std::string &conf_file0, 
    const std::string &conf_file1)
{

  const char *file0 = conf_file0 == "" ? NULL : conf_file0.c_str();
  const char *file1 = conf_file1 == "" ? NULL : conf_file1.c_str();

  // Convert pyobject to IplImage/Mat etc.
  cv::Mat img = convertObj2Mat(image);
  bool isColor = img.channels() == 3 ? true : false;

  int nlandmarks;
  int landmarks[500]; // space for x,y coords of up to 250 landmarks
  asmLandmarksSearcher->search(&nlandmarks, landmarks,
      "image_name", (const char*)img.data, img.cols, img.rows,
      isColor /* is_color */, file0 /* conf_file0 */, file1 /* conf_file1 */);
      //isColor /* is_color */, NULL /* conf_file0 */, NULL /* conf_file1 */);

  // Convert landmarks to python list object
  bp::list pyLandmarks;
  for (int i = 0; i < 2*nlandmarks; i++)
    pyLandmarks.append(landmarks[i]);

  return pyLandmarks;
}

cv::Mat Stasm::convert_from_cvmat(PyObject *o, const char* name)
{
  cv::Mat dest;
  cvmat_t *m = (cvmat_t*)o;
  void *buffer;
  Py_ssize_t buffer_len;

  m->a->refcount = NULL;
  if (m->data && PyString_Check(m->data))
  {
    assert(cvGetErrStatus() == 0);
    char *ptr = PyString_AsString(m->data) + m->offset;
    cvSetData(m->a, ptr, m->a->step);
    assert(cvGetErrStatus() == 0);
    dest = m->a;

  }
  else if (m->data && PyObject_AsWriteBuffer(m->data, &buffer, &buffer_len) == 0)
  {
    cvSetData(m->a, (void*)((char*)buffer + m->offset), m->a->step);
    assert(cvGetErrStatus() == 0);
    dest = m->a;
  }
  else
  {
    printf("CvMat argument '%s' has no data", name);
    //failmsg("CvMat argument '%s' has no data", name);
  }
  return dest;

}

cv::Mat Stasm::convert_from_cviplimage(PyObject *o,const char *name)
{
  cv::Mat dest;
  iplimage_t *ipl = (iplimage_t*)o;
  void *buffer;
  Py_ssize_t buffer_len;

  if (PyString_Check(ipl->data)) {
    cvSetData(ipl->a, PyString_AsString(ipl->data) + ipl->offset, ipl->a->widthStep);
    assert(cvGetErrStatus() == 0);
    dest = ipl->a;
  } else if (ipl->data && PyObject_AsWriteBuffer(ipl->data, &buffer, &buffer_len) == 0) {
    cvSetData(ipl->a, (void*)((char*)buffer + ipl->offset), ipl->a->widthStep);
    assert(cvGetErrStatus() == 0);
    dest = ipl->a;
  } else {
    printf("IplImage argument '%s' has no data", name);
  }
  return dest;
}

cv::Mat Stasm::convertObj2Mat(bp::object image)
{
  if(strcmp(image.ptr()->ob_type->tp_name,"cv2.cv.iplimage") == 0)
  {
    return convert_from_cviplimage(image.ptr(),image.ptr()->ob_type->tp_name);
  }
  else
    return convert_from_cvmat(image.ptr(), image.ptr()->ob_type->tp_name);
}
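
The module definition that the test script imports as pystasm is not shown above; it would look roughly like the following sketch. The module name pystasm is inferred from the import in the test code, and the default-argument spelling is just one of the ways Boost Python allows.

#include <boost/python.hpp>
#include "stasm.hpp"
namespace bp = boost::python;

BOOST_PYTHON_MODULE(pystasm) {
  bp::class_<Stasm>("Stasm", bp::init<const std::string&, const std::string&>())
    .def(bp::init<>())
    .def("detect", &Stasm::detect,
         (bp::arg("image"), bp::arg("conf_file0") = "", bp::arg("conf_file1") = ""))
  ;
}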

Sample code to test it looks like this:

#!/usr/bin/env python

import cv2
import pystasm
import numpy as np
import sys

DEFAULT_TEST_IMAGE = "428.jpg"

def getFacePointsMapping():
  mapping = {}
  fhd = open('mapping2.txt')
  line = fhd.readline()
  a = line.split()
  for i, n in enumerate(a):
    mapping[int(n)] = i

  return mapping

def drawFaceKeypoints(img, landmarks):
  mapping = getFacePointsMapping()
  numpyLandmarks = np.asarray(landmarks)
  numLandmarks = len(landmarks) / 2
  numpyLandmarks = numpyLandmarks.reshape(numLandmarks, -1)
  for i in range(0, len(landmarks) - 1, 2):
    pt = (landmarks[i], landmarks[i+1])
    #cv2.polylines(img, [numpyLandmarks], False, (0, 255, 0))
    number = mapping[i/2]
    cv2.circle(img, pt, 3, (255, 0, 0), cv2.cv.CV_FILLED)
    cv2.putText(img, str(number), pt, cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255)) 

  return img

def getFacePointsMapping():
  mapping = []
  fhd = open('mapping2.txt')
  line = fhd.readline()
  a = line.split()
  for n in a:
    mapping.append(n)

  return mapping

def main():

  asmsearcher = pystasm.Stasm('mu-68-1d.conf', 'mu-76-2d.conf')

  if len(sys.argv) == 2:
    imagename = sys.argv[1]
  else:
    imagename = DEFAULT_TEST_IMAGE

# Detect facial keypoints in image
  img = cv2.imread(imagename)
  img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  landmarks = asmsearcher.detect(cv2.cv.fromarray(img))

  img = drawFaceKeypoints(img, landmarks)

  #numpyLandmarks = np.asarray(landmarks)
  #numLandmarks = len(landmarks) / 2
  #numpyLandmarks = numpyLandmarks.reshape(numLandmarks, -1)
  #for i in range(0, len(landmarks) - 1, 2):
  #  pt = (landmarks[i], landmarks[i+1])
  #  #cv2.polylines(img, [numpyLandmarks], False, (0, 255, 0))
  #  number = mapping[i/2]
  #  cv2.circle(img, pt, 3, (255, 0, 0), cv2.cv.CV_FILLED)
  #  cv2.putText(img, str(number), pt, cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255)) 

  cv2.imshow("test", img)
  cv2.waitKey()

if __name__ == '__main__':
  main()

Sorry, I haven't had time to clean up the code. Note that you need to call cv2.cv.fromarray(numpy_array) for this to work. I'm still trying to figure out how to pass numpy arrays directly to Boost Python; if you have figured it out, please let me know :).

By the way, I should add that the code for converting between Boost objects and OpenCV's IplImage and Mat was taken from OpenCV's source code.
