This is the OpenCV for Android tutorial code. I remember it took me a while to understand the JNI conventions. Let's look at the JNI code first:
#include <jni.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>
using namespace std;
using namespace cv;
extern "C" {
// JNI naming convention: Java_<package with '.' replaced by '_'>_<Class>_<method>
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial3_Sample3View_FindFeatures(
        JNIEnv* env, jobject thiz, jint width, jint height, jbyteArray yuv, jintArray bgra)
{
    jbyte* _yuv  = env->GetByteArrayElements(yuv, 0);
    jint*  _bgra = env->GetIntArrayElements(bgra, 0);

    // Wrap the Java arrays in Mat headers without copying the pixel data.
    Mat myuv(height + height/2, width, CV_8UC1, (unsigned char*)_yuv);   // full NV21 frame: Y plane + interleaved VU plane
    Mat mbgra(height, width, CV_8UC4, (unsigned char*)_bgra);            // output image backed by the Java int[]
    Mat mgray(height, width, CV_8UC1, (unsigned char*)_yuv);             // Y plane only, used as the grayscale image

    // Note the BGRA byte order: an ARGB pixel stored in a Java int[] is seen as
    // B, G, R, A bytes at the native level (little-endian), so we convert to BGRA.
    cvtColor(myuv, mbgra, CV_YUV420sp2BGR, 4);

    // Detect FAST corners on the grayscale image and mark them on the output.
    vector<KeyPoint> v;
    FastFeatureDetector detector(50);
    detector.detect(mgray, v);
    for (size_t i = 0; i < v.size(); i++)
        circle(mbgra, Point(v[i].pt.x, v[i].pt.y), 10, Scalar(0, 0, 255, 255));  // red in BGRA

    env->ReleaseIntArrayElements(bgra, _bgra, 0);
    env->ReleaseByteArrayElements(yuv, _yuv, 0);
}
}
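The byte-order comment is worth double-checking. A Java int[] filled with ARGB_8888 pixels sits in native memory with the least significant byte first on a little-endian device (which Android devices are), so the native side sees each pixel as B, G, R, A — exactly the CV_8UC4 layout the code writes into. Here is a small standalone check, not part of the tutorial, that just mimics the device's little-endian layout in plain desktop Java:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Standalone sketch: shows that an ARGB int, laid out little-endian as on an
// Android device, reads back byte-by-byte as B, G, R, A from native code.
public class PixelOrderCheck {
    public static void main(String[] args) {
        int argb = 0xFF0000FF; // opaque blue: A=0xFF, R=0x00, G=0x00, B=0xFF
        ByteBuffer buf = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
        buf.putInt(argb);
        // Prints: ff 00 00 ff  -> B, G, R, A, the order the JNI code fills.
        for (byte b : buf.array()) {
            System.out.printf("%02x ", b);
        }
        System.out.println();
    }
}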
Then comes the Java code:
package org.opencv.samples.tutorial3;
import android.content.Context;
import android.graphics.Bitmap;
class Sample3View extends SampleViewBase {

    public Sample3View(Context context) {
        super(context);
    }

    @Override
    protected Bitmap processFrame(byte[] data) {
        int frameSize = getFrameWidth() * getFrameHeight();
        int[] rgba = new int[frameSize];

        // The native call fills rgba with the processed frame as ARGB_8888 pixels.
        FindFeatures(getFrameWidth(), getFrameHeight(), data, rgba);

        Bitmap bmp = Bitmap.createBitmap(getFrameWidth(), getFrameHeight(), Bitmap.Config.ARGB_8888);
        bmp.setPixels(rgba, 0 /* offset */, getFrameWidth() /* stride */, 0, 0, getFrameWidth(), getFrameHeight());
        return bmp;
    }

    // Implemented in the JNI code above.
    public native void FindFeatures(int width, int height, byte[] yuv, int[] rgba);

    static {
        // Loads libnative_sample.so, which exports FindFeatures.
        System.loadLibrary("native_sample");
    }
}
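One detail that is easy to miss: the data array handed to processFrame is the raw NV21 preview buffer, so it holds 1.5 bytes per pixel, while the rgba output array holds exactly one int per pixel. A quick standalone sketch of the arithmetic (the 640x480 size is just an example, not something the tutorial fixes):

// Standalone sketch of the buffer sizes this class juggles.
public class FrameSizes {
    public static void main(String[] args) {
        int width = 640, height = 480;        // example preview size
        int pixels = width * height;          // == frameSize in processFrame
        int yuvBytes = pixels + pixels / 2;   // Y plane + interleaved VU plane (NV21)
        int rgbaInts = pixels;                // one ARGB_8888 int per pixel
        System.out.println("NV21 bytes: " + yuvBytes + ", ARGB ints: " + rgbaInts);
        // This is also why the JNI side wraps the YUV buffer as a
        // (height + height/2) x width single-channel Mat.
    }
}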