我是 android ndk 的新手。我已经开始通过 ruckus 和 IBM 博客的图像处理示例进行学习。我正在尝试显示图像的轮廓。这是我正在使用的代码
package com.example;
import android.app.Activity;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.os.Bundle;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.ImageView;
public class OutlineClass extends Activity{
private ImageView imageView;
private Bitmap bitmap;
private Button button;
private Bitmap original;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.outline);
original = BitmapFactory.decodeResource(getResources(), R.drawable.wallace);
bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.wallace);
button = (Button) findViewById(R.id.obutton);
imageView = (ImageView) findViewById(R.id.oimageView2);
button.setOnClickListener(new OnClickListener() {
public void onClick(View v) {
ShowOutline();
}
});
}
private void ShowOutline() {
Bitmap oBitmap = original.copy(Bitmap.Config.ARGB_8888, true);
Bitmap gBitmap = bitmap.copy(Bitmap.Config.ARGB_8888, true);
showOutlineWithNative(oBitmap,gBitmap );
imageView.setImageBitmap(gBitmap);
}
public native void showOutlineWithNative(Bitmap bmp1, Bitmap bmp2);
}
这是我用来显示图像行的c代码
/*
To show outline
Pixel operation
*/
JNIEXPORT void JNICALL Java_com_example_OutlineClass_showOutlineWithNative(JNIEnv
* env, jobject obj, jobject bitmapedges,jobject bitmapgray)
{
AndroidBitmapInfo infogray;
void* pixelsgray;
AndroidBitmapInfo infoedges;
void* pixelsedge;
int ret;
int y;
int x;
int sumX,sumY,sum;
int i,j;
int Gx[3][3];
int Gy[3][3];
uint8_t *graydata;
uint8_t *edgedata;
LOGI("findEdges running");
Gx[0][0] = -1;Gx[0][1] = 0;Gx[0][2] = 1;
Gx[1][0] = -2;Gx[1][1] = 0;Gx[1][2] = 2;
Gx[2][0] = -1;Gx[2][1] = 0;Gx[2][2] = 1;
Gy[0][0] = 1;Gy[0][1] = 2;Gy[0][2] = 1;
Gy[1][0] = 0;Gy[1][1] = 0;Gy[1][2] = 0;
Gy[2][0] = -1;Gy[2][1] = -2;Gy[2][2] = -1;
if ((ret = AndroidBitmap_getInfo(env, bitmapedges, &infogray)) < 0) {
LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
return;
}
if ((ret = AndroidBitmap_getInfo(env, bitmapgray, &infoedges)) < 0) {
LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
return;
}
if (infogray.format != ANDROID_BITMAP_FORMAT_A_8) {
LOGE("Bitmap format is not A_8 !");
return;
}
if (infoedges.format != ANDROID_BITMAP_FORMAT_A_8) {
LOGE("Bitmap format is not A_8 !");
return;
}
if ((ret = AndroidBitmap_lockPixels(env, bitmapedges, &pixelsgray)) < 0) {
LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
}
if ((ret = AndroidBitmap_lockPixels(env, bitmapgray, &pixelsedge)) < 0) {
LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
}
// modify pixels with image processing algorithm
LOGI("time to modify pixels....");
graydata = (uint8_t *) pixelsgray;
edgedata = (uint8_t *) pixelsedge;
for (y=0;y<=infogray.height - 1;y++) {
for (x=0;x<infogray.width -1;x++) {
sumX = 0;
sumY = 0;
// check boundaries
if (y==0 || y == infogray.height-1) {
sum = 0;
} else if (x == 0 || x == infogray.width -1) {
sum = 0;
} else {
// calc X gradient
for (i=-1;i<=1;i++) {
for (j=-1;j<=1;j++) {
sumX += (int) ( (*(graydata + x + i + (y + j)
* infogray.stride)) * Gx[i+1][j+1]);
}
}
// calc Y gradient
for (i=-1;i<=1;i++) {
for (j=-1;j<=1;j++) {
sumY += (int) ( (*(graydata + x + i + (y + j)
* infogray.stride)) * Gy[i+1][j+1]);
}
}
sum = abs(sumX) + abs(sumY);
}
if (sum>255) sum = 255;
if (sum<0) sum = 0;
*(edgedata + x + y*infogray.width) = 255 - (uint8_t) sum;
}
}
AndroidBitmap_unlockPixels(env, bitmapgray);
AndroidBitmap_unlockPixels(env, bitmapedges);
}
我得到的结果是相同的图像,没有任何变化...我知道用于检测轮廓的 sebel 算法,但我不知道它是如何以编程方式检测边缘的,请让我知道代码中有什么问题。
提前致谢