
I am new to the Android NDK. I have started learning from the image-processing examples on ruckus and the IBM blog. I am trying to display the outline of an image. This is the code I am using:

package com.example;

import android.app.Activity;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.os.Bundle;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.ImageView;

public class OutlineClass extends Activity {
    private ImageView imageView;
    private Bitmap bitmap;
    private Button button;
    private Bitmap original;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.outline);

        original  = BitmapFactory.decodeResource(getResources(), R.drawable.wallace);
        bitmap    = BitmapFactory.decodeResource(getResources(), R.drawable.wallace);
        button    = (Button) findViewById(R.id.obutton);
        imageView = (ImageView) findViewById(R.id.oimageView2);
        button.setOnClickListener(new OnClickListener() {

            public void onClick(View v) {
                ShowOutline();
            }
        });
    }

    private void ShowOutline() {
        Bitmap oBitmap = original.copy(Bitmap.Config.ARGB_8888, true);
        Bitmap gBitmap = bitmap.copy(Bitmap.Config.ARGB_8888, true);

        showOutlineWithNative(oBitmap, gBitmap);
        imageView.setImageBitmap(gBitmap);
    }

    public native void showOutlineWithNative(Bitmap bmp1, Bitmap bmp2);
}

This is the C code I use to show the outline of the image:

#include <jni.h>
#include <stdlib.h>
#include <android/bitmap.h>
#include <android/log.h>

// standard NDK log macros (the tag string is arbitrary)
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, "OutlineClass", __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, "OutlineClass", __VA_ARGS__)

/*
To show outline
Pixel operation
*/
JNIEXPORT void JNICALL
Java_com_example_OutlineClass_showOutlineWithNative(JNIEnv* env, jobject obj,
                                                    jobject bitmapedges, jobject bitmapgray)
{
    AndroidBitmapInfo infogray;
    void*             pixelsgray;
    AndroidBitmapInfo infoedges;
    void*             pixelsedge;
    int               ret;
    int               y;
    int               x;
    int               sumX, sumY, sum;
    int               i, j;
    int               Gx[3][3];
    int               Gy[3][3];
    uint8_t           *graydata;
    uint8_t           *edgedata;

    LOGI("findEdges running");

    // Sobel kernel for the horizontal gradient
    Gx[0][0] = -1; Gx[0][1] = 0; Gx[0][2] = 1;
    Gx[1][0] = -2; Gx[1][1] = 0; Gx[1][2] = 2;
    Gx[2][0] = -1; Gx[2][1] = 0; Gx[2][2] = 1;

    // Sobel kernel for the vertical gradient
    Gy[0][0] =  1; Gy[0][1] =  2; Gy[0][2] =  1;
    Gy[1][0] =  0; Gy[1][1] =  0; Gy[1][2] =  0;
    Gy[2][0] = -1; Gy[2][1] = -2; Gy[2][2] = -1;

    if ((ret = AndroidBitmap_getInfo(env, bitmapedges, &infogray)) < 0) {
        LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
        return;
    }

    if ((ret = AndroidBitmap_getInfo(env, bitmapgray, &infoedges)) < 0) {
        LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
        return;
    }

    if (infogray.format != ANDROID_BITMAP_FORMAT_A_8) {
        LOGE("Bitmap format is not A_8 !");
        return;
    }

    if (infoedges.format != ANDROID_BITMAP_FORMAT_A_8) {
        LOGE("Bitmap format is not A_8 !");
        return;
    }

    if ((ret = AndroidBitmap_lockPixels(env, bitmapedges, &pixelsgray)) < 0) {
        LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
    }

    if ((ret = AndroidBitmap_lockPixels(env, bitmapgray, &pixelsedge)) < 0) {
        LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
    }

    // modify pixels with image processing algorithm

    LOGI("time to modify pixels....");

    graydata = (uint8_t *) pixelsgray;
    edgedata = (uint8_t *) pixelsedge;

    for (y = 0; y <= infogray.height - 1; y++) {
        for (x = 0; x < infogray.width - 1; x++) {
            sumX = 0;
            sumY = 0;
            // check boundaries
            if (y == 0 || y == infogray.height - 1) {
                sum = 0;
            } else if (x == 0 || x == infogray.width - 1) {
                sum = 0;
            } else {
                // calc X gradient
                for (i = -1; i <= 1; i++) {
                    for (j = -1; j <= 1; j++) {
                        sumX += (int) ((*(graydata + x + i + (y + j) * infogray.stride)) * Gx[i+1][j+1]);
                    }
                }

                // calc Y gradient
                for (i = -1; i <= 1; i++) {
                    for (j = -1; j <= 1; j++) {
                        sumY += (int) ((*(graydata + x + i + (y + j) * infogray.stride)) * Gy[i+1][j+1]);
                    }
                }

                sum = abs(sumX) + abs(sumY);
            }

            if (sum > 255) sum = 255;
            if (sum < 0) sum = 0;

            *(edgedata + x + y * infogray.width) = 255 - (uint8_t) sum;
        }
    }

    AndroidBitmap_unlockPixels(env, bitmapgray);
    AndroidBitmap_unlockPixels(env, bitmapedges);
}

The result I get is the same image with no change at all... I know the Sobel algorithm is used to detect outlines, but I don't know how it detects edges programmatically. Please let me know what is wrong in the code.

Thanks in advance.


1 Answer


I suspect there are a couple of problems here.

First, your NDK code checks that the input bitmaps are encoded as ALPHA_8 images (i.e. grayscale images):

if (infogray.format != ANDROID_BITMAP_FORMAT_A_8) {
    LOGE("Bitmap format is not A_8 !");
    return;
}

if (infoedges.format != ANDROID_BITMAP_FORMAT_A_8) {
    LOGE("Bitmap format is not A_8 !");
    return;
}

When you create the bitmaps, you pass Bitmap.Config.ARGB_8888 to the copy function. I suspect this makes these checks fail, so the function returns before processing the images. You should create the bitmaps with copy(Bitmap.Config.ALPHA_8, true) instead.
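
If you want to confirm that this is what is happening, a quick sanity check (just a sketch; the "Outline" log tag is a placeholder) is to log the config of the bitmaps you currently pass to the native function:

// Hypothetical sanity check: with the current code this prints
// ARGB_8888, which is why the A_8 format checks above fail.
Log.d("Outline", "config = " + oBitmap.getConfig());

(this needs android.util.Log imported).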

It also looks to me like you are trying to display the wrong bitmap at the end of the process: you pass the bitmaps to showOutlineWithNative in the wrong order. The bitmap you want to display is the bitmapedges parameter of the native function, whereas here you are displaying the grayscale input image.

To sum up, it seems to me that the correct ShowOutline code should be:

private void ShowOutline() {
    // grayscale version of the original bitmap:
    Bitmap gray = original.copy(Bitmap.Config.ALPHA_8, true);
    // receiver bitmap for the Sobel detector:
    Bitmap output = bitmap.copy(Bitmap.Config.ALPHA_8, true);

    showOutlineWithNative(output, gray);
    imageView.setImageBitmap(output);
}
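
One caveat that may be worth checking (this is an assumption on my part, not something from your code): copy(Bitmap.Config.ALPHA_8, true) keeps only the alpha channel of the source bitmap, so for a fully opaque photo the A_8 copy can come out as a constant 255 instead of real luminance. If that happens with your wallace image, you can build the grayscale input by hand; here is a minimal sketch (toGrayAlpha8 is a hypothetical helper, and 299/587/114 are the usual luma weights):

private Bitmap toGrayAlpha8(Bitmap src) {
    int w = src.getWidth();
    int h = src.getHeight();
    int[] argb = new int[w * h];
    src.getPixels(argb, 0, w, 0, 0, w, h);

    Bitmap out = Bitmap.createBitmap(w, h, Bitmap.Config.ALPHA_8);
    int rowBytes = out.getRowBytes(); // rows may be padded, so use the real stride
    byte[] gray = new byte[rowBytes * h];
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            int c = argb[y * w + x];
            int r = (c >> 16) & 0xFF;
            int g = (c >> 8) & 0xFF;
            int b = c & 0xFF;
            // weighted average of R, G, B gives the gray value
            gray[y * rowBytes + x] = (byte) ((r * 299 + g * 587 + b * 114) / 1000);
        }
    }
    out.copyPixelsFromBuffer(java.nio.ByteBuffer.wrap(gray));
    return out;
}

You would then use Bitmap gray = toGrayAlpha8(original); in place of the first copy call.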

Let me know how it goes!

Answered on 2012-10-29T14:13:16.780.