
I am getting camera frames from the Android camera preview callback as a byte array and passing them to JNI code. Since the Java byte array cannot be used directly in C++, I convert it to an integer array like this:

    JNIEXPORT void JNICALL Java_com_omobio_armadillo_Armadillo_onAndroidCameraFrameNative(
            JNIEnv* env, jobject, jbyteArray data, jint dataLen, jint width,
            jint height, jint bitsPerComponent) {
        Armadillo *armadillo = Armadillo::singleton();

        jbyte *jArr = env->GetByteArrayElements(data, NULL);
        // note: a variable-length array lives on the stack; large frames may overflow it
        int dataChar[dataLen];
        for (int i = 0; i < dataLen; i++) {
            dataChar[i] = (int) jArr[i];
        }
        // ... (rest of the method: the converted frame is handed to the engine)
        // release the pinned Java array; JNI_ABORT because we only read from it
        env->ReleaseByteArrayElements(data, jArr, JNI_ABORT);
    }
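
For reference, the Java-side declaration that matches this JNI symbol would look roughly like this (a sketch reconstructed from the function name Java_com_omobio_armadillo_Armadillo_onAndroidCameraFrameNative; the parameter names are my assumptions):

    package com.omobio.armadillo;

    public class Armadillo {
        // The package, class, and method name are dictated by the JNI symbol above.
        public native void onAndroidCameraFrameNative(byte[] data, int dataLen,
                int width, int height, int bitsPerComponent);
    }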

Then I use the data to create a CCImage and build a texture from it like this:

    void AppClass::drawAndroidCameraFrame() {
        CCLOG("drawAndroidCameraFrame");
        int nextBufferIndex = !_bufferIndex;
        if (mIsNewFrameReceived) {
            mIsNewFrameReceived = false;
            return;
        }
        CCLOG("drawAndroidCameraFrame - creating CCImage");
        _image[nextBufferIndex] = new CCImage();
        _image[nextBufferIndex]->initWithImageData(mFramePData, mFrameDataLen,
                mFrameFormat, mFrameWidth, mFrameHeight, mBitsPerComponent);
        if (mIsNewFrameReceived) {
            CCLOG("drawAndroidCameraFrame - releasing frame image");
            _image[nextBufferIndex]->release();
            mIsNewFrameReceived = false;
            CCLOG("camera frame process cancelled 2");
            return;
        }
        CCLOG("drawAndroidCameraFrame - creating texture2d");
        _texture[nextBufferIndex] = new CCTexture2D();
        _texture[nextBufferIndex]->initWithImage(_image[nextBufferIndex]);

        if (!_videoSprite) {
            CCLOG("Creating new sprite");

            if (mIsNewFrameReceived) {
                CCLOG("drawAndroidCameraFrame - releasing image and texture");
                _image[nextBufferIndex]->release();
                _texture[nextBufferIndex]->release();
                mIsNewFrameReceived = false;
                CCLOG("camera frame process cancelled 3");
                return;
            }

            CCLOG("drawAndroidCameraFrame - creating video sprite");
            _videoSprite = new CollisionBitmapSprite();
            _videoSprite->initWithTexture(_texture[nextBufferIndex]);

            // get the director
            CCDirector *director = CCDirector::sharedDirector();

            // ask the director for the window size
            CCSize size = director->getWinSize();
            // position the sprite at the center of the screen
            _videoSprite->setPosition(ccp(size.width / 2, size.height / 2));

            // get the scale factor (stack-allocated to avoid leaking a heap CCSize)
            CCSize imageSize(_image[nextBufferIndex]->getWidth(),
                    _image[nextBufferIndex]->getHeight());

            CCSize scale = getCameraFrameScaleFactor(imageSize);
            // CCLOG("Scale factor is x=%f and y=%f", scale.width, scale.height);

            _videoSprite->setScaleX(scale.width);
            _videoSprite->setScaleY(scale.height);

            if (mIsNewFrameReceived) {
                _image[nextBufferIndex]->release();
                _texture[nextBufferIndex]->release();
                mIsNewFrameReceived = false;
                CCLOG("camera frame process cancelled 4");
                return;
            }

            _videoSprite->setTexture(_texture[nextBufferIndex]);

            Shaders::addProgram(_videoSprite, (char *) Shaders::textureVertShader,
                    mFrameWidth, mFrameHeight);
            GLuint i = Shaders::addProgram(_videoSprite, (char *) Shaders::vertShader,
                    (char *) Shaders::yuvtorgb);
            Shaders::setYuvtorgbParameters(_videoSprite, i);
            addChild(_videoSprite, -1);

        } else {
            _videoSprite->setTexture(_texture[nextBufferIndex]);
        }
        // CCLOG("Armadillo::drawCameraFrame completed successfully");

        // release the previous buffer's image and texture
        if (_image[_bufferIndex]) {
            _image[_bufferIndex]->release();
        }

        if (_texture[_bufferIndex]) {
            _texture[_bufferIndex]->release();
        }

        _bufferIndex = nextBufferIndex;
    }

Since the image is in YUV (NV21) format, I apply a shader to the frame that converts it to RGB. The shader programs are as follows:

Fragment shader:

    const char *Shaders::yuvtorgb = MULTI_LINE_STRING(
            precision highp float;
            varying vec2 v_yTexCoord;
            varying vec4 v_effectTexCoord;

            uniform sampler2D y_texture;
            uniform sampler2D u_texture;
            uniform sampler2D v_texture;

            void main()
            {
                float y = texture2D(y_texture, v_yTexCoord).r;
                float u = texture2D(u_texture, v_yTexCoord).r;
                float v = texture2D(v_texture, v_yTexCoord).r;

                y = 1.1643 * (y - 0.0625);
                u = u - 0.5;
                v = v - 0.5;

                float r = y + 1.5958 * v;
                float g = y - 0.39173 * u - 0.81290 * v;
                float b = y + 2.017 * u;

                gl_FragColor = vec4(r, g, b, 1.0);
            }
    );
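
The constants above are the usual BT.601 video-range YUV-to-RGB coefficients. As a quick sanity check of the math outside the shader, here is the same conversion for a single pixel in plain Java (my own sketch, not code from the project):

    public class YuvToRgbCheck {
        public static void main(String[] args) {
            // mid-gray input in video-range YUV: Y = U = V = 128
            float y = 1.1643f * (128 / 255.0f - 0.0625f);
            float u = 128 / 255.0f - 0.5f;
            float v = 128 / 255.0f - 0.5f;
            float r = y + 1.5958f * v;
            float g = y - 0.39173f * u - 0.81290f * v;
            float b = y + 2.017f * u;
            // expect r, g, b all close to 0.51 (neutral gray); they differ only
            // slightly because 128/255 is not exactly 0.5
            System.out.printf("r=%.3f g=%.3f b=%.3f%n", r, g, b);
        }
    }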

Vertex shader:

    const char *Shaders::vertShader = MULTI_LINE_STRING(
            attribute vec4 a_position;
            attribute vec2 a_yTexCoord;
            attribute vec4 a_effectTexCoord;

            varying vec2 v_yTexCoord;
            varying vec4 v_effectTexCoord;
            uniform mat4 u_MVPMatrix;
            void main()
            {
                v_yTexCoord = a_yTexCoord;
                v_effectTexCoord = a_effectTexCoord;
                gl_Position = u_MVPMatrix * a_position;
            }
    );

The addProgram method:

    GLuint Shaders::addProgram(CCSprite *sprite, char *vertShader,
            char *fragShader) {
        CCGLProgram *glProgram = new CCGLProgram();
        if (!glProgram->initWithVertexShaderByteArray(vertShader, fragShader)) {
            CCLOG("Shader problem: %s\n %s \n%s", glProgram->vertexShaderLog(),
                    glProgram->fragmentShaderLog(), glProgram->programLog());
        }

        glProgram->addAttribute(kCCAttributeNamePosition, kCCVertexAttrib_Position);
        glProgram->addAttribute(kCCAttributeNameTexCoord, kCCVertexAttrib_TexCoords);
        if (!glProgram->link()) {
            CCLOG("Shader problem: %s\n %s \n%s", glProgram->vertexShaderLog(),
                    glProgram->fragmentShaderLog(), glProgram->programLog());
        }
        glProgram->updateUniforms();

        sprite->setShaderProgram(glProgram);
        return glProgram->getProgram();
    }

Then I apply the shader to the frame sprite:

    GLuint i = Shaders::addProgram(_videoSprite, (char *) Shaders::vertShader,
            (char *) Shaders::yuvtorgb);

I get a green and pink image frame: the dark areas come out green and the bright areas come out pink.

(Screenshot of the resulting frame omitted.)

I am stuck on this and have not found any suitable solution. Can anyone help me solve it?


1 Answer


I am not sure if you are trying to get the RGB from the camera in order to use it outside the phone. But maybe you can convert YUV to RGB on the Android side, and then pass the RGB pixel array?

Here is the code I used to convert to RGB:

For opening the camera I used:

    try {
        camera = Camera.open();
        cameraParam = camera.getParameters();
        cameraParam.setPreviewFormat(ImageFormat.NV21);
        List<int[]> fps = cameraParam.getSupportedPreviewFpsRange();
        camera.setDisplayOrientation(90);
        camera.setParameters(cameraParam);
        cameraParam = camera.getParameters();
        camera.startPreview();

        // wait for frames to come in
        camera.setPreviewCallback(new PreviewCallback() {
            @Override
            public void onPreviewFrame(byte[] data, Camera camera) {
                frameHeight = camera.getParameters().getPreviewSize().height;
                frameWidth = camera.getParameters().getPreviewSize().width;
                int[] rgb = new int[frameWidth * frameHeight]; // one int per pixel
                // decodeYUV420SP fills (and returns) the pixel array in ARGB format
                int[] pixels = decodeYUV420SP(rgb, data, frameWidth, frameHeight);
            }
        });
    } catch (Exception e) {
        Log.e("camera", "  error camera  ");
    }

I got decodeYUV420SP from a different post; you can find it here.

And here is the code itself from the above post:

    // Byte decoder : ---------------------------------------------------------
    int[] decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
        Log.e("camera", "   decodeYUV420SP  ");
        Log.e("camera", "   Clearing Sums and Pixel Arrays  ");
        // app-specific statistics state; not needed for the conversion itself
        sumRED = 0;
        sumGREEN = 0;
        sumBLUE = 0;
        rStandardDeviation.clear();
        gStandardDeviation.clear();
        bStandardDeviation.clear();

        final int frameSize = width * height;

        for (int j = 0, yp = 0; j < height; j++) {
            int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
            for (int i = 0; i < width; i++, yp++) {
                int y = (0xff & (yuv420sp[yp])) - 16;
                if (y < 0)
                    y = 0;
                if ((i & 1) == 0) {
                    v = (0xff & yuv420sp[uvp++]) - 128;
                    u = (0xff & yuv420sp[uvp++]) - 128;
                }

                // fixed-point YUV -> RGB (coefficients scaled by 1024)
                int y1192 = 1192 * y;
                int r = (y1192 + 1634 * v);
                int g = (y1192 - 833 * v - 400 * u);
                int b = (y1192 + 2066 * u);

                if (r < 0)
                    r = 0;
                else if (r > 262143)
                    r = 262143;
                if (g < 0)
                    g = 0;
                else if (g > 262143)
                    g = 262143;
                if (b < 0)
                    b = 0;
                else if (b > 262143)
                    b = 262143;

                // pack into ARGB_8888
                rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
            }
        }
        return rgb;
    }

Then, once you have the rgb pixel array returned by decodeYUV420SP, you can reconstruct the image with:

    Bitmap bitmap = Bitmap.createBitmap(rgb, frameWidth, frameHeight, Bitmap.Config.ARGB_8888);
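
If you take this route, you could also hand the decoded pixels straight to your native code instead of the raw NV21 bytes. A rough sketch (onAndroidCameraFrameRgbNative is a hypothetical native method, not part of your existing code):

    // Hypothetical native entry point that receives already-decoded RGB pixels.
    private native void onAndroidCameraFrameRgbNative(int[] rgb, int width, int height);

    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        Camera.Size size = camera.getParameters().getPreviewSize();
        int[] rgb = new int[size.width * size.height];
        decodeYUV420SP(rgb, data, size.width, size.height);
        // on the C++ side, GetIntArrayElements yields a jint* pixel buffer
        onAndroidCameraFrameRgbNative(rgb, size.width, size.height);
    }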

Hope it helps. My code might have errors, so double-check things, but in general it works for me.

Answered 2013-05-07T23:53:35.293