2

在 Leibniz Release 1.10 中似乎也存在关于接收颜色帧的问题:当使用 TangoService_connectOnFrameAvailable(TANGO_CAMERA_COLOR, NULL, onFrameAvailable) 注册回调时,onFrameAvailable() 将永远不会被调用,或者 TangoService_connectOnFrameAvailable() 崩溃并出现以下错误:

04-20 13:29:44.384: E/tango_client_api(4712): TangoErrorType TangoService_connectOnFrameAvailable(TangoCameraId, void*, void (*)(void*, TangoCameraId, const TangoImageBuffer*)): 内部错误: connectSurface(), cam id 0, 内部失败。

发行说明说

[...] config_enable_color_camera 已添加到配置标志中。如果访问彩色相机,我们建议您始终将此标志明确设置为 true。在调用 TangoService_connect() 后,您必须为 TangoService_connectOnFrameAvailable() 或 TangoService_connectTextureId() 设置标志为 true。[...]

因此,如果我在调用 TangoService_connect() 和 TangoService_connectOnFrameAvailable() 之间将该标志设置为 true,则 onFrameAvailable() 回调永远不会被调用;而如果我在调用 TangoService_connect() 之前就将该标志设置为 true,则 TangoService_connectOnFrameAvailable() 将始终崩溃。

因此,我做错了什么?是否有可用的代码片段或其他东西?那真的很有帮助...不幸的是,这些示例都没有使用彩色框架...

伙计,在 Kalman Release 1.9 遇到类似问题后,我开始怀疑 SDK 在发布之前是否经过了彻底的测试......

4

2 回答 2

1

好吧,假设问题不是我在评论部分提到的。这是测试 onFrameAvailable 回调的代码片段。

注意:我已经为此修改了 Tango-examples-c 存储库中的HelloTangoJni 示例

在 TangoHandler.h 添加

 TangoErrorType ConnectYUVFrameCallback();

修改 TangoHandler.cc

TangoErrorType TangoHandler::SetupConfig() {
  // Obtain the default service configuration.
  // TANGO_CONFIG_DEFAULT enables Motion Tracking and disables Depth
  // Perception.
  tango_config_ = TangoService_getConfig(TANGO_CONFIG_DEFAULT);
  if (tango_config_ == nullptr) {
    return TANGO_ERROR;
  }
  // Since the Leibniz release (1.10), config_enable_color_camera must be
  // explicitly set to true BEFORE TangoService_connect() when the color
  // camera is accessed; otherwise connectOnFrameAvailable() fails or the
  // callback never fires. Propagate a setBool failure instead of silently
  // ignoring it, so callers can tell configuration actually succeeded.
  if (TangoConfig_setBool(tango_config_, "config_enable_color_camera",
                          true) != TANGO_SUCCESS) {
    return TANGO_ERROR;
  }
  return TANGO_SUCCESS;
}


// Register onFrameAvailable() to receive YUV (NV21) frames from the color
// camera. Must be called AFTER TangoService_connect().
// Returns the TangoErrorType reported by the service.
TangoErrorType TangoHandler::ConnectYUVFrameCallback() {
    TangoErrorType onFrameErrorType = TangoService_connectOnFrameAvailable(
        TANGO_CAMERA_COLOR, nullptr, onFrameAvailable);
    // Bug fix: the original logged SUCCESS unconditionally, even when the
    // failure branch had just run. Log exactly one of the two outcomes.
    if (onFrameErrorType != TANGO_SUCCESS) {
        LOGI("GOOGLE TANGO ONFRAMEAVAILABLE FAILED!");
    } else {
        LOGI("GOOGLE TANGO ONFRAMEAVAILABLE SUCCESS!");
    }
    return onFrameErrorType;
}

// Callback invoked by the Tango service for every color frame delivered;
// logs the frame's dimensions.
static void onFrameAvailable( void* context, const TangoCameraId id, const TangoImageBuffer* buffer )
{
  const int frameWidth  = buffer->width;
  const int frameHeight = buffer->height;
  LOGI("width and height is: %d,%d", frameWidth, frameHeight);
}

在 TangoNative.cc 添加

// JNI bridge for TangoJNINative.connectOnFrameAvailableCallback():
// forwards to TangoHandler::ConnectYUVFrameCallback() and returns the
// resulting TangoErrorType as a plain jint.
// Bug fix: the original had JNICALL fused to the function name
// ("JNICALLJava_..."), which does not compile — a separator is required
// between the calling-convention macro and the exported symbol name.
JNIEXPORT jint JNICALL
Java_com_projecttango_experiments_nativehellotango_TangoJNINative_connectOnFrameAvailableCallback(
    JNIEnv*, jobject)
{
    return static_cast<int>(tango_handler.ConnectYUVFrameCallback());
}

在 TangoJNINative.java 添加

// Connect the onFrameAvailable callback.
public static native int connectOnFrameAvailableCallback();

在 HelloTangoActivity.java 中修改 onResume()

@Override
protected void onResume() {
   super.onResume();
   // Set up the Tango configuration (fixed typo: "configuraturation").
   // This is where config_enable_color_camera gets enabled on the native side.
   TangoJNINative.setupConfig();
   // Ordering matters: the color-frame callback must be registered AFTER
   // TangoService_connect() has been called via TangoJNINative.connect().
   TangoJNINative.connect();
   // Non-zero status means callback registration failed (TangoErrorType).
   int status = TangoJNINative.connectOnFrameAvailableCallback();
   mIsTangoServiceConnected = true;
}
于 2015-04-22T22:03:53.153 回答
0

这是我将 NV21 转换为 RGB 帧的代码。也许它有什么用...

// Converts one Tango color frame from YUV NV21 to ARGB, pixel by pixel.
// NOTE(review): the computed argbL value is overwritten every iteration and
// never stored anywhere — presumably the write-out to a destination buffer
// was stripped for this snippet; add it where argbL is assigned.
static void
  cb_onFrameAvailable
  (
    void*                     contextA,      // user context (unused here)
    TangoCameraId             idA,           // camera that produced the frame (unused here)
    const TangoImageBuffer*  imageBufferA    // NV21 frame: W*H luma bytes, then interleaved V/U
  )
{
  // --- local constants ------------------------------

  // image width and height
  const int W               = imageBufferA->width;
  const int H               = imageBufferA->height;

  // sizes of Y, U, and V pixel arrays.
  const int sizeOfYDataL   = W * H;

  // indices, marking the begin of the y, u, and v data in the pixel buffer.
  // NV21 layout: full-resolution Y plane first, then one interleaved
  // chroma plane at half resolution in both dimensions.
  const int beginOfYDataL  = 0;
  const int beginOfUVDataL  = sizeOfYDataL;

  // YUV, Y, and UV pixel sub arrays.
  // NOTE(review): `byte` is assumed to be a typedef for an unsigned 8-bit
  // type defined elsewhere in this project — confirm, as a signed char
  // here would corrupt the conversion for values > 127.
  const byte*  yuvArrL     = imageBufferA->data;
  const byte*  yArrL       = &yuvArrL[ beginOfYDataL  ];
  const byte*  uvArrL      = &yuvArrL[ beginOfUVDataL ];

  // --- local variables ------------------------------

  // image pixel coordinates.
  int xL,yL;

  // halved image pixel coordinates (index into the half-resolution UV plane).
  int hxL,hyL;

  // ARGB value.
  int argbL;

  // --------------------------------------------------

  // translate YUV NV21 -> ARGB, using
  //
  //      / R \   / 1.000   0.000   1.596 \   /   Y   \
  //      | G | = | 1.000  -0.391  -0.813 | * | U-128 |
  //      \ B /   \ 1.000   2.018   0.000 /   \ V-128 /
  //

  // Note: start value yL=1 as the first scan line of the color image is ..
  //       .. reserved for metadata instead of image pixels.
  //       (Tango-specific quirk — confirm against the device's docs.)

  for( yL=1,hyL=0; yL<H; yL++,hyL=yL>>1 )
  {
    for( xL=0,hxL=0; xL<W; xL++,hxL=xL>>1 )
    {
      // NV21 interleaves chroma as V,U (not U,V) — hence the even byte is
      // V and the odd byte is U. Both chroma samples are recentered to
      // signed values around zero before the matrix multiply.
      const int y = static_cast<int>( yArrL [  yL*W +    xL   ] )      ;
      const int v = static_cast<int>( uvArrL[ hyL*W + 2*hxL   ] ) - 128;
      const int u = static_cast<int>( uvArrL[ hyL*W + 2*hxL+1 ] ) - 128;

      // apply the conversion matrix row by row.
      int R = static_cast<int>( y               + ( 1.596f*v) );
      int G = static_cast<int>( y + (-0.391f*u) + (-0.813f*v) );
      int B = static_cast<int>( y + ( 2.018f*u)               );

      // clip RGB values to [0..255].
      R = R < 0 ? 0 : (R > 255 ? 255 : R);
      G = G < 0 ? 0 : (G > 255 ? 255 : G);
      B = B < 0 ? 0 : (B > 255 ? 255 : B);

      // combine to ARGB value (alpha forced to fully opaque).
      argbL = 0xff000000 | (R << 16) | (G << 8) | B;
    } // for
  } // for
} // function
于 2015-04-22T10:11:29.293 回答