I am fairly new to Stack Overflow, but I will try to explain this problem thoroughly.
I am currently using an Axis IP camera to deliver live video to a CARMA board. GStreamer pulls these frames with an RTSP client, performs the RTP depayloading, and then decodes the h.264 images coming from the camera. When I run this process on my computer (currently equipped with an i7 processor), there is no lag and the stream is rendered to the screen in real time, updating at 30 Hz.

The problem arises when I switch over to the CARMA board I am working with. Instead of displaying in real time, the appsink receives buffers at a much slower rate: rather than 30 Hz, it averages only about 10 Hz, and that is when no other processing is happening on the board. It should also be noted that no frames are dropped; the appsink receives every buffer, just not in real time. Any insight into why this is happening would be greatly appreciated. I have already checked that timestamps are not the issue (i.e., the rate at which the appsink receives buffers does not change whether or not I use GST timestamps). The CARMA board is currently running Ubuntu 11.04 and the code is compiled with GCC. Below are some code snippets with their respective explanations.
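First, for context: the ~10 Hz figure above comes from a simple frame counter in the appsink callback, along the lines of this sketch (the names here are illustrative, not my exact instrumentation):

/* Illustrative only: count buffers seen by the appsink callback and
 * report the average rate once per second. */
static guint64 frames_in_window = 0;
static gint64  window_start_us  = 0;

static void count_appsink_frame (void)
{
  gint64 now_us = g_get_monotonic_time (); /* microseconds */
  if (window_start_us == 0)
    window_start_us = now_us;
  frames_in_window++;
  if (now_us - window_start_us >= G_USEC_PER_SEC) {
    g_print ("appsink rate: %.1f Hz\n",
        frames_in_window * (gdouble) G_USEC_PER_SEC / (now_us - window_start_us));
    frames_in_window = 0;
    window_start_us  = now_us;
  }
}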
Some defines:
#define APPSINK_CAPS "video/x-raw-yuv,format=(fourcc)I420"
#define RTSP_URI "rtsp://(ipaddress)/axis-media/media.amp?videocodec=h264"
#define RTSP_LATENCY 0
#define RTSP_BUFFER_MODE 0
#define RTSP_RTP_BLOCKSIZE 65536
GStreamer pipeline setup code:
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
data.rtspsrc = gst_element_factory_make("rtspsrc", NULL);
data.rtph264depay = gst_element_factory_make("rtph264depay", NULL);
data.nv_omx_h264dec = gst_element_factory_make("nv_omx_h264dec", NULL);
data.appsink = gst_element_factory_make("appsink", NULL);
if (!data.rtspsrc || !data.rtph264depay || !data.nv_omx_h264dec || !data.appsink) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Set element properties */
g_object_set( data.rtspsrc, "location", RTSP_URI,
"latency", RTSP_LATENCY,
"buffer-mode", RTSP_BUFFER_MODE,
"rtp-blocksize", RTSP_RTP_BLOCKSIZE,
NULL);
g_object_set( data.rtph264depay, "byte-stream", FALSE, NULL);
g_object_set( data.nv_omx_h264dec, "use-timestamps", TRUE, NULL);
/* Configure appsink. This plugin will allow us to access buffer data */
GstCaps *appsink_caps;
appsink_caps = gst_caps_from_string (APPSINK_CAPS);
g_object_set (data.appsink, "emit-signals", TRUE,
"caps", appsink_caps,
NULL);
g_signal_connect (data.appsink, "new-buffer", G_CALLBACK (appsink_new_buffer), &data);
gst_caps_unref (appsink_caps);
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline) {
g_printerr ("Pipeline could not be created.\n");
return -1;
}
/* Build the pipeline */
/* Note that we are NOT linking the source at this point. We will do it later. */
gst_bin_add_many (GST_BIN(data.pipeline),
data.rtspsrc,
data.rtph264depay,
data.nv_omx_h264dec,
data.appsink,
NULL);
if (gst_element_link (data.rtph264depay, data.nv_omx_h264dec) != TRUE) {
g_printerr ("rtph264depay and nv_omx_h264dec could not be linked.\n");
gst_object_unref (data.pipeline);
return -1;
}
if (gst_element_link (data.nv_omx_h264dec, data.appsink) != TRUE) {
g_printerr ("nv_omx_h264dec and appsink could not be linked.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Connect to the pad-added signal (CALLBACK!) */
g_signal_connect (data.rtspsrc, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* Add a probe to perform hashing on H.264 bytestream */
GstPad *rtph264depay_src_pad = gst_element_get_static_pad (data.rtph264depay, "src");
gst_pad_add_buffer_probe (rtph264depay_src_pad, G_CALLBACK (hash_and_report), (gpointer)(&data));
gst_object_unref (rtph264depay_src_pad); //unreference the source pad
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Wait until error or EOS */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, (GstMessageType)(GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n", gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
//we should not reach here because we only asked for ERRORs and EOS and State Changes
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
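For completeness, hash_and_report is a standard GStreamer 0.10 buffer probe installed on rtph264depay's src pad. The hashing itself is not relevant to the question, so here is just the skeleton (the body is elided):

/* Buffer probe: runs in the streaming thread for every depayloaded
 * H.264 buffer. Returning TRUE lets the buffer continue downstream. */
static gboolean hash_and_report (GstPad *pad, GstBuffer *buffer, gpointer user_data)
{
  /* CustomData *data = (CustomData *) user_data; */
  /* ...hash GST_BUFFER_DATA (buffer) over GST_BUFFER_SIZE (buffer) bytes
     and report the digest (elided)... */
  return TRUE; /* TRUE = pass the buffer downstream */
}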
Now the pad_added_handler:
/* This function will be called by the pad-added signal */
//Thread 1
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
GstPad *sink_pad = gst_element_get_static_pad (data->rtph264depay, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
/* Check the new pad's type */
new_pad_caps = gst_pad_get_caps (new_pad);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (!g_str_has_prefix (new_pad_type, "application/x-rtp")) {
g_print (" It has type '%s' which is not RTP. Ignoring.\n", new_pad_type);
goto exit;
}
/* If our converter is already linked, we have nothing to do here */
if (gst_pad_is_linked (sink_pad)) {
g_print (" We are already linked. Ignoring.\n");
goto exit;
}
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
exit:
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL)
gst_caps_unref (new_pad_caps);
/* Unreference the sink pad */
gst_object_unref (sink_pad);
}
Now, the appsink callback that is invoked every time the appsink receives a buffer. This is the function I believe (though I am not certain) is behind the buffers not being received in real time, which leads me to suspect I am doing some sort of processing that lets too much time pass before the next buffer is handled (the consumer side of the queue is sketched after this function):
// Called when appsink receives a buffer: Thread 1
void appsink_new_buffer (GstElement *sink, CustomData *data) {
GstBuffer *buffer;
/* Retrieve the buffer */
g_signal_emit_by_name (sink, "pull-buffer", &buffer);
if (buffer) {
data->appsink_buffer_count++;
//push buffer onto queue, to be processed in different thread
if (GstBufferQueue->size() > GSTBUFFERQUEUE_SIZE) {
//error message
printf ("GstBufferQueue is full!\n");
//release buffer
gst_buffer_unref (buffer);
} else {
//push onto queue
GstBufferQueue->push(buffer);
//activate thread
connectionDataAvailable_GstBufferQueue.notify_all();
}
}
}
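The consumer side is a separate thread that pops buffers off this queue, processes them, and releases them. Roughly like this sketch; the queue, mutex, and condition-variable names mirror the snippet above, while the std:: types, the mutex itself, and the shutdown flag are assumptions made for this post:

// Thread 2 (sketch): drain GstBufferQueue, process each buffer, then
// release it. The push side above is guarded by the same mutex
// (locking elided in that snippet for brevity).
static bool keep_running = true;

void gst_buffer_consumer_thread (void)
{
  while (keep_running) {
    std::unique_lock<std::mutex> lock (GstBufferQueue_mutex);
    while (GstBufferQueue->empty ())
      connectionDataAvailable_GstBufferQueue.wait (lock);
    GstBuffer *buffer = GstBufferQueue->front ();
    GstBufferQueue->pop ();
    lock.unlock ();

    /* ...per-frame processing on GST_BUFFER_DATA (buffer) (elided)... */

    gst_buffer_unref (buffer); /* balance the ref taken by pull-buffer */
  }
}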
A link to the camera I am using:
http://www.axis.com/products/cam_p1357/index.htm
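One experiment still on my list, in case the appsink is pacing delivery by synchronizing buffers against the pipeline clock (an assumption on my part; appsink inherits a "sync" property from GstBaseSink that defaults to TRUE): disable sync and see whether the callback rate changes:

/* Diagnostic to try: let the appsink hand over buffers as fast as they
 * decode instead of waiting on the pipeline clock. */
g_object_set (data.appsink, "sync", FALSE, NULL);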
Hope this helps. I will continue to investigate this issue myself and will post updates as they come. Let me know if you need any additional information, and I look forward to reading your responses!
Thanks