我尝试用 jpegparse 替换 h264parse,把输入源类型从视频改为图像,但是处理后的输出图像中人体上没有叠加骨架,并且运行时的输出值显示检测失败。
GMainLoop *loop = NULL;
GstCaps *caps = NULL;
GstElement *pipeline = NULL, *source = NULL, *jpegparser = NULL,
*decoder = NULL, *streammux = NULL, *sink = NULL, *pgie = NULL, *nvvidconv = NULL, *nvosd = NULL;
GstBus *bus = NULL;
guint bus_watch_id;
GstPad *osd_sink_pad = NULL;
if (argc != 2)
{
g_printerr("Usage: %s <filename> \n", argv[0]);
return -1;
}
gst_init(&argc, &argv);
loop = g_main_loop_new(NULL, FALSE);
pipeline = gst_pipeline_new("deepstream-tensorrt-openpose-pipeline");
source = gst_element_factory_make("filesrc", "file-source");
jpegparser = gst_element_factory_make("jpegparse", "jpeg-parser");
decoder = gst_element_factory_make("nvv4l2decoder", "nvv4l2-decoder");
streammux = gst_element_factory_make("nvstreammux", "stream-muxer");
if (!pipeline || !streammux)
{
g_printerr("One element could not be created. Exiting.\n");
return -1;
}
pgie = gst_element_factory_make("nvinfer", "primary-nvinference-engine");
nvsink = gst_element_factory_make("nveglglessink", "nvvideo-renderer");
sink = gst_element_factory_make("fpsdisplaysink", "fps-display");
g_object_set(G_OBJECT(sink), "text-overlay", FALSE, "video-sink", nvsink, "sync", FALSE, NULL);
if (!source || !jpegparser || !decoder || !pgie || !nvvidconv || !nvosd || !sink)
{
g_printerr("One element could not be created. Exiting.\n");
return -1;
}
g_object_set(G_OBJECT(source), "location", argv[1], NULL);
g_object_set(G_OBJECT(streammux), "width", MUXER_OUTPUT_WIDTH, "height",
MUXER_OUTPUT_HEIGHT, "batch-size", 1,
"batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);
g_object_set(G_OBJECT(pgie), "output-tensor-meta", TRUE,
"config-file-path", "deepstream_pose_estimation_config.txt", NULL);
bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
gst_object_unref(bus);
gst_bin_add_many(GST_BIN(pipeline),
streammux, source, jpegparser, decoder, pgie,
nvvidconv, nvosd, sink, NULL);
GstPad *sinkpad, *srcpad;
gchar pad_name_sink[16] = "sink_0";
gchar pad_name_src[16] = "src";
sinkpad = gst_element_get_request_pad(streammux, pad_name_sink);
if (!sinkpad)
{
g_printerr("Streammux request sink pad failed. Exiting.\n");
return -1;
}
srcpad = gst_element_get_static_pad(decoder, pad_name_src);
if (!srcpad)
{
g_printerr("Decoder request src pad failed. Exiting.\n");
return -1;
}
if (gst_pad_link(srcpad, sinkpad) != GST_PAD_LINK_OK)
{
g_printerr("Failed to link decoder to stream muxer. Exiting.\n");
return -1;
}
gst_object_unref(sinkpad);
gst_object_unref(srcpad);
if (!gst_element_link_many(source, jpegparser, decoder, NULL))
{
g_printerr("Elements could not be linked: 1. Exiting.\n");
return -1;
}
if (!gst_element_link_many(streammux, pgie, nvvidconv, nvosd, sink, NULL))
{
g_printerr("Elements could not be linked: 2. Exiting.\n");
return -1;
}
GstPad *pgie_src_pad = gst_element_get_static_pad(pgie, "src");
if (!pgie_src_pad)
g_print("Unable to get pgie src pad\n");
else
gst_pad_add_probe(pgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
pgie_src_pad_buffer_probe, (gpointer)sink, NULL);
osd_sink_pad = gst_element_get_static_pad(nvosd, "sink");
if (!osd_sink_pad)
g_print("Unable to get sink pad\n");
else
gst_pad_add_probe(osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
osd_sink_pad_buffer_probe, (gpointer)sink, NULL);
g_print("Now playing: %s\n", argv[1]);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_print("Running...\n");
g_main_loop_run(loop);
g_print("Returned, stopping playback\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(pipeline));
g_source_remove(bus_watch_id);
g_main_loop_unref(loop);
我想把这个方案用于姿态估计,只是把视频源换成图像源,但我不清楚为什么这个实现不起作用。如果有人知道原因,请告诉我,非常感谢任何帮助。