I tried out a few functions from the OpenCV gpu module and compared the same operations against VisionWorks immediate-mode code. Surprisingly, in every case the OpenCV gpu module is significantly faster than VisionWorks.
For example, a 4-level Gaussian pyramid implemented by hand with OpenCV:
#include <iostream>
#include <stdio.h>

/* OPENCV RELATED */
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/gpu/gpu.hpp"

using namespace std;
using namespace cv;
using namespace cv::gpu;
int main()
{
    Mat m = imread("br1.png");
    GpuMat d_m(m);                 // upload the input image to the GPU
    GpuMat l1, l2, l3, l4;         // the four pyramid levels

    int iter = 100;
    float sum = 0;

    for (int i = 0; i < iter; i++)
    {
        int64 e = getTickCount();
        gpu::pyrDown(d_m, l1);
        gpu::pyrDown(l1, l2);
        gpu::pyrDown(l2, l3);
        gpu::pyrDown(l3, l4);
        sum += (getTickCount() - e) / getTickFrequency();
    }

    cout << "Time taken by Gaussian Pyramid Level 4 \t\t\t" << sum / iter << " sec" << endl;
    //imwrite("cv_res.jpg", res);
    return 0;
}
Over 100 iterations this takes 2.5 ms per iteration on average. Whereas with VisionWorks:
#include <VX/vx.h>
#include <VX/vxu.h>

#include <stdio.h>
#include <stdlib.h>
#include <iostream>

/* OPENCV RELATED */
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/gpu/gpu.hpp"

using namespace std;
using namespace cv;
vx_image createImageFromMat(vx_context& context, cv::Mat& mat);
vx_status createMatFromImage(vx_image& image, cv::Mat& mat);
/* Entry point. */
int main(int argc, char* argv[])
{
    Mat cv_src1 = imread("br1.png", IMREAD_GRAYSCALE);
    int width = 1280;
    int height = 720;
    int half_width = width / 2;
    int half_height = height / 2;

    Mat dstMat(cv_src1.size(), cv_src1.type());
    Mat half_dstMat(Size(width / 16, height / 16), cv_src1.type());

    /* Image data. */
    if (cv_src1.empty())
    {
        std::cerr << "Can't load input images" << std::endl;
        return -1;
    }

    /* Create our context. */
    vx_context context = vxCreateContext();

    /* Image to process. */
    vx_image image = createImageFromMat(context, cv_src1);
    //NVXIO_CHECK_REFERENCE(image);

    /* Intermediate images. */
    vx_image dx = vxCreateImage(context, width, height, VX_DF_IMAGE_S16);
    vx_image dy = vxCreateImage(context, width, height, VX_DF_IMAGE_S16);
    vx_image mag = vxCreateImage(context, width, height, VX_DF_IMAGE_S16);

    /* One output image per pyramid level. */
    vx_image half_image   = vxCreateImage(context, half_width,     half_height,     VX_DF_IMAGE_U8);
    vx_image half_image_2 = vxCreateImage(context, half_width / 2, half_height / 2, VX_DF_IMAGE_U8);
    vx_image half_image_3 = vxCreateImage(context, half_width / 4, half_height / 4, VX_DF_IMAGE_U8);
    vx_image half_image_4 = vxCreateImage(context, half_width / 8, half_height / 8, VX_DF_IMAGE_U8);

    int iter = 100;
    float sum = 0.0;
    int64 e = getTickCount();

    for (int i = 0; i < iter; i++)
    {
        /* RESIZE (half-scale Gaussian) OPERATIONS */
        if (vxuHalfScaleGaussian(context, image, half_image, 3) != VX_SUCCESS)
        {
            cout << "ERROR :" << "failed to perform scaling" << endl;
        }
        if (vxuHalfScaleGaussian(context, half_image, half_image_2, 3) != VX_SUCCESS)
        {
            cout << "ERROR :" << "failed to perform scaling" << endl;
        }
        if (vxuHalfScaleGaussian(context, half_image_2, half_image_3, 3) != VX_SUCCESS)
        {
            cout << "ERROR :" << "failed to perform scaling" << endl;
        }
        if (vxuHalfScaleGaussian(context, half_image_3, half_image_4, 3) != VX_SUCCESS)
        {
            cout << "ERROR :" << "failed to perform scaling" << endl;
        }
        sum += (getTickCount() - e) / getTickFrequency();
    }

    cout << "Resize to half " << sum / iter << endl;

    createMatFromImage(half_image_4, half_dstMat);
    imwrite("RES.jpg", half_dstMat);

    /* Tidy up. */
    vxReleaseImage(&dx);
    vxReleaseImage(&dy);
    vxReleaseImage(&mag);
    vxReleaseContext(&context);
    return 0;
}
vx_image createImageFromMat(vx_context& context, cv::Mat& mat)
{
    /* Wrap the cv::Mat buffer as a VX_DF_IMAGE_U8 image without copying. */
    vx_imagepatch_addressing_t src_addr = {
        mat.cols, mat.rows, sizeof(vx_uint8), mat.cols * sizeof(vx_uint8),
        VX_SCALE_UNITY, VX_SCALE_UNITY, 1, 1 };
    void* src_ptr = mat.data;
    vx_image image = vxCreateImageFromHandle(context, VX_DF_IMAGE_U8, &src_addr, &src_ptr, VX_IMPORT_TYPE_HOST);
    return image;
}
vx_status createMatFromImage(vx_image& image, cv::Mat& mat)
{
    vx_status status = VX_SUCCESS;
    vx_uint8* ptr = NULL;
    cout << "Creating image " << mat.cols << " " << mat.rows << endl;

    vx_rectangle_t rect;
    vxGetValidRegionImage(image, &rect);

    vx_imagepatch_addressing_t addr = {
        mat.cols, mat.rows, sizeof(vx_uint8), mat.cols * sizeof(vx_uint8),
        VX_SCALE_UNITY, VX_SCALE_UNITY, 1, 1 };

    /* Map the VX image and point the cv::Mat at the mapped buffer (no copy). */
    status = vxAccessImagePatch(image, &rect, 0, &addr, (void**)&ptr, VX_READ_ONLY);
    mat.data = ptr;
    return status;
}
A single execution takes 11.1 ms, and over 100 iterations the average comes out to 96 ms.
If this is generally the case, then what does VisionWorks actually offer?
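The vxu* calls above are immediate mode; if graph mode is the intended fast path, below is a minimal sketch of the same 4-level pyramid expressed as an OpenVX graph (reusing the context, images and iter from the code above, and assuming the standard vxHalfScaleGaussianNode / vxVerifyGraph / vxProcessGraph API). I have not benchmarked this variant yet.

/* Sketch only: graph-mode version of the 4-level pyramid, same context/images as above. */
vx_graph graph = vxCreateGraph(context);

/* One node per pyramid level; the runtime can optimize the whole graph at verify time. */
vxHalfScaleGaussianNode(graph, image,        half_image,   3);
vxHalfScaleGaussianNode(graph, half_image,   half_image_2, 3);
vxHalfScaleGaussianNode(graph, half_image_2, half_image_3, 3);
vxHalfScaleGaussianNode(graph, half_image_3, half_image_4, 3);

if (vxVerifyGraph(graph) == VX_SUCCESS)
{
    for (int i = 0; i < iter; i++)
    {
        vxProcessGraph(graph);   /* executes all four levels */
    }
}
vxReleaseGraph(&graph);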
I am running the "cuda-repo-l4t-r21.3-6-5-local_6.5-50" release for L4T on a Jetson TK1.
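For reference, a typical way to build the two programs on the TK1 looks roughly like this (the source file names are placeholders, and the exact pkg-config / library names may differ on other setups):

g++ -O2 opencv_pyr.cpp -o opencv_pyr `pkg-config --cflags --libs opencv`
g++ -O2 vx_pyr.cpp -o vx_pyr `pkg-config --cflags --libs opencv` -lvisionworks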