1

这是我的问题

Kinect 安装在房间顶部(天花板上)。然后我拍摄了 kinect 下方人员的深度图像。

所以我得到的是下面人的俯视图。

然后我想提取人头来计算人数。

在我看来,这个问题需要识别图像的 LOCAL 最小区域。但我想不出办法。

有人可以建议我实现这一目标的方法吗?

是否有获取局部最小区域的 OpenCV 函数?

谢谢你。

4

3 回答 3

0

I would go with something as simple as thresholding for near and far depths, using the and operation to merge the two and find the contours in the resulting image.

It's not super flexible as you're kind of hard-coding a depth range (a minimum expected human height), but it's easy to set up/tweak and shouldn't be that costly computationally. Optionally you can use a bit of blur and erode/dilate to help refine the contours.

Although it has more than what I explained, you can see a demo here CV threshold and contours

And here's a basic example using OpenCV with OpenNI:

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#include <iostream>

using namespace cv;
using namespace std;

// Tunable pipeline parameters, exposed as trackbars on the "depth map" window.
// The depth thresholds apply to the 8-bit scaled depth image (0-255 range).
int threshNear = 60;   // lower bound: THRESH_TOZERO zeroes pixels at or below this
int threshFar = 100;   // upper bound: THRESH_TOZERO_INV zeroes pixels above this
int dilateAmt = 1;     // dilation iterations (applied after erosion)
int erodeAmt = 1;      // erosion iterations (applied before dilation)
int blurAmt = 1;       // blur kernel side length minus one: Size(blurAmt+1, blurAmt+1)
int blurPre = 1;       // 1 = blur before erode/dilate, 0 = blur between erode and dilate
// No-op trackbar callback: the loop reads the globals directly every frame,
// so nothing needs to happen on slider change.
void on_trackbar(int, void*){}

// Demo: segment people seen from a ceiling-mounted Kinect by band-pass
// thresholding the scaled depth map and extracting contours.
// Returns -1 if the OpenNI device cannot be opened or a frame cannot be
// grabbed; otherwise loops until ESC is pressed (implicit return 0).
int main( )
{
    VideoCapture capture;
    capture.open(CV_CAP_OPENNI);  // open the Kinect via the OpenNI backend
    if( !capture.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }
    cout << "ready" << endl;
    vector<vector<Point> > contours;
    namedWindow("depth map");
    // All parameters are live-tunable; the callback is a no-op because the
    // globals are re-read on every loop iteration.
    createTrackbar( "amount dilate", "depth map", &dilateAmt,16, on_trackbar );
    createTrackbar( "amount erode", "depth map", &erodeAmt,16, on_trackbar );
    createTrackbar( "amount blur", "depth map", &blurAmt,16, on_trackbar );
    createTrackbar( "blur pre", "depth map", &blurPre,1, on_trackbar );
    createTrackbar( "threshold near", "depth map", &threshNear,255, on_trackbar );
    createTrackbar( "threshold far", "depth map", &threshFar,255, on_trackbar );
    for(;;)
    {
        Mat depthMap;
        if( !capture.grab() )
        {
            cout << "Can not grab images." << endl;
            return -1;
        }
        else
        {
            if( capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ) )
            {
                // Compress the 16-bit millimetre depth map into a viewable
                // 8-bit image (scale factor chosen empirically for the demo).
                const float scaleFactor = 0.05f;
                Mat show; depthMap.convertTo( show, CV_8UC1, scaleFactor );
                // Band-pass threshold: keep only pixels between threshNear and
                // threshFar. TOZERO zeroes values at or below the near bound;
                // TOZERO_INV zeroes values above the far bound.
                Mat tnear,tfar;
                show.copyTo(tnear);
                show.copyTo(tfar);
                threshold(tnear,tnear,threshNear,255,CV_THRESH_TOZERO);
                threshold(tfar,tfar,threshFar,255,CV_THRESH_TOZERO_INV);
                show = tnear & tfar;//or cvAnd(tnear,tfar,show,NULL); to join the two thresholded images
                // Optional smoothing plus morphological cleanup to merge specks
                // and close small holes before contour extraction.
                if(blurPre == 1) blur(show,show,Size(blurAmt+1,blurAmt+1));
                Mat cntr; show.copyTo(cntr);
                erode(cntr,cntr,Mat(),Point(-1,-1),erodeAmt);
                if(blurPre == 0) blur(cntr,cntr,Size(blurAmt+1,blurAmt+1));
                dilate(cntr,cntr,Mat(),Point(-1,-1),dilateAmt);

                //compute and draw contours
                // NOTE: legacy findContours modifies its input image; cntr is
                // used afterwards only as the display canvas, so that is fine.
                findContours(cntr,contours,0,1);
                drawContours(cntr,contours,-1,Scalar(192,0,0),2,3);

                //optionally compute bounding box and circle to exclude small blobs(non human) or do further filtering,etc.
                int numContours = contours.size();
                vector<vector<Point> > contours_poly( numContours );
                vector<Rect> boundRect( numContours );
                vector<Point2f> centers( numContours );
                vector<float> radii(numContours);
                for(int i = 0; i < numContours; i++ ){
                    // Simplify each contour, then overlay its bounding box and
                    // minimum enclosing circle for size-based filtering by eye.
                    approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
                    boundRect[i] = boundingRect( Mat(contours_poly[i]) );
                    minEnclosingCircle(contours_poly[i],centers[i],radii[i]);
                    rectangle( cntr, boundRect[i].tl(), boundRect[i].br(), Scalar(64), 2, 8, 0 );
                    circle(cntr,centers[i],radii[i],Scalar(192));
                 }

                imshow( "depth map", show );
                imshow( "contours", cntr );
            }

        }

        if( waitKey( 30 ) == 27 ) break;//exit on esc
    }
}

Even if you're not using OpenNI to grab the depth stream you can still plug the depth image into OpenCV. Also, you can detect the bounding box and circle, which might help filter things further a bit. Say your setup is in an office space: you might want to avoid a column, tall plant, shelves, etc., so you can check the bounding circle's radius or the bounding box's width/height ratio.

于 2013-05-16T14:37:04.927 回答
0

我会进行前景-背景分割,将静态背景与动态“前景”(人物)分开。

然后,一旦你有了人的点云/深度图,你就可以用一些区域增长(洪水填充)方法来分割它们。这样你就可以得到分开的人,如果你专门寻找人头,你可以计算或找到他们的最低点。

于 2013-05-16T13:47:35.247 回答
0

您可以尝试分水岭变换来找到局部最小值。快速搜索找到了这个示例代码,您可能想尝试使用 OpenCV。

于 2013-05-16T10:04:09.640 回答