
@robot_sherrick answered this question for me, so this is a follow-up question to his answer.

cv::SimpleBlobDetector in OpenCV 2.4 looks very exciting, but I am not sure whether I can use it for more detailed data extraction.

I have the following concerns:

  • If this only returns the centres of the blobs, I can't get a whole, labelled Mat out of it, can I?
  • How do I access the features of the detected blobs, such as area, convexity, colour and so on?
  • Can I display an exact segmentation with this? (a waterfall, say)

3 Answers


So the code would look something like this:

cv::Mat inputImg = cv::imread(image_file_name, CV_LOAD_IMAGE_COLOR);   // Read a file
cv::SimpleBlobDetector::Params params;
params.minDistBetweenBlobs = 10.0;  // minimum 10 pixels between blobs
params.filterByArea = true;         // filter my blobs by area of blob
params.minArea = 20.0;              // min 20 pixels squared
params.maxArea = 500.0;             // max 500 pixels squared
cv::SimpleBlobDetector myBlobDetector(params);
std::vector<cv::KeyPoint> myBlobs;
myBlobDetector.detect(inputImg, myBlobs);

Then, if you want to highlight these keypoints on the image:

cv::Mat blobImg;
cv::drawKeypoints(inputImg, myBlobs, blobImg);
cv::imshow("Blobs", blobImg);
cv::waitKey(0);   // needed so the window actually gets rendered

To access the information stored in the keypoints, you simply access each element like this:

for(std::vector<cv::KeyPoint>::iterator blobIterator = myBlobs.begin(); blobIterator != myBlobs.end(); blobIterator++){
   std::cout << "size of blob is: " << blobIterator->size << std::endl;
   std::cout << "point is at: " << blobIterator->pt.x << " " << blobIterator->pt.y << std::endl;
} 

Note: this was not compiled, so there may be typos.
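SimpleBlobDetector itself only hands back a cv::KeyPoint per blob (its centre in pt and a size estimate in size); area, convexity and colour are used internally as filters rather than exposed on the result. A minimal, uncompiled sketch of enabling those extra filters through Params (the threshold values here are arbitrary examples):

params.filterByConvexity = true;    // keep blobs whose contourArea / hullArea is in range
params.minConvexity = 0.8;
params.filterByCircularity = true;  // 4*pi*area / perimeter^2, 1.0 for a perfect circle
params.minCircularity = 0.5;
params.filterByColor = true;        // keep blobs whose centre pixel equals blobColor (0 dark, 255 bright)
params.blobColor = 255;

If you need the actual per-blob values or the full contours, see the BetterBlobDetector answer below.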

Answered 2012-11-23T19:42:03.797

Here is a version that lets you retrieve the final contours through a getContours() method. They match the keypoints by index.

class BetterBlobDetector : public cv::SimpleBlobDetector
{
public:

    BetterBlobDetector(const cv::SimpleBlobDetector::Params &parameters = cv::SimpleBlobDetector::Params());

    const std::vector < std::vector<cv::Point> > getContours();

protected:
    virtual void detectImpl( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask=cv::Mat()) const;
    virtual void findBlobs(const cv::Mat &image, const cv::Mat &binaryImage,
                           std::vector<Center> &centers, std::vector < std::vector<cv::Point> >&contours) const;

};

Then the .cpp:

using namespace cv;

BetterBlobDetector::BetterBlobDetector(const SimpleBlobDetector::Params &parameters)
    : SimpleBlobDetector(parameters)   // forward the parameters so the base-class params member is initialised
{
}

void BetterBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryImage,
                                   vector<Center> &centers, std::vector < std::vector<cv::Point> >&curContours) const
{
    (void)image;
    centers.clear();

    curContours.clear();

    std::vector < std::vector<cv::Point> >contours;
    Mat tmpBinaryImage = binaryImage.clone();
    findContours(tmpBinaryImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);


    for (size_t contourIdx = 0; contourIdx < contours.size(); contourIdx++)
    {
        Center center;
        center.confidence = 1;
        Moments moms = moments(Mat(contours[contourIdx]));
        if (params.filterByArea)
        {
            double area = moms.m00;
            if (area < params.minArea || area >= params.maxArea)
                continue;
        }

        if (params.filterByCircularity)
        {
            double area = moms.m00;
            double perimeter = arcLength(Mat(contours[contourIdx]), true);
            double ratio = 4 * CV_PI * area / (perimeter * perimeter);
            if (ratio < params.minCircularity || ratio >= params.maxCircularity)
                continue;
        }

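        // inertia ratio: smallest / largest principal second moment of the contour (~1 for a circle, near 0 for a very elongated blob)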
        if (params.filterByInertia)
        {
            double denominator = sqrt(pow(2 * moms.mu11, 2) + pow(moms.mu20 - moms.mu02, 2));
            const double eps = 1e-2;
            double ratio;
            if (denominator > eps)
            {
                double cosmin = (moms.mu20 - moms.mu02) / denominator;
                double sinmin = 2 * moms.mu11 / denominator;
                double cosmax = -cosmin;
                double sinmax = -sinmin;

                double imin = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
                double imax = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmax - moms.mu11 * sinmax;
                ratio = imin / imax;
            }
            else
            {
                ratio = 1;
            }

            if (ratio < params.minInertiaRatio || ratio >= params.maxInertiaRatio)
                continue;

            center.confidence = ratio * ratio;
        }

        if (params.filterByConvexity)
        {
            vector < Point > hull;
            convexHull(Mat(contours[contourIdx]), hull);
            double area = contourArea(Mat(contours[contourIdx]));
            double hullArea = contourArea(Mat(hull));
            double ratio = area / hullArea;
            if (ratio < params.minConvexity || ratio >= params.maxConvexity)
                continue;
        }

        center.location = Point2d(moms.m10 / moms.m00, moms.m01 / moms.m00);

        if (params.filterByColor)
        {
            if (binaryImage.at<uchar> (cvRound(center.location.y), cvRound(center.location.x)) != params.blobColor)
                continue;
        }

        //compute blob radius
        {
            vector<double> dists;
            for (size_t pointIdx = 0; pointIdx < contours[contourIdx].size(); pointIdx++)
            {
                Point2d pt = contours[contourIdx][pointIdx];
                dists.push_back(norm(center.location - pt));
            }
            std::sort(dists.begin(), dists.end());
            center.radius = (dists[(dists.size() - 1) / 2] + dists[dists.size() / 2]) / 2.;
        }

        centers.push_back(center);
        curContours.push_back(contours[contourIdx]);
    }
}

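// file-scope storage so the const detectImpl() can hand the contours to getContours();
// note that it is shared by every BetterBlobDetector instance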
static std::vector < std::vector<cv::Point> > _contours;

const std::vector < std::vector<cv::Point> > BetterBlobDetector::getContours() {
    return _contours;
}

void BetterBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat&) const
{
    //TODO: support mask
     _contours.clear();

    keypoints.clear();
    Mat grayscaleImage;
    if (image.channels() == 3)
        cvtColor(image, grayscaleImage, CV_BGR2GRAY);
    else
        grayscaleImage = image;

    vector < vector<Center> > centers;
    vector < vector<cv::Point> >contours;
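    // binarize at a ladder of thresholds and merge blobs that re-appear at roughly the same location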
    for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep)
    {
        Mat binarizedImage;
        threshold(grayscaleImage, binarizedImage, thresh, 255, THRESH_BINARY);

        vector < Center > curCenters;
        vector < vector<cv::Point> >curContours, newContours;
        findBlobs(grayscaleImage, binarizedImage, curCenters, curContours);
        vector < vector<Center> > newCenters;
        for (size_t i = 0; i < curCenters.size(); i++)
        {

            bool isNew = true;
            for (size_t j = 0; j < centers.size(); j++)
            {
                double dist = norm(centers[j][ centers[j].size() / 2 ].location - curCenters[i].location);
                isNew = dist >= params.minDistBetweenBlobs && dist >= centers[j][ centers[j].size() / 2 ].radius && dist >= curCenters[i].radius;
                if (!isNew)
                {
                    centers[j].push_back(curCenters[i]);

                    size_t k = centers[j].size() - 1;
                    while( k > 0 && centers[j][k].radius < centers[j][k-1].radius )
                    {
                        centers[j][k] = centers[j][k-1];
                        k--;
                    }
                    centers[j][k] = curCenters[i];

                    break;
                }
            }
            if (isNew)
            {
                newCenters.push_back(vector<Center> (1, curCenters[i]));
                newContours.push_back(curContours[i]);
                //centers.push_back(vector<Center> (1, curCenters[i]));
            }
        }
        std::copy(newCenters.begin(), newCenters.end(), std::back_inserter(centers));
        std::copy(newContours.begin(), newContours.end(), std::back_inserter(contours));
    }

    for (size_t i = 0; i < centers.size(); i++)
    {
        if (centers[i].size() < params.minRepeatability)
            continue;
        Point2d sumPoint(0, 0);
        double normalizer = 0;
        for (size_t j = 0; j < centers[i].size(); j++)
        {
            sumPoint += centers[i][j].confidence * centers[i][j].location;
            normalizer += centers[i][j].confidence;
        }
        sumPoint *= (1. / normalizer);
        KeyPoint kpt(sumPoint, (float)(centers[i][centers[i].size() / 2].radius));
        keypoints.push_back(kpt);
        _contours.push_back(contours[i]);
    }
}
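Not from the original answer, but a rough usage sketch: assuming the class above is compiled as a hypothetical BetterBlobDetector.h / .cpp, the contours can be painted back into a labelled Mat, which also covers the segmentation part of the question:

#include <opencv2/opencv.hpp>
#include "BetterBlobDetector.h"   // hypothetical header containing the class above

int main()
{
    cv::Mat img = cv::imread("blobs.png", CV_LOAD_IMAGE_COLOR);   // example file name

    BetterBlobDetector detector;                      // default SimpleBlobDetector::Params
    std::vector<cv::KeyPoint> keypoints;
    detector.detect(img, keypoints);

    // contours[i] belongs to keypoints[i]
    std::vector< std::vector<cv::Point> > contours = detector.getContours();

    // paint each blob with its own label value to get a labelled image
    cv::Mat labels = cv::Mat::zeros(img.size(), CV_8UC1);
    for (size_t i = 0; i < contours.size(); i++)
        cv::drawContours(labels, contours, (int)i, cv::Scalar(i + 1), CV_FILLED);

    cv::Mat vis = labels * 50;        // scale the label values up so they are visible
    cv::imshow("labels", vis);
    cv::waitKey(0);
    return 0;
}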
Answered 2014-08-06T05:22:04.837
// Access SimpleBlobDetector data for video

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/features2d/features2d.hpp"

#include <iostream>
#include <math.h>
#include <vector>
#include <fstream>
#include <string>
#include <sstream>
#include <algorithm>


using namespace cv;
using namespace std;


int main(int argc, char *argv[])
{


    const char* fileName ="C:/Users/DAGLI/Desktop/videos/new/m3.avi";  
    VideoCapture cap(fileName);
    if(!cap.isOpened())
    {
        cout << "Couldn't open Video  " << fileName << "\n"; 
        return -1; 
    }
    for(;;)  // infinite loop over the video frames
    {
        Mat frame,labelImg; 
        cap >> frame; 
        if(frame.empty()) break;  
        //imshow("main",frame);  

        Mat frame_gray;
        cvtColor(frame,frame_gray,CV_BGR2GRAY);   // VideoCapture delivers BGR frames


        //////////////////////////////////////////////////////////////////////////
        // convert binary_image
        Mat binaryx;
        threshold(frame_gray,binaryx,120,255,CV_THRESH_BINARY);


        Mat src, gray, thresh, binary;
        Mat out;
        vector<KeyPoint> keyPoints;

        SimpleBlobDetector::Params params;
        params.minThreshold = 120;
        params.maxThreshold = 255;
        params.thresholdStep = 100;
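        // with these settings the detector binarizes internally at thresholds 120 and 220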

        params.minArea = 20; 
        params.minConvexity = 0.3;
        params.minInertiaRatio = 0.01;

        params.maxArea = 1000;
        params.maxConvexity = 10;

        params.filterByColor = false;
        params.filterByCircularity = false;



        src = binaryx.clone();

        SimpleBlobDetector blobDetector( params );
        blobDetector.detect( src, keyPoints );
        drawKeypoints( src, keyPoints, out, CV_RGB(255,0,0), DrawMatchesFlags::DEFAULT);


        cv::Mat blobImg;    
        cv::drawKeypoints(frame, keyPoints, blobImg);
        cv::imshow("Blobs", blobImg);

        for(size_t i=0; i<keyPoints.size(); i++){
            //circle(out, keyPoints[i].pt, 20, cvScalar(255,0,0), 10);
            //cout<<keyPoints[i].response<<endl;
            //cout<<keyPoints[i].angle<<endl;
            //cout<<keyPoints[i].size()<<endl;
            cout<<keyPoints[i].pt.x<<endl;
            cout<<keyPoints[i].pt.y<<endl;

        }
        imshow( "out", out );

        if ((cvWaitKey(40)&0xff)==27) break;  // break when Esc is pressed
    }
    system("pause");
    return 0;
}
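As a side note (not from the original answer), OpenCV 2.4 can also create the detector through the generic FeatureDetector factory instead of constructing it directly; the factory overload takes no Params, so the parameter block above would not apply. A minimal sketch:

// factory variant, uses the default blob parameters
cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("SimpleBlob");
detector->detect(src, keyPoints);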
Answered 2014-02-26T15:38:04.480