
I am using findContours for blob detection. Now I would like to merge blobs that are close together and similar to each other.

Here are some sample images:

[sample images]

Is this possible with plain OpenCV?


2 Answers


The input images you gave us are pretty easy to work with:

[input images]

The first step is to isolate the yellow blobs from everything else, and a simple color segmentation technique can accomplish this task. You can take a look at Segmentation & Object Detection by color or Tracking colored objects in OpenCV to get an idea of how to do it.

[segmentation results]
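As a side note, if you want the segmentation to be yellow-specific rather than saturation-based (the code at the end of this answer only thresholds the saturation channel), you could also threshold the hue channel with cv::inRange. The sketch below is only an illustration of that idea, not part of the original code; the hue range 20–35 used for yellow is an assumption and may need tuning for your images:

#include <opencv2/opencv.hpp>

int main()
{
    // Load the input image (the file name is just a placeholder)
    cv::Mat img = cv::imread("input.png");
    if (img.empty())
        return -1;

    // Convert to HSV and keep only pixels whose hue falls in an (assumed)
    // yellow range and that are reasonably saturated and bright
    cv::Mat hsv, yellow;
    cv::cvtColor(img, hsv, CV_BGR2HSV);
    cv::inRange(hsv, cv::Scalar(20, 70, 70), cv::Scalar(35, 255, 255), yellow);

    cv::imshow("yellow blobs", yellow);
    cv::waitKey(0);
    return 0;
}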

Then, it's time to merge the blobs. One technique in particular that can be useful is the bounding box: put all the blobs inside a single rectangle. Notice in the images below that there is a green rectangle surrounding the blobs:

[bounding box results]

After that, all you need to do is fill the rectangle with the color of your choice, thus connecting all the blobs. I'm leaving this last step as homework for you.

This is the quickest and simplest approach I could think of. The following code demonstrates how to achieve what I just described:

#include <cv.h>
#include <highgui.h>

#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
    cv::Mat img = cv::imread(argv[1]);
    if (!img.data)
    {
        std::cout << "!!! Failed to open file: " << argv[1] << std::endl;
        return 0;
    }

    // Convert RGB Mat into HSV color space
    cv::Mat hsv;
    cv::cvtColor(img, hsv, CV_BGR2HSV);

    // Split HSV Mat into HSV components
    std::vector<cv::Mat> v;
    cv::split(hsv,v);

    // Erase pixels with low saturation
    int min_sat = 70;
    cv::threshold(v[1], v[1], min_sat, 255, cv::THRESH_BINARY);

    /* Work with the saturated image from now on */

// Erode could provide some enhancement, but I'm not sure.
//  cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
//  cv::erode(v[1], v[1], element);

    // Store the set of points in the image before assembling the bounding box
    std::vector<cv::Point> points;
    cv::Mat_<uchar>::iterator it = v[1].begin<uchar>();
    cv::Mat_<uchar>::iterator end = v[1].end<uchar>();
    for (; it != end; ++it)
    {
        if (*it) points.push_back(it.pos());
    }

    // Compute minimal bounding box
    cv::RotatedRect box = cv::minAreaRect(cv::Mat(points));

    // Display bounding box on the original image
    cv::Point2f vertices[4];
    box.points(vertices);
    for (int i = 0; i < 4; ++i)
    {
        cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(0, 255, 0), 1, CV_AA);
    }

    cv::imshow("box", img);
    //cv::imwrite(argv[2], img);

    cv::waitKey(0);

    return 0;
}
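For the fill step left as homework above, one possibility (my own sketch, not part of the original answer) is to rasterize the rotated rectangle with cv::fillConvexPoly, for example through a small helper added to the program above:

// Hypothetical helper: fill a rotated bounding box on a single-channel mask,
// turning every blob inside it into one connected white region
void fillRotatedBox(cv::Mat& mask, const cv::RotatedRect& box)
{
    cv::Point2f vf[4];
    box.points(vf);                 // the four corners as floats
    cv::Point vi[4];
    for (int i = 0; i < 4; ++i)
        vi[i] = vf[i];              // rounded to integer coordinates
    cv::fillConvexPoly(mask, vi, 4, cv::Scalar(255));
}

Calling something like fillRotatedBox(v[1], box); right after cv::minAreaRect would merge the separate blobs into a single filled region.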
answered 2012-04-24T15:09:27.903

I think I got it. Thanks to the details of your program, I came up with this solution (comments are welcome):

vector<vector<Point> > contours;
vector<vector<Point> > tmp_contours;
findContours(detectedImg, tmp_contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

vector<vector<Point> >::iterator it1 = tmp_contours.begin();

Mat test = Mat(FImage.size(), CV_32FC3);

while (it1 != tmp_contours.end()) {
    vector<Point> approx1;
    approxPolyDP(Mat(*it1), approx1, 3, true);
    Rect box1 = boundingRect(approx1);
    float area1 = contourArea(approx1);

    // Keep only blobs of a reasonable size
    if ((area1 > 50) && (area1 < 13000) && (box1.width < 100) && (box1.height < 120)) {

        vector<vector<Point> >::iterator it2 = tmp_contours.begin();

        while (it2 != tmp_contours.end()) {
            vector<Point> approx2;
            approxPolyDP(Mat(*it2), approx2, 3, true);

            // Centroids of both contours, from their spatial moments
            Moments m1 = moments(Mat(approx1), false);
            Moments m2 = moments(Mat(approx2), false);
            float x1 = m1.m10 / m1.m00;
            float y1 = m1.m01 / m1.m00;
            float x2 = m2.m10 / m2.m00;
            float y2 = m2.m01 / m2.m00;

            // arcLength of an open two-point polyline is simply the
            // Euclidean distance between the two centroids
            vector<Point> dist;
            dist.push_back(Point(x1, y1));
            dist.push_back(Point(x2, y2));
            float d = arcLength(dist, false);

            Rect box2 = boundingRect(approx2);
            if (box1 != box2) {          // skip comparing the contour with itself
                if (d < 25) {
                    // Method to merge the two point vectors (see sketch below)
                    approx1 = mergePoints(approx1, approx2);
                }
            }
            ++it2;
        }

        Rect b = boundingRect(approx1);
        rectangle(test, b, CV_RGB(125, 255, 0), 2);
        contours.push_back(approx1);
    }
    ++it1;
}
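The code above calls a mergePoints() method that is not shown. A minimal sketch of what it could do, assuming it only needs to concatenate the two point sets so that boundingRect() ends up spanning both contours:

// Hypothetical implementation of the merge step: append the points of the
// second contour to the first, so that boundingRect() covers both blobs
vector<Point> mergePoints(const vector<Point>& a, const vector<Point>& b)
{
    vector<Point> merged(a);
    merged.insert(merged.end(), b.begin(), b.end());
    return merged;
}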
answered 2012-04-26T12:22:13.593