
Here is the original MATLAB code:

 % Calculate each separated object area
    cDist=regionprops(bwImg, 'Area');
    cDist=[cDist.Area];

    % Label each object
    [bwImgLabeled, ~]=bwlabel(bwImg);

    % Calculate min and max object size based on assumptions on the color
    % checker size
    maxLabelSize = prod(size(imageData)./[4 6]);
    minLabelSize = prod(size(imageData)./[4 6]./10);

    % Find label indices for objects that are too large or too small
    remInd = find(cDist > maxLabelSize);
    remInd = [remInd find(cDist < minLabelSize)];

    % Remove over/undersized objects
    for n=1:length(remInd)
        ri = bwImgLabeled == remInd(n);
        bwImgLabeled(ri) = 0;
    end

Here is my code using OpenCV:

//regionprops(bwImg, 'Area');
// cDist=[cDist.Area]
//cv::FileStorage file("C:\\Users\\gdarmon\\Desktop\\gili.txt", cv::FileStorage::WRITE);
//
//file << dst;
dst.convertTo(dst, CV_8U);
// Find the contours of the connected components in the binary image
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(dst, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);

// Compute the moments of each contour
std::vector<cv::Moments> mu(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
    mu[i] = cv::moments(contours[i], false);
}
// Compute the mass center (centroid) of each contour
std::vector<cv::Point2f> mc(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
    mc[i] = cv::Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
}

Now that I have the contours, I want to use the bwlabel function.
1. I think the labeling is done in order to get 4- or 8-connected objects. Can you explain what labeling actually is? I would appreciate any links.
2. Regarding connected components in OpenCV: in this post some people mention cvBlob and others mention OpenCV's cvContourArea. Can you explain the difference, and which is better suited to my use case?

Update: here is my attempt at using cvBlobs

IplImage* img_bw = new IplImage(dst);
CBlobResult blobs;
CBlob *currentBlob;
blobs = CBlobResult(img_bw, NULL, 0);
// Exclude all white blobs smaller than the given value (80)
// The bigger the last parameter, the bigger the blobs need
// to be for inclusion 
blobs.Filter( blobs,
    B_EXCLUDE,
    CBlobGetArea(),
    B_LESS,
    80 );

// Get the number of blobs discovered
int num_blobs = blobs.GetNumBlobs(); 

// Display the filtered blobs
IplImage* filtered = cvCreateImage( cvGetSize( img_bw ),
    IPL_DEPTH_8U,
    3 ); 

cvMerge( img_bw, img_bw, img_bw, NULL, filtered );

for ( int i = 0; i < num_blobs; i++ )
{
    currentBlob = blobs.GetBlob( i );
    currentBlob->FillBlob( filtered, CV_RGB(255,0,0));
}

// Display the input / output windows and images
cvNamedWindow( "input" );
cvNamedWindow( "output" );
cvShowImage("input", img_bw );

cvShowImage("output", filtered);
cv::waitKey(0);

 /*% Calculate min and max object size based on assumptions on the color
% checker size
maxLabelSize = prod(size(imageData)./[4 6]);
minLabelSize = prod(size(imageData)./[4 6]./10);*/
double maxLabelSize = (dst.rows/4.0) * (dst.cols/6.0);
double minLabelSize = ((dst.rows/40.0) * (dst.cols/60.0));
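For reference, here is a minimal sketch of what the MATLAB removal loop could translate to, filtering the contours found above by area (cv::contourArea only approximates the pixel-count area that regionprops reports; filteredContours and filteredMask are just illustrative names):

// Sketch: keep only contours whose area lies within [minLabelSize, maxLabelSize],
// mirroring the MATLAB loop that zeroes out over/undersized labels
std::vector<std::vector<cv::Point> > filteredContours;
for (size_t i = 0; i < contours.size(); i++)
{
    double area = cv::contourArea(contours[i]);
    if (area >= minLabelSize && area <= maxLabelSize)
    {
        filteredContours.push_back(contours[i]);
    }
}

// Optionally redraw only the surviving contours into a clean mask
cv::Mat filteredMask = cv::Mat::zeros(dst.size(), CV_8UC1);
cv::drawContours(filteredMask, filteredContours, -1, cv::Scalar(255), CV_FILLED);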

2 Answers

1. I think the labeling is done in order to get 4- or 8-connected objects. Can you explain what labeling actually is? I would appreciate any links.

What labeling actually does is most clearly demonstrated in the Matlab documentation for bwlabel. If you compare the original matrix BW to the resulting matrix L, you'll see that it takes a binary image and assigns unique labels to each connected group of 1's:

L =

     1     1     1     0     0     0     0     0
     1     1     1     0     2     2     0     0
     1     1     1     0     2     2     0     0
     1     1     1     0     0     0     3     0
     1     1     1     0     0     0     3     0
     1     1     1     0     0     0     3     0
     1     1     1     0     0     3     3     0
     1     1     1     0     0     0     0     0

Three components are labeled here. This example finds 4-connected components; a pixel is considered connected to the current pixel if it lies directly to its left, right, above, or below it. 8-connected objects also include the diagonals, which for the matrix above would cause labels 2 and 3 to be merged, because the bottom-right corner of object 2 and the top of object 3 are diagonally connected. The connected-component labeling algorithm is described on Wikipedia here.
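If you want to see the connectivity difference in OpenCV terms, here is a small self-contained sketch (assuming OpenCV 3.x, where cv::connectedComponents takes the connectivity as a parameter). With two blobs that only touch diagonally, 4-connectivity keeps them as separate labels while 8-connectivity merges them:

#include <opencv2/imgproc.hpp>
#include <iostream>

int main()
{
    // Two 2x2 blobs that touch only at a diagonal
    cv::Mat bw = (cv::Mat_<uchar>(4, 4) <<
        255, 255,   0,   0,
        255, 255,   0,   0,
          0,   0, 255, 255,
          0,   0, 255, 255);

    cv::Mat labels4, labels8;
    int n4 = cv::connectedComponents(bw, labels4, 4, CV_32S);
    int n8 = cv::connectedComponents(bw, labels8, 8, CV_32S);

    std::cout << "4-connected label count (incl. background): " << n4 << std::endl; // 3
    std::cout << "8-connected label count (incl. background): " << n8 << std::endl; // 2
    return 0;
}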

2. Regarding connected components in OpenCV: in this post some people mention cvBlob and others mention OpenCV's cvContourArea. Can you explain the difference, and which is better suited to my use case?

OpenCV 3.0 is out of beta and has two brand-new methods: connectedComponents and connectedComponentsWithStats (documentation). If you're looking to replicate Matlab's bwlabel, this is the way to go.

I wrote a test program to try out connectedComponentsWithStats (complete code below), using this as my test image:

Original image

(Actually, this image is scaled down from 800x600 to 400x300, but the code to generate it is included below.)

I generated the labeled image with:

int nLabels = connectedComponentsWithStats(src, labels, stats, centroids, 8, CV_32S);

The value of nLabels returned is 5. Keep in mind that this method counts the background as label 0.

To see what the labeled regions are, you can scale the grayscale values up from [0..nLabels-1] to [0..255], or you can assign random RGB values and create a color image (a sketch of both is shown after the printed values below). For this test I simply printed the values at a couple of locations that I knew were in different components.

cout << "Show label values:" << endl;
// Middle of square at top-left
int component1Pixel = labels.at<int>(150,150);
cout << "pixel at(150,150) = " << component1Pixel << endl;
// Middle of rectangle at far right
int component2Pixel = labels.at<int>(300,550);
cout << "pixel at(300,550) = " << component2Pixel << endl << endl;

Show label values:
pixel at(150,150) = 1  
pixel at(300,550) = 2  
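As an aside, the scaling/colorizing mentioned above could look something like this sketch if dropped into the test program below (labelImg and colored are just illustrative names; labels and nLabels come from the call above):

// Scale labels from [0..nLabels-1] up to [0..255] for a viewable grayscale image
Mat labelImg;
labels.convertTo(labelImg, CV_8U, 255.0 / (nLabels - 1));

// Or assign a random color to each label (label 0, the background, stays black)
vector<Vec3b> colors(nLabels);
colors[0] = Vec3b(0, 0, 0);
for (int i = 1; i < nLabels; i++)
{
    colors[i] = Vec3b(rand() & 255, rand() & 255, rand() & 255);
}
Mat colored(labels.size(), CV_8UC3);
for (int r = 0; r < labels.rows; r++)
{
    for (int c = 0; c < labels.cols; c++)
    {
        colored.at<Vec3b>(r, c) = colors[labels.at<int>(r, c)];
    }
}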

stats is an nLabels x 5 Mat containing left, top, width, height, and area for each component (including the background). For this image:

stats:
(left,top,width,height,area)
[0, 0, 800, 600, 421697;
 100, 100, 101, 101, 10201;
 500, 150, 101, 301, 30401;
 350, 246, 10, 10, 36;
 225, 325, 151, 151, 17665]

You'll notice that component 0 spans the full width/height of the image. Summing all the areas, you get 480,000 = 800x600. The first 4 elements can be used to create a bounding rectangle:

Rect(Point(left,top), Size(width,height))
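For example, drawing the bounding box of every foreground component onto a color copy of the test image could look like this (boxes is just an illustrative name; src, stats, and nLabels come from the test program below):

Mat boxes;
cvtColor(src, boxes, COLOR_GRAY2BGR);
for (int i = 1; i < nLabels; i++)   // label 0 is the background
{
    Rect box(Point(stats.at<int>(i, CC_STAT_LEFT), stats.at<int>(i, CC_STAT_TOP)),
             Size(stats.at<int>(i, CC_STAT_WIDTH), stats.at<int>(i, CC_STAT_HEIGHT)));
    rectangle(boxes, box, Scalar(0, 0, 255), 2);
}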

centroids is an nLabels x 2 Mat containing the x, y coordinates of each component's centroid:

centroids:
(x, y)
[398.8575636060963, 298.8746232484461;
 150, 150;
 550, 300;
 354.5, 250.5;
 300, 400]
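The centroid values are read the same way, with the x coordinate in column 0 and the y coordinate in column 1 of each row, for example:

for (int i = 1; i < nLabels; i++)   // label 0 is the background
{
    double cx = centroids.at<double>(i, 0);   // x
    double cy = centroids.at<double>(i, 1);   // y
    cout << "component " << i << " centroid: (" << cx << ", " << cy << ")" << endl;
}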

Finally, at some point you'll probably want to do further processing on one of the components individually. Here I use compare to generate a new Mat, only2, which contains only the pixels from labels whose label value is 2:

compare(labels, 2, only2, CMP_EQ);

compare helpfully sets those pixels to a value of 255 in the new image, so you can see the result:

Only component 2
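Since compare produces a 0/255 mask, you can also use only2 directly as a mask to copy just that component's pixels out of another image, e.g.:

Mat component2;
src.copyTo(component2, only2);   // copies only the pixels where only2 is non-zero

(In this toy image the result looks the same as the mask itself, but on a real photo it would isolate the component's original pixel values.)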

Here is the complete code:

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

using namespace std;
using namespace cv;

int main(int argc, const char * argv[]) {

    // Create an image
    const int color_white = 255;
    Mat src = Mat::zeros(600, 800, CV_8UC1);

    rectangle(src, Point(100, 100), Point(200, 200), color_white, CV_FILLED);
    rectangle(src, Point(500, 150), Point(600, 450), color_white, CV_FILLED);
    rectangle(src, Point(350,250), Point(359,251), color_white, CV_FILLED);
    rectangle(src, Point(354,246), Point(355,255), color_white, CV_FILLED);
    circle(src, Point(300, 400), 75, color_white, CV_FILLED);

    imshow("Original", src);

    // Get connected components and stats
    const int connectivity_8 = 8;
    Mat labels, stats, centroids;

    int nLabels = connectedComponentsWithStats(src, labels, stats, centroids, connectivity_8, CV_32S);

    cout << "Number of connected components = " << nLabels << endl << endl;

    cout << "Show label values:" << endl;
    int component1Pixel = labels.at<int>(150,150);
    cout << "pixel at(150,150) = " << component1Pixel << endl;
    int component2Pixel = labels.at<int>(300,550);
    cout << "pixel at(300,550) = " << component2Pixel << endl << endl;

    // Statistics
    cout << "Show statistics and centroids:" << endl;
    cout << "stats:" << endl << "(left,top,width,height,area)" << endl << stats << endl << endl;
    cout << "centroids:" << endl << "(x, y)" << endl << centroids << endl << endl;

    // Print individual stats for component 1 (component 0 is background)
    cout << "Component 1 stats:" << endl;
    cout << "CC_STAT_LEFT   = " << stats.at<int>(1,CC_STAT_LEFT) << endl;
    cout << "CC_STAT_TOP    = " << stats.at<int>(1,CC_STAT_TOP) << endl;
    cout << "CC_STAT_WIDTH  = " << stats.at<int>(1,CC_STAT_WIDTH) << endl;
    cout << "CC_STAT_HEIGHT = " << stats.at<int>(1,CC_STAT_HEIGHT) << endl;
    cout << "CC_STAT_AREA   = " << stats.at<int>(1,CC_STAT_AREA) << endl;

    // Create image with only component 2
    Mat only2;
    compare(labels, 2, only2, CMP_EQ);

    imshow("Component 2", only2);

    waitKey(0);

}
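Tying this back to your original MATLAB snippet, one possible sketch of the over/undersized-object removal using the stats output (continuing with the Mats from the test program above; keptMask is just an illustrative name, and the thresholds mirror your maxLabelSize/minLabelSize):

// Keep only labels whose area lies within [minLabelSize, maxLabelSize],
// mirroring the MATLAB loop that sets over/undersized labels to 0
double maxLabelSize = (src.rows / 4.0) * (src.cols / 6.0);
double minLabelSize = maxLabelSize / 100.0;   // same as ./[4 6]./10

Mat keptMask = Mat::zeros(labels.size(), CV_8UC1);
for (int i = 1; i < nLabels; i++)             // label 0 is the background
{
    int area = stats.at<int>(i, CC_STAT_AREA);
    if (area >= minLabelSize && area <= maxLabelSize)
    {
        Mat mask;
        compare(labels, i, mask, CMP_EQ);     // pixels belonging to label i
        keptMask.setTo(255, mask);
    }
}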
Answered 2015-05-15T17:39:02.380

I used the code below; it may be time-consuming depending on your image size.

Mat labels;
Mat stats;
Mat centroids;
int nLabels = cv::connectedComponentsWithStats(bin_img, labels, stats, centroids,4);// bin_img is a binary image

vector<int> test;          // labels that pass the area filter
int vector_size = 0;

for (int i = 0; i < stats.rows; i++)
{
    int area = stats.at<int>(Point(4, i));        // CC_STAT_AREA

    double cent_x = centroids.at<double>(i, 0);
    double cent_y = centroids.at<double>(i, 1);

    // Read the label under the centroid; at() takes (row, col) = (y, x).
    // Note: for non-convex shapes the centroid may fall outside the component.
    int temp_label_num = labels.at<int>((int)cent_y, (int)cent_x);

    if (area > 80 && temp_label_num > 0)          // your desired minimum area; label 0 is the background
    {
        test.push_back(temp_label_num);
        vector_size++;
    }
}

// Build an output mask containing only the labels that passed the filter
Mat temp = Mat::zeros(labels.size(), CV_8UC1);
for (int i = 0; i < labels.rows; i++)
  for (int j = 0; j < labels.cols; j++)
    for (int k = 0; k < vector_size; k++)
      if (labels.at<int>(i, j) == test[k])
      {
        temp.at<uchar>(i, j) = 255;
      }
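If the triple loop turns out to be too slow, one possible alternative under the same assumptions is to build the output mask with one compare per kept label instead of testing every pixel against every kept label, roughly:

cv::Mat temp = cv::Mat::zeros(labels.size(), CV_8UC1);
for (size_t k = 0; k < test.size(); k++)
{
    cv::Mat mask;
    cv::compare(labels, test[k], mask, cv::CMP_EQ);  // 255 where labels == test[k]
    temp.setTo(255, mask);
}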
Answered 2020-08-17T11:20:46.770