-2

大家好。我在下面给出了用于跟踪对象的代码,它同时显示了背景减除的结果。这里我使用的是帧差法。现在我的问题是:我必须从彩色视频文件中提取移动对象。分割我已经做好了,但为了后续的检测,我想把绘制了边界框的那块区域提取出来。希望有人能帮帮我,先谢谢大家。

 int main(int argc, char* argv[])
               {



                CvSize imgSize;
            //CvCapture *capture =     cvCaptureFromFile("S:\\offline object detection database\\video1.avi");
            CvCapture *capture =     cvCaptureFromFile("S:\\offline object detection database\\SINGLE PERSON Database\\Walk1.avi");

                if(!capture){
                 printf("Capture failure\n");
                 return -1;
                }

                 IplImage* frame=0;
                 frame = cvQueryFrame(capture);           
                 if(!frame)
         return -1;

                 imgSize = cvGetSize(frame);

                 IplImage* greyImage = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);
                 IplImage* colourImage;
                 IplImage* movingAverage = cvCreateImage( imgSize, IPL_DEPTH_32F, 3);
                 IplImage* difference;
                 IplImage* temp;
                 IplImage* motionHistory = cvCreateImage( imgSize, IPL_DEPTH_8U, 3);

                 CvRect bndRect = cvRect(0,0,0,0);
                 CvPoint pt1, pt2;
                 CvFont font;
                 int prevX = 0;
                 int numPeople = 0;
                 char wow[65];
                 int avgX = 0;
                 bool first = true;                   
                 int closestToLeft = 0;
                 int closestToRight = 320;


                 for(;;)
                {
                colourImage = cvQueryFrame(capture);
                if( !colourImage )
                {
                     break;
                }
                if(first)
                {
                 difference = cvCloneImage(colourImage);
                 temp = cvCloneImage(colourImage);
                 cvConvertScale(colourImage, movingAverage, 1.0, 0.0);
                 first = false;
                }
                else
                {
                cvRunningAvg(colourImage, movingAverage, 0.020, NULL);
                }
                cvConvertScale(movingAverage,temp, 1.0, 0.0);
            cvAbsDiff(colourImage,temp,difference);     
                cvCvtColor(difference,greyImage,CV_RGB2GRAY);       
                cvThreshold(greyImage, greyImage, 80, 250, CV_THRESH_BINARY);
        cvSmooth(greyImage, greyImage,2);
                cvDilate(greyImage, greyImage, 0, 1);
                cvErode(greyImage, greyImage, 0, 1);
        cvShowImage("back", greyImage);
                CvMemStorage* storage = cvCreateMemStorage(0);
                CvSeq* contour = 0;     
                cvFindContours( greyImage, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

        for( ; contour != 0; contour = contour->h_next )
                {
                bndRect = cvBoundingRect(contour, 0);          
            pt1.x = bndRect.x;
                pt1.y = bndRect.y;
                pt2.x = bndRect.x + bndRect.width;
                pt2.y = bndRect.y + bndRect.height;         
                avgX = (pt1.x + pt2.x) / 2;

                if(avgX > 90 && avgX < 250)
                {
                if(closestToLeft >= 88 && closestToLeft <= 90)
                {
                 if(avgX > prevX)
                {
                 numPeople++;
                 closestToLeft = 0;
                }
                }
                else if(closestToRight >= 250 && closestToRight <= 252)
                {
                if(avgX < prevX)
                {
                numPeople++;
                closestToRight = 220;
                }
                }                          
                cvRectangle(colourImage, pt1, pt2, CV_RGB(255,0,0), 1);
                }
                if(avgX > closestToLeft && avgX <= 90)
                {
                 closestToLeft = avgX;
                 }
                 if(avgX < closestToRight && avgX >= 250)
                 {
                 closestToRight = avgX;
                 }
                 prevX = avgX;
                 }
                 cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2);
                 cvPutText(colourImage, _itoa(numPeople, wow, 10), cvPoint(60, 200), &font, cvScalar(0, 0, 300));
                 cvShowImage("My Window", colourImage);
         cvShowImage("fore", greyImage); 
                 cvWaitKey(10);                     
                  }
                  cvReleaseImage(&temp);
                  cvReleaseImage(&difference);
                  cvReleaseImage(&greyImage);
                  cvReleaseImage(&movingAverage);
                  cvDestroyWindow("My Window");
                  cvReleaseCapture(&capture);   
                  return 0;     
                  }
4

2 回答 2

1

在 OpenCV 的传统 C API 中,您可以使用下面这个函数从图像中提取感兴趣区域(ROI)。在您的代码中只需添加这一行,之后大多数 OpenCV 操作就会把该图像当作只包含所提取区域来处理:

cvSetImageROI(colourImage, bndRect);

在 OpenCV 2.0 API 中,您的旧图像和“提取区域”图像将存储在单独的 Mat 对象中,但指向相同的数据:

Mat colourImage, extractedregion;
colourImage = imread("test.bmp");
extractedregion = colourImage(bndRect);  // Creates only a header, no new image data

许多有用的 OpenCV 教程都使用旧版 API,但您应该优先使用新版 API。

于 2013-05-24T12:30:45.183 回答
0

我知道如何使用新的 OpenCV 界面而不是您正在使用的“旧版”界面来做到这一点。它会是这样的:

cv::Mat frame_m(frame);
...

cv::Mat region_m = frame_m(cv::Rect(bndRect));
IplImage region = region_m; // use &region when an IplImage* is needed.

如果您不想混合界面,那么是时候学习新的界面了。

于 2013-05-24T11:27:25.200 回答