I am currently working on a human fall detection project (based on this article and this article). I am doing it in C++ with OpenCV. I have been stuck on computing the motion coefficient, and I don't fully understand how it is derived:
- in the first article, I understand it to be the ratio of the sum of all pixels of the detected blob in the MHI (Motion History Image) to the number (sum?) of all pixels of the detected blob,
- in the second, it is the ratio of the sum of the detected blob's pixels in the MHI to the sum of all pixels of the detected blob (I sketched how I read these two variants right below this list).
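To make my reading concrete, here is a minimal sketch of interpretation (1), assuming blob_mask is a CV_8U binary mask of the detected blob (the helper name cmotion and the mask are hypothetical, not taken from either article):

#include <opencv2/core.hpp>
using namespace cv;

//hypothetical helper: sum of the blob's MHI values divided by the number of blob pixels
double cmotion(const Mat& mhi, const Mat& blob_mask)
{
    Mat mhi_blob;
    mhi.copyTo(mhi_blob, blob_mask);                       //keep MHI values only inside the blob
    double mhi_sum = sum(mhi_blob)[0];                     //sum of the blob's MHI pixels
    double blob_pixels = (double)countNonZero(blob_mask);  //number of blob pixels
    return blob_pixels > 0 ? mhi_sum / blob_pixels : 0.0;
}

If the mask really is 0/1, interpretation (2) (dividing by the sum of the blob pixels instead of their count) gives exactly the same number, since the sum of a 0/1 mask equals its non-zero count - which is part of why I am unsure which reading is intended.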
The problem is that, no matter what I compute, my code does not produce anything that comes close to a sensible motion coefficient. Maybe my understanding of these equations is simply wrong.
My current code:
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/video.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv/cv.h>
#include <opencv2/optflow.hpp>
#include <iostream>
#include <fstream>
#include <sstream>
#include <conio.h>
#define MHI_DURATION 0.5
using namespace cv;
using namespace cv::motempl;
using namespace std;
char key;
char mode;
short frames=0;
//text file for logging the motion coefficient (Cmotion)
ofstream coeff("motion_coeff.txt");
//MOG2 background subtractor (history = 3000 frames, varThreshold = 64)
Ptr<BackgroundSubtractorMOG2> pMOG2 = createBackgroundSubtractorMOG2(3000, 64);
int main()
{
//initial Mats
Mat frame;
Mat mask, bin_mask;
Mat bg;
Mat gray;
Mat mhi;
Mat eroded, dilated;
VideoCapture cap("video.mp4");
//VideoCapture cap;
//cap.open(0);
while ((char)key != 'q' && (char)key != 27) {
key = 0;
frames += 1;
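//note: clock() measures processor time (on most platforms), so this timestamp and MHI_DURATION are not necessarily wall-clock seconds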
double timestamp = (double)clock() / CLOCKS_PER_SEC;
//guard against a missing video source
if (!cap.isOpened()) {
cerr << "Undefined video source!";
exit(EXIT_FAILURE);
}
//guard against a missing next frame
if (!cap.read(frame)) {
cerr << "Cannot read next frame!";
exit(EXIT_FAILURE);
}
//main algorithm
//additional helper Mats and variables
Mat eroded, dilated;
Mat mask, bin_mask;
double mot_coeff = 0.0;
double mhi_sum = 0.0;
double fg_sum = 0.0;
//resize the MHI to match the frame format and dimensions
Size size = frame.size();
if (mhi.size() != size) {
mhi = Mat::zeros(size, CV_32F);
}
//morphological cleaning of the image (6x6 rectangular structuring element for the dilation)
erode(frame, eroded, Mat(), Point(-1, -1), 3);
dilate(eroded, dilated, getStructuringElement(MORPH_RECT, Size(6, 6)), Point(-1, -1), 3);
//apply the MOG2 (Gaussian mixture) background subtractor to the image
pMOG2->apply(dilated, mask);
//threshold
//binarization and search for the largest contour
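//maxval = 1 gives a 0/1 mask; updateMotionHistory only checks whether a pixel is non-zero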
threshold(mask, bin_mask, 30, 1, THRESH_BINARY);
//calculate the MHI (Motion History Image)
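//updateMotionHistory stamps the current timestamp into mhi wherever bin_mask is
//non-zero and zeroes out entries older than timestamp - MHI_DURATION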
updateMotionHistory(bin_mask, mhi, timestamp, MHI_DURATION);
//two ways of calculating Cmotion
//count the white pixels (the detected shape) in both images
double white_mhi = (mhi.rows * mhi.cols) - countNonZero(mhi);
double white_fg = (bin_mask.rows * bin_mask.cols) - countNonZero(bin_mask);
/*for (int i = 0; i < mhi.rows; i++) {
for (int j = 0; j < mhi.cols; j++) {
mhi_sum += mhi.at<float>(i, j);
fg_sum += bin_mask.at<uchar>(i, j);
}
}*/
//mot_coeff = white_mhi / white_fg;
mot_coeff = white_mhi / white_fg;
vector<vector<Point>> contours;
double largest_contour = 0;
int largest_id = 0;
findContours(bin_mask, contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
for (int i = 0; i < contours.size(); i++) {
double area = contourArea(contours[i], false);
if (area > largest_contour) {
largest_contour = area;
largest_id = i;
}
}
//fit an ellipse to the biggest contour
if (contours.size() > 0 && contours[largest_id].size() >= 5) {
RotatedRect ell;
ell = fitEllipse(contours[largest_id]);
ellipse(frame, ell, Scalar(0, 0, 255), 2, 8);
}
stringstream ss, ss1, ss2;
ss << "Motion coeff: " << mot_coeff;
ss1 << "MHI blob pixels: " << white_mhi;
ss2 << "FG blob pixels: " << white_fg;
//ss1 << "MHI blob pixels: " << mhi_sum;
//ss2 << "FG blob pixels: " << fg_sum;
putText(frame, ss.str(), Point(10, 10), FONT_HERSHEY_DUPLEX, 0.5, Scalar(0, 0, 255), 1, 8);
putText(frame, ss1.str(), Point(10, 30), FONT_HERSHEY_DUPLEX, 0.5, Scalar(0, 0, 255), 1, 8);
putText(frame, ss2.str(), Point(10, 50), FONT_HERSHEY_DUPLEX, 0.5, Scalar(0, 0, 255), 1, 8);
//write the Cmotion value for this frame to the text file
coeff << "Motion coefficient at frame " << frames << ": " << mot_coeff << "\n";
//showing results
imshow("Original image", frame);
imshow("MOG2 mask", mask);
imshow("MHI", mhi);
//wait for a key press
key = (char)waitKey(30);
}
cap.release();
cv::destroyAllWindows();
return 0;
}
I would really appreciate any insight into what the algorithm should be doing!
Thanks in advance!