This is the code that runs when the exam page opens. I load the faces straight from my folder to identify who the candidate is, and everyone registered in the system is recognized without a problem, but I have no way of telling when the person in front of the camera is not the candidate.
My program consists of a login form, a registration form and an exam form (C# with EmguCV). I log in with Kelvin's username and password (Kelvin is already registered in the system) and go to the exam form. Once the exam starts the camera turns on and face detection recognizes the person as Kelvin. The problem is that when a third person sits in front of the camera, whether they are registered or not, the recognizer still labels them as Kelvin or as some other registered person instead of treating them as a stranger. What I want is: if the person is not Kelvin, the system should pause or show a message saying "Wrong candidate detected, click OK to detect again" (I put a rough sketch of the check I have in mind right after the exam-page code below). Sorry for the broken English, it may be hard to follow, but thanks for your help.
void FrameGrabber(object sender, EventArgs e)
{
    label3.Text = "0";
    //label4.Text = "";
    NamePersons.Add("");

    // Get the current frame from the capture device
    currentFrame = grabber.QueryFrame().Resize(240, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

    // Convert it to grayscale
    gray = currentFrame.Convert<Gray, Byte>();

    // Face detector
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
        face,
        1.2,
        10,
        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
        new Size(20, 20));

    // Action for each face detected
    foreach (MCvAvgComp f in facesDetected[0])
    {
        t = t + 1;
        result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

        // Draw a rectangle around the detected face
        currentFrame.Draw(f.rect, new Bgr(Color.LightGreen), 2);

        if (trainingImages.ToArray().Length != 0)
        {
            // Termination criteria for the recognizer: the number of trained images is used as the max iteration count
            MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

            // Eigenface recognizer
            EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                trainingImages.ToArray(),
                labels.ToArray(),
                3000,
                ref termCrit);

            name = recognizer.Recognize(result);

            // Draw the label for each face detected and recognized
            currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
        }

        NamePersons[t - 1] = name;
        NamePersons.Add("");

        // Show the number of faces detected in the scene
        label3.Text = facesDetected[0].Length.ToString();
    }
    t = 0;
}
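Roughly, this is the kind of check I am hoping to add right after name = recognizer.Recognize(result). It is only a sketch of the idea: loggedInUser (the username entered on the login form) and the PauseExam()/ResumeDetection() helpers do not exist in my project yet, they are just placeholders.

// Sketch only - I would call this right after name = recognizer.Recognize(result) in FrameGrabber.
// loggedInUser is assumed to hold the username from the login form;
// PauseExam() and ResumeDetection() are hypothetical helpers I still have to write.
private void VerifyCandidate(string recognizedName)
{
    bool isCandidate = !string.IsNullOrEmpty(recognizedName)
        && recognizedName.Equals(loggedInUser, StringComparison.OrdinalIgnoreCase);

    if (!isCandidate)
    {
        PauseExam();   // hypothetical: stop the exam timer / lock the question controls

        MessageBox.Show(
            "Wrong candidate detected, click OK to detect again",
            "Candidate check",
            MessageBoxButtons.OK,
            MessageBoxIcon.Warning);

        ResumeDetection();   // hypothetical: let FrameGrabber keep running
    }
}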
This is the registration form:
private void button2_Click(object sender, System.EventArgs e)
{
    try
    {
        // Trained face counter
        ContTrain = ContTrain + 1;

        // Get a gray frame from the capture device
        gray = grabber.QueryGrayFrame().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

        // Face detector
        MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
            face,
            1.2,
            10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));

        // Take the first face detected
        foreach (MCvAvgComp f in facesDetected[0])
        {
            TrainedFace = currentFrame.Copy(f.rect).Convert<Gray, byte>();
            break;
        }

        // Resize the detected face so it is compared at the same size as the
        // test image (cubic interpolation)
        TrainedFace = result.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
        trainingImages.Add(TrainedFace);
        labels.Add(textBox1.Text);

        // Show the face that was added, in grayscale
        imageBox1.Image = TrainedFace;

        // Write the number of trained faces to a text file for later loading
        File.WriteAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", trainingImages.ToArray().Length.ToString() + "%");

        // Write the labels of the trained faces to the same text file for later loading
        for (int i = 1; i < trainingImages.ToArray().Length + 1; i++)
        {
            trainingImages.ToArray()[i - 1].Save(Application.StartupPath + "/TrainedFaces/face" + i + ".bmp");
            File.AppendAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", labels.ToArray()[i - 1] + "%");
        }

        MessageBox.Show(textBox1.Text + " face detected and added", "Training OK", MessageBoxButtons.OK, MessageBoxIcon.Information);

        // NOTE: the username and password are concatenated straight into the SQL string;
        // a parameterized query would be safer.
        OleDbCommand cmd = new OleDbCommand();
        cmd.CommandType = CommandType.Text;
        cmd.CommandText = "insert into Login (username,[password]) values ('" + textBox1.Text + "','" + textBox4.Text + "')";
        cmd.Connection = conn;
        conn.Open();
        cmd.ExecuteNonQuery();
        MessageBox.Show("User Account Successfully Created", "Caption", MessageBoxButtons.OKCancel, MessageBoxIcon.Information);
        conn.Close();

        textBox1.Clear();
        textBox4.Clear();
        button2.Enabled = false;
        button3.Enabled = true;
    }
    catch
    {
        MessageBox.Show("Enable the face detection first", "Training Fail", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
    }
}
void FrameGrabber(object sender, EventArgs e)
{
    label3.Text = "0";
    //label4.Text = "";
    NamePersons.Add("");

    // Get the current frame from the capture device
    currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

    // Convert it to grayscale
    gray = currentFrame.Convert<Gray, Byte>();

    // Face detector
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
        face,
        1.2,
        10,
        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
        new Size(20, 20));

    // Action for each face detected
    foreach (MCvAvgComp f in facesDetected[0])
    {
        t = 1;
        result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

        // Draw a rectangle around the detected face
        currentFrame.Draw(f.rect, new Bgr(Color.LightGreen), 2);

        if (trainingImages.ToArray().Length != 0)
        {
            // Termination criteria for the recognizer: the number of trained images is used as the max iteration count
            MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

            // Eigenface recognizer
            EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                trainingImages.ToArray(),
                labels.ToArray(),
                3000,
                ref termCrit);

            name = recognizer.Recognize(result);

            // Draw the label for each face detected and recognized
            currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
        }

        NamePersons[t - 1] = name;
        NamePersons.Add("");

        // Show the number of faces detected in the scene
        label3.Text = facesDetected[0].Length.ToString();
    }
    t = 0;

    // Concatenate the names of the persons recognized
    for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
    {
        names = names + NamePersons[nnn] + ", ";
    }

    // Show the processed and recognized faces
    imageBoxFrameGrabber.Image = currentFrame;
    label4.Text = names;
    names = "";

    // Clear the list of names
    NamePersons.Clear();
}
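One more thing I am not sure about: from what I have read, the 3000 I pass to the EigenObjectRecognizer constructor is the eigen distance threshold, and Recognize() seems to return an empty string when no trained face is within that threshold. If that is correct, maybe tightening the threshold and checking for an empty name would already catch a stranger, something like this (just a guess, I have not tested it):

// Guess, not tested: a stricter eigen distance threshold so that faces that are
// not close enough to any trained image come back with an empty label.
// The value 2000 is made up; it would need experimenting.
EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
    trainingImages.ToArray(),
    labels.ToArray(),
    2000,
    ref termCrit);

name = recognizer.Recognize(result);
if (string.IsNullOrEmpty(name))
{
    // No trained face matched closely enough - treat it as a wrong/unknown candidate.
}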