0

我正在做一个项目:当瘫痪患者连续眨眼 2 次或 3 次时执行相应命令(目前只是打印"用户单击:是")。眨眼检测部分我使用了 pyimagesearch 网站上 "Adrian Rosebrock" 的代码。我已经对其做了很多修改,但卡在了检测连续 2 次眨眼或连续 3 次眨眼的逻辑上,找不到从网络摄像头视频或直播流中检测它们的方法。这个概念的思路见图片:Blinking Idea Project 的图像。作为输入,我录制了一段视频:一个人先单次眨眼,然后连续眨眼两次,再连续眨眼三次。我的目标是检测出所有这些眨眼:把连续两次眨眼归类为"双次眨眼",把连续三次眨眼归类为"三次眨眼"。之后我会为它们分配不同的命令:单次眨眼执行一个命令,双次眨眼执行另一个命令,三次眨眼执行第三个命令。完整代码如下:

# import the necessary packages
from scipy.spatial import distance as dist
from imutils.video import FileVideoStream
from imutils.video import VideoStream
from imutils import face_utils
import numpy as np
import argparse
import imutils
import timeit
import time
import dlib
import cv2
import tkinter as tk
from tkinter import filedialog
import sys

# Create a hidden Tk root so filedialog.askopenfilename() can be used
# without an empty tkinter window appearing on screen.
root = tk.Tk()
root.withdraw()
# Text drawn after "User Clicked:" on the video frame (re-assigned below).
click = 'YES'

def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) for six (x, y) eye landmarks.

    EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|): the average of the two
    vertical landmark distances over the horizontal distance.  The ratio
    falls toward zero as the eye closes, which is what the blink detector
    thresholds on.
    """
    # Vertical distances between the upper and lower eyelid landmarks.
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])

    # Horizontal distance between the two eye corners.
    horizontal = dist.euclidean(eye[0], eye[3])

    return (vertical_a + vertical_b) / (2.0 * horizontal)

# The original argparse interface was replaced by GUI file pickers below;
# kept for reference.
# ap = argparse.ArgumentParser()
# ap.add_argument("-p", "--shape-predictor", required=True,
#   help="path to facial landmark predictor")
# ap.add_argument("-v", "--video", type=str, default="",
#   help="path to input video file")
# args = vars(ap.parse_args())
# First dialog: path to the dlib 68-landmark model file.
shape_predictor =  filedialog.askopenfilename()

# Second dialog: path to the input video file to analyze.
video = filedialog.askopenfilename()


# Mode selection: which blink pattern ("1"/"2"/"3") should trigger the click.
USER_INPUT = input("Please select a mode 1 = Single Blink , 2 = Double Blink, 3 = Triple Blink:\n ")
# define two constants, one for the eye aspect ratio to indicate
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold
EYE_AR_THRESH = 0.3
# if USER_INPUT == "1":
#   EYE_AR_CONSEC_FRAMES = 6
# elif USER_INPUT == "2":
#   EYE_AR_CONSEC_FRAMES = 3
# elif USER_INPUT == "3":
#   EYE_AR_CONSEC_FRAMES = 0

# Frame/blink counters shared by the main loop below.
COUNTER = 0                # consecutive frames with eyes closed
TOTAL = 0                  # total single blinks
D_TOTAL = 0
D_COUNT = 0                # detected double blinks
T_TOTAL = 0                # detected triple blinks
SPEED_BLINK_COUNTER = 0    # value drawn on the frame for the active mode
S_blink = []
S_blink_show = 0
click = ''
D_blink = []
D_time = []


# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
#predictor = dlib.shape_predictor(args["shape_predictor"])
predictor =  dlib.shape_predictor(shape_predictor)


# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

# start the video stream thread
print("[INFO] starting video stream thread...")
#vs = FileVideoStream(args["video"]).start()
vs = FileVideoStream(video).start()

#fileStream = True
#vs = VideoStream(src=0).start()
#vs = VideoStream(usePiCamera=True).start()
# NOTE(review): fileStream is False even though a FileVideoStream is used,
# so the vs.more() end-of-file check in the loop never runs — confirm intent.
fileStream = False
time.sleep(1.0)

# loop over frames from the video stream
# --- Blink clustering -------------------------------------------------------
# A blink is registered when the eyes reopen after being closed for at least
# MIN_CLOSED_FRAMES consecutive frames.  Blinks whose timestamps fall within
# GROUP_WINDOW seconds of each other form one cluster; a cluster of size
# 1 / 2 / 3 is classified as a single / double / triple blink.
#
# Bug fixes vs. the original loop:
#  * `D_blink = S_blink.append(...)` — list.append returns None, so the
#    double-blink branch could never run.
#  * `for i in D_time: D_time[i + 1]` — iterated float *values* were used as
#    list indices (TypeError/IndexError).
#  * `timeit.timeit(str(n))` times evaluation of a number literal; it is not
#    a blink timestamp.  Real wall-clock times are used instead.
#  * `COUNTER == 0.5` can never be true for an integer counter, so the
#    triple-blink branch was unreachable.
MIN_CLOSED_FRAMES = 2
GROUP_WINDOW = 0.6          # max seconds between blinks of the same cluster
blink_times = []            # timestamps of blinks in the current cluster

# loop over frames from the video stream
while True:
    # if this is a file video stream, then we need to check if
    # there are any more frames left in the buffer to process
    if fileStream and not vs.more():
        break

    # grab the frame from the threaded video file stream, resize
    # it, and convert it to grayscale
    frame = vs.read()
    if frame is None:
        # FileVideoStream yields None once the file is exhausted.
        break
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame
    rects = detector(gray, 0)

    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks, then convert them to a NumPy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)

        # average the eye aspect ratio together for both eyes
        ear = (leftEAR + rightEAR) / 2.0

        # visualize the convex hull of each eye
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

        if ear < EYE_AR_THRESH:
            # eyes closed on this frame
            COUNTER += 1
        else:
            # Eyes just reopened: count it as a blink if they stayed closed
            # long enough to rule out detector noise.
            if COUNTER >= MIN_CLOSED_FRAMES:
                blink_times.append(time.time())
            COUNTER = 0

        # draw mode label and the counter for the selected mode
        MODE_text = "Blink Mode"
        if USER_INPUT == "1":
            SPEED_BLINK_COUNTER = TOTAL
            MODE_text = "Single Blink Mode"
        elif USER_INPUT == "2":
            SPEED_BLINK_COUNTER = D_COUNT
            MODE_text = "Double Blink Mode"
        elif USER_INPUT == "3":
            SPEED_BLINK_COUNTER = T_TOTAL
            MODE_text = "Triple Blink Mode"

        cv2.putText(frame, "Blinks: {}".format(SPEED_BLINK_COUNTER), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.rectangle(frame, (0, 250), (450, 350), (104, 104, 104), thickness=-1, lineType=8, shift=0)
        cv2.putText(frame, "User Clicked: {}".format(click), (220, 270),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, MODE_text, (10, 270),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

    # Close the current cluster once GROUP_WINDOW seconds have passed since
    # the last blink, then classify it by its size.  This runs outside the
    # face loop so the cluster still closes on frames with no detection.
    # NOTE(review): wall-clock time is used; for a file played faster or
    # slower than real time, derive timestamps from the frame rate instead.
    if blink_times and (time.time() - blink_times[-1]) > GROUP_WINDOW:
        n_blinks = len(blink_times)
        blink_times = []
        if n_blinks == 1:
            TOTAL += 1
        elif n_blinks == 2:
            D_COUNT += 1
        elif n_blinks >= 3:
            T_TOTAL += 1
        # Fire the "click" only when the cluster size matches the chosen mode.
        click = 'YES' if str(n_blinks) == USER_INPUT else ''

    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(2) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()

在 Furas 先生发表评论后,我修改了代码,下面是我目前的版本。我现在在比较眨眼时间戳列表:对相邻索引的值做同样的减法来求眨眼间隔,但每次新眨眼时这些减法都会对整个列表重复一遍。

    # NOTE(review): fragment from the question — assumes `start`, `blink_time`
    # and `time_mid` are defined earlier in the full script; confirm there.
    # Register a blink once the eyes reopen after >= 2 closed frames.
    if COUNTER >= 2:
        S_blink.append(TOTAL + 1)      # running blink tally
        S_blink_show = sum(S_blink)
        end = time.time()
        # Timestamp of this blink relative to `start` (presumably set when
        # the stream began — TODO confirm).
        blink_time.append(end - start)
        # print("Blink time: " + str(blink_time))
        print("This is [::]: " + str(blink_time[::]))
        print("This is [1:]: " + str(blink_time[1:]))
        # Gaps between consecutive blink timestamps.
        # NOTE(review): this recomputes ALL pairwise gaps on every blink, so
        # time_mid accumulates duplicate entries each time a blink occurs;
        # only the newest gap (blink_time[-1] - blink_time[-2]) is new.
        for i, j in zip(blink_time[0:], blink_time[1:]):
            #print("I'm i: " + str(i), "I'm j: " + str(j))
            time_mid.append(j - i)
            print("Time Medium: " + str(time_mid))

我现在没有主意了。任何形式的帮助将不胜感激。谢谢你

4

0 个回答