
I'm building a face liveness detection program with OpenCV and Keras, and I'm stuck on this error:

OpenCV assertion failed with negative values

I have tried every suggested answer I could find on Stack Overflow and in GitHub issues, but none of them solved the problem in my case.

import os
from collections import defaultdict

import cv2
import face_recognition
from tqdm import tqdm


def init():
    # (the cascade classifiers, load_model(), predict() and isBlinking()
    # are defined elsewhere in the file and omitted here)
    video_capture = cv2.VideoCapture(0)

    model = load_model()

    print("[LOG] COLLECTING images.....")
    images = []
    for direc, _, files in tqdm(os.walk(dataset)):
        for file in files:
            if file.endswith("jpg"):
                images.append(os.path.join(direc, file))

    return model, face_detector, open_eyes_detector, left_eye_detector, right_eye_detector, video_capture, images


def process_and_encode(images):
    known_encodings = []
    known_names = []
    print("[LOG] Encoding faces....")

    for image_path in tqdm(images):

        image = cv2.imread(image_path)

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        boxes = face_recognition.face_locations(image, model='hog')
        encoding = face_recognition.face_encodings(image, boxes)
        name = image_path.split(os.path.sep)[-2]
        if len(encoding) > 0:
            known_encodings.append(encoding[0])
            known_names.append(name)
    return {"encodings": known_encodings, "names": known_names}



def detect_and_display(model, video_capture, face_detector, open_eyes_detector, left_eye_detector, right_eye_detector,
                       data, eyes_detected):
    ret, frame = video_capture.read()  # cv2.VideoCapture.read() returns (ret, frame); the frame must be unpacked

    try:
        frame = cv2.resize(frame, (0, 0), fx=0.6, fy=0.6)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        faces = face_detector.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(50, 50),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            encoding = face_recognition.face_encodings(rgb, [(y, x + w, y + h, x)])

            matches = face_recognition.compare_faces(data["encodings"], encoding)

            name = "Unknown"

            if True in matches:
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1
                name = max(counts, key=counts.get)

            face = frame[y:y + h, x:x + w]
            gray_face = gray[y:y + h, x:x + w]

            eyes = []

            open_eyes_glasses = open_eyes_detector.detectMultiScale(
                gray_face,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            if len(open_eyes_glasses) == 2:
                eyes_detected[name] += '1'
                for (ex, ey, ew, eh) in open_eyes_glasses:
                    cv2.rectangle(face, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)

                # otherwise try detecting eyes using left and right_eye_detector
                # which can detect open and closed eyes
            else:
                # separate the face into left and right sides
                left_face = frame[y:y + h, x + int(w / 2):x + w]
                left_face_gray = gray[y:y + h, x + int(w / 2):x + w]

                right_face = frame[y:y + h, x:x + int(w / 2)]
                right_face_gray = gray[y:y + h, x:x + int(w / 2)]

                # Detect the left eye
                left_eye = left_eye_detector.detectMultiScale(
                    left_face_gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE
                )

                # Detect the right eye
                right_eye = right_eye_detector.detectMultiScale(
                    right_face_gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE
                )
                eye_status = '1'  # we suppose the eyes are open

                # For each eye, check whether the eye is closed.
                # If one is closed we conclude the eyes are closed
                for (ex, ey, ew, eh) in right_eye:
                    color = (0, 255, 0)
                    pred = predict(right_face[ey:ey + eh, ex:ex + ew], model)
                    if pred == 'closed':
                        eye_status = '0'
                        color = (0, 0, 255)
                    cv2.rectangle(right_face, (ex, ey), (ex + ew, ey + eh), color, 2)
                for (ex, ey, ew, eh) in left_eye:
                    color = (0, 255, 0)
                    pred = predict(left_face[ey:ey + eh, ex:ex + ew], model)
                    if pred == 'closed':
                        eye_status = '0'
                        color = (0, 0, 255)
                    cv2.rectangle(left_face, (ex, ey), (ex + ew, ey + eh), color, 2)
                eyes_detected[name] += eye_status

                # Each time, we check if the person has blinked
                # If yes, we display its name
            if isBlinking(eyes_detected[name], 3):
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                # Display name
                y = y - 15 if y - 15 > 15 else y + 15
                cv2.putText(frame, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)

            return frame

    except Exception as e:
        print(str(e))


if __name__ == "__main__":
    (model, face_detector, open_eyes_detector, left_eye_detector, right_eye_detector, video_capture, images) = init()
    data = process_and_encode(images)

    eyes_detected = defaultdict(str)
    while True:
        frame = detect_and_display(model, video_capture, face_detector, open_eyes_detector, left_eye_detector,
                                   right_eye_detector, data, eyes_detected)

        cv2.imshow("Face Liveness Detector", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    video_capture.release()  # cv2.VideoCapture has release(), not stop()

Error message:

Expected cv::UMat for argument 'src'

Traceback (most recent call last):
  File "C:/Users/Saksham Dubey/PycharmProjects/FacePay/FaceLive.py", line 190, in <module>
    cv2.imshow("Face Liveness Detector", frame)
cv2.error: OpenCV(4.1.0) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:352: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'

[ WARN:0] terminating async callback
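
As far as I can tell, this assertion means cv2.imshow() received an empty image (or None). In the code above, detect_and_display() returns None whenever its except branch runs, so the display loop can at least guard against that. A minimal sketch of such a guard (it avoids the crash, not the underlying detection problem):

# Sketch of a defensive display loop (assumes detect_and_display() may return None):
while True:
    frame = detect_and_display(model, video_capture, face_detector, open_eyes_detector,
                               left_eye_detector, right_eye_detector, data, eyes_detected)
    if frame is None or frame.size == 0:
        continue  # skip frames that failed processing instead of crashing imshow()
    cv2.imshow("Face Liveness Detector", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break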


1 Answer


That is probably because you call imshow() without a prior imwrite(), so it can end up being handed an invalid frame. Not exactly a solution, but here is a working example. Take a look:

import os
import datetime

import cv2  # pip install opencv-python
from cv2.data import haarcascades as hc

cap = cv2.VideoCapture(0)

faceCascade = cv2.CascadeClassifier("%s/haarcascade_frontalface_default.xml" % hc)
eye_cascade = cv2.CascadeClassifier('%s/haarcascade_eye.xml' % hc)
profile_cascade = cv2.CascadeClassifier('%s/haarcascade_profileface.xml' % hc)
fullbody_cascade = cv2.CascadeClassifier('%s/haarcascade_fullbody.xml' % hc)
smile_cascade = cv2.CascadeClassifier('%s/haarcascade_smile.xml' % hc)
eyesglasses_cascade = cv2.CascadeClassifier('%s/haarcascade_eye_tree_eyeglasses.xml' % hc)
# NOTE: haarcascade_mcs_mouth.xml is not shipped with opencv-python's cv2.data,
# so this classifier stays empty unless you provide the file yourself (it is unused below)
mouth_cascade = cv2.CascadeClassifier('%s/haarcascade_mcs_mouth.xml' % hc)

os.makedirs('output', exist_ok=True)  # imwrite() fails silently if the folder is missing
filename = 'output/' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')


def recognizer():
    while True:
        ret, frame = cap.read()
        if not ret:  # stop if the camera returns no frame
            break

        profile_count = recognize_profile(frame)
        face_count, eye_count = recognize_face(frame, True)

        cv2.imwrite('%s.png' % filename, frame)
        image = cv2.imread('%s.png' % filename)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


def recognize_profile(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    profiles = profile_cascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
        # flags = cv2.CV_HAAR_SCALE_IMAGE
    )
    for (x, y, w, h) in profiles:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    return len(profiles)


def recognize_face(frame, recognize_eyes=None):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
        # flags = cv2.CV_HAAR_SCALE_IMAGE
    )
    eyes = []
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if recognize_eyes:
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            # draw bounding boxes around detected features
            for (ex, ey, ew, eh) in eyes:
                eye_center = (ex + ew // 2, ey + eh // 2)
                radius = int(round((ew + eh) * 0.25))
                cv2.circle(roi_color, eye_center, radius, (0, 0, 255), 4)
    return len(faces), len(eyes)


def snapshot():
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:  # no frame captured
            break

        recognize_profile(frame)
        recognize_face(frame, True)
        cv2.imwrite('%s.png' % filename, frame)
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    cap.release()
    cv2.destroyAllWindows()


def live_video_recognizer():
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    out = cv2.VideoWriter('%s.avi' % filename, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
    while True:
        ret, frame = cap.read()

        if ret is True:
            # run the detectors only once we know the frame is valid
            recognize_profile(frame)
            recognize_face(frame, True)

            out.write(frame)

            cv2.imshow('frame', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        else:
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()


recognizer()
# snapshot()
# live_video_recognizer()
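
Note that the imwrite()/imread() round trip in recognizer() is only there to guarantee that imshow() always gets a valid image back from disk. If you check the frame before displaying it, you can skip the disk entirely. A sketch under that assumption (recognizer_inmemory is a made-up name; the helpers are the ones defined above):

def recognizer_inmemory():
    # Same loop as recognizer(), but displays the frame directly; the
    # ret/None check replaces the imwrite()/imread() round trip.
    while True:
        ret, frame = cap.read()
        if not ret or frame is None:
            break  # camera stopped delivering frames
        recognize_profile(frame)
        recognize_face(frame, True)
        cv2.imshow('image', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()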
answered 2019-08-22 at 11:48