I am currently building a Django face detection/recognition application to log employee attendance, but I am running into problems capturing the camera feed and detecting faces on it.
The following function in views.py detects the employee's face and captures 300 images of the logged-in employee. Essentially, this question is about getting the face detection to work, and the failure occurs specifically at face_aligned = fa.align(frame, gray_frame, face).
views.py:
def create_dataset(username):
    id = username
    if not os.path.exists('Recog_Data/Train_Data/{}/'.format(id)):
        os.makedirs('Recog_Data/Train_Data/{}/'.format(id))
    directory = 'Recog_Data/Train_Data/{}/'.format(id)

    # Detect face
    # Load the HOG face detector and the shape predictor for alignment
    print("[INFO] Loading the facial detector")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('Recog_Data/shape_predictor_68_face_landmarks.dat')  # Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
    fa = FaceAligner(predictor, desiredFaceWidth=256)

    # Capture images from the webcam, then process them and detect the face
    # Initialize the video stream
    print("[INFO] Initializing Video stream")
    vs = VideoStream(src=0).start()
    # time.sleep(2.0) ####CHECK######

    # Our identifier
    # We will store the id with a face, so that later we can identify whose face it is
    # Our dataset naming counter
    sampleNum = 0

    # Capture the faces one by one, detect them and show them in the window
    while True:
        # Capture the image
        # vs.read each frame
        frame = vs.read()
        # Resize each image
        frame = imutils.resize(frame, width=800)
        # The returned img is a colored image, but for the classifier to work we need a grayscale image
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # To store the faces
        # This will detect all the faces in the current frame and return their coordinates
        # Takes in the image and some other parameters for an accurate result
        faces = detector(gray_frame, 2)
        # 'faces' can contain multiple faces, so we iterate over each one and draw a rectangle around it
        for face in faces:
            print("inside for loop")
            (x, y, w, h) = face_utils.rect_to_bb(face)
            face_aligned = fa.align(frame, gray_frame, face)
            # Whenever the program captures a face, we write it to a folder
            # Before capturing the face, we need to tell the script whose face it is
            # For that we need an identifier, here we call it id
            # So now we have captured a face, we need to write it to a file
            sampleNum = sampleNum + 1
            # Save the image to the dataset, but only the face part, cropping the rest
            if face is None:
                print("face is none")
                continue
            cv2.imwrite(directory + '/' + str(sampleNum) + '.jpg', face_aligned)
            face_aligned = imutils.resize(face_aligned, width=400)
            # cv2.imshow("Image Captured", face_aligned)
            # @params the initial point of the rectangle will be x,y and
            # @params end point will be x+width and y+height
            # @params along with color of the rectangle
            # @params thickness of the rectangle
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
            # Before continuing to the next loop iteration, give it a little pause
            # waitKey of 50 milliseconds
            cv2.waitKey(50)
        # Show the image in another window
        # Creates a window with window name "Add Images" and the image frame
        cv2.imshow("Add Images", frame)
        # Before closing it we need to give a wait command, otherwise OpenCV won't work
        # @params with the millisecond of delay 1
        cv2.waitKey(1)
        # To get out of the loop
        if sampleNum > 300:
            break

    # Stop the video stream
    vs.stop()
    # Destroy all the windows
    cv2.destroyAllWindows()

@login_required
def add_photos(request):
    if not request.user.is_authenticated:
        messages.warning(request, f'Employee not authorised')
        return redirect('dashboard')
    elif request.user.is_authenticated:
        user = request.user
        create_dataset(user)
        messages.success(request, f'Dataset Created')
        return redirect('dashboard')
    else:
        messages.warning(request, f'Error: dataset not created')
        return render(request, 'dashboard')
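For reference, the snippet above relies on imports along these lines (reconstructed from the calls it makes, so the exact layout in my file may differ slightly):

import os

import cv2
import dlib
import imutils
from imutils import face_utils
from imutils.face_utils import FaceAligner
from imutils.video import VideoStream

from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render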
Stack trace related to the error:
Traceback (most recent call last):
File "/Users/xxxx/Desktop/djangoWebcamTest/venv/lib/python3.9/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/Users/xxxx/Desktop/djangoWebcamTest/venv/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/Users/xxxx/Desktop/djangoWebcamTest/venv/lib/python3.9/site-packages/django/contrib/auth/decorators.py", line 21, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/Users/xxxx/Desktop/djangoWebcamTest/users/views.py", line 204, in add_photos
create_dataset(user)
File "/Users/xxxx/Desktop/djangoWebcamTest/users/views.py", line 157, in create_dataset
face_aligned = fa.align(frame, gray_frame, face)
File "/Users/xxxx/Desktop/djangoWebcamTest/venv/lib/python3.9/site-packages/imutils/face_utils/facealigner.py", line 68, in align
M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
Exception Type: TypeError at /vidstream/
Exception Value: Can't parse 'center'. Sequence item with index 0 has a wrong type
Here is a screenshot of the variables printed to the console at face_aligned = fa.align(frame, gray_frame, face):
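To take Django out of the picture, a minimal standalone script along the following lines should exercise the same fa.align() call path (the predictor path and webcam index are the same assumptions as in views.py):

import cv2
import dlib
import imutils
from imutils import face_utils
from imutils.face_utils import FaceAligner
from imutils.video import VideoStream

# Same detector/aligner setup as in create_dataset()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('Recog_Data/shape_predictor_68_face_landmarks.dat')
fa = FaceAligner(predictor, desiredFaceWidth=256)

# Grab a single frame from the webcam and run the same detect/align steps
vs = VideoStream(src=0).start()
frame = vs.read()
frame = imutils.resize(frame, width=800)
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

for face in detector(gray_frame, 2):
    print(type(face), face_utils.rect_to_bb(face))
    # The same call that raises the TypeError inside the Django view
    face_aligned = fa.align(frame, gray_frame, face)
    print(face_aligned.shape)

vs.stop()

If the TypeError also shows up here, it would point at the imutils/OpenCV combination rather than at the Django view itself.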
Any help with this issue would be greatly appreciated.