34. OpenCV Face Detection and Recognition: Examples
Table of Contents
- Preface
- I. Face detection with a Haar cascade detector (face and eyes, image)
- II. Face detection with a Haar cascade detector (face and eyes, video)
- III. Deep-learning-based face detection (face and eyes, image)
- IV. Deep-learning-based face detection (face and eyes, video)
- V. Using the EigenFaces, FisherFaces, and LBPH face recognizers
- Summary
Preface
Face detection is the task of locating faces in an image. Face recognition goes one step further and, building on the detection result, determines whose face it is. This post collects the example code from the earlier articles on face detection and recognition.
I. Face detection with a Haar cascade detector (face and eyes, image)
# Face and eye detection on an image with Haar cascade classifiers
import cv2

def plot_face(image, faces):
    # Draw a rectangle around every detected face
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2)
    return image

def plot_eyes(image, face, eyes):
    # Draw a circle around every eye found inside the given face region
    x, y, w, h = face
    for (ex, ey, ew, eh) in eyes:
        cv2.circle(image[y:y + h, x:x + w],
                   (int(ex + ew / 2), int(ey + eh / 2)),
                   int(max(ew, eh) / 2), (0, 255, 255), 2)
    return image

def main():
    image = cv2.imread("2.jpg")
    cv2.imshow("Image", image)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    face = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    eye = cv2.CascadeClassifier("haarcascade_eye.xml")
    faces = face.detectMultiScale(gray)
    face_face_result = plot_face(image.copy(), faces)
    face_eye_result = face_face_result.copy()
    for (x, y, w, h) in faces:
        # Search for eyes only inside each detected face region
        roi_eye = gray[y:y + h, x:x + w]
        eyes = eye.detectMultiScale(roi_eye)
        face_eye_result = plot_eyes(face_eye_result, (x, y, w, h), eyes)
    cv2.imshow("face", face_face_result)
    cv2.imshow("eye", face_eye_result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
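The example above calls detectMultiScale with its default parameters. In practice the detector is usually tuned with scaleFactor, minNeighbors, and minSize to trade detection speed against false positives. The call below is a minimal sketch of such tuning, reusing the face and gray variables from the example; the parameter values are illustrative, not part of the original code.

# Hypothetical tuning of the Haar detector (the values shown are illustrative only)
faces = face.detectMultiScale(
    gray,
    scaleFactor=1.1,    # shrink the search scale by about 10% per pyramid level
    minNeighbors=5,     # require at least 5 overlapping candidates to keep a face
    minSize=(30, 30)    # ignore candidate faces smaller than 30x30 pixels
)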
II. Face detection with a Haar cascade detector (face and eyes, video)
# Face and eye detection on a camera stream with Haar cascade classifiers
import cv2

capture = cv2.VideoCapture(0)
frame_width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
frame_height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = capture.get(cv2.CAP_PROP_FPS)
face = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
eye = cv2.CascadeClassifier("haarcascade_eye.xml")
if capture.isOpened() is False:
    print('CAMERA ERROR !')
    exit(0)
while capture.isOpened():
    ret, frame = capture.read()
    if ret is True:
        cv2.imshow('FRAME', frame)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face.detectMultiScale(gray_frame)
        for (x, y, w, h) in faces:
            # Mark the face, then look for eyes inside the face region only
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3)
            roi_eye = gray_frame[y:y + h, x:x + w]
            eyes = eye.detectMultiScale(roi_eye)
            for (ex, ey, ew, eh) in eyes:
                cv2.circle(frame[y:y + h, x:x + w],
                           (int(ex + ew / 2), int(ey + eh / 2)),
                           int(max(ew, eh) / 2), (0, 255, 0), 2)
        cv2.imshow('GRAY FRAME', gray_frame)
        cv2.imshow('FACE', frame)
        k = cv2.waitKey(100)
        if k == ord('q'):
            break
    else:
        break
capture.release()
cv2.destroyAllWindows()
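The video example reads frame_width, frame_height, and fps from the capture but never uses them. One natural use is writing the annotated frames to a file with cv2.VideoWriter. The lines below are a minimal sketch of that idea; the output filename and the XVID codec are assumptions, not part of the original example.

# Hypothetical: save annotated frames to "face_eye.avi" (filename and codec are assumptions)
fps_out = int(fps) if fps > 0 else 25   # some webcams report 0 FPS; fall back to 25
fourcc = cv2.VideoWriter_fourcc(*'XVID')
writer = cv2.VideoWriter('face_eye.avi', fourcc, fps_out,
                         (int(frame_width), int(frame_height)))
# Inside the capture loop, after drawing the rectangles and circles:
#     writer.write(frame)
# After the loop, alongside capture.release():
#     writer.release()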
III. Deep-learning-based face detection (face and eyes, image)
# Face detection on an image with OpenCV's DNN face detector
import cv2
import numpy as np

# Load the pre-trained TensorFlow face detection model
dnnnet = cv2.dnn.readNetFromTensorflow("opencv_face_detector_uint8.pb", "opencv_face_detector.pbtxt")
img = cv2.imread("heard.jpg")
h, w = img.shape[:2]
# Build a 300x300 blob and subtract the model's mean values
blobs = cv2.dnn.blobFromImage(img, 1.0, (300, 300), [104., 117., 123.], False, False)
dnnnet.setInput(blobs)
detections = dnnnet.forward()
faces = 0
for i in range(0, detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if confidence > 0.6:   # keep detections above 60% confidence
        faces += 1
        # The box coordinates are normalized; scale them back to the image size
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        x1, y1, x2, y2 = box.astype("int")
        y = y1 - 10 if y1 - 10 > 10 else y1 + 10
        text = "%.3f" % (confidence * 100) + '%'
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.putText(img, text, (x1, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
cv2.imshow('faces', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
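The forward pass of this detector returns an array of shape (1, 1, N, 7), where each of the N rows holds (image id, class, confidence, x1, y1, x2, y2) with the box coordinates normalized to [0, 1]. If the same filtering logic is needed in several places, it can be wrapped in a small helper. The function below is a sketch of that idea, reusing the cv2 and numpy imports above; the name detect_faces_dnn and the default threshold are my own, not part of the original code.

# Hypothetical helper (name and default threshold are assumptions)
def detect_faces_dnn(net, image, conf_threshold=0.6):
    """Return a list of (confidence, (x1, y1, x2, y2)) for faces above the threshold."""
    h, w = image.shape[:2]
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), [104., 117., 123.], False, False)
    net.setInput(blob)
    detections = net.forward()
    results = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > conf_threshold:
            # Scale the normalized box back to pixel coordinates
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            results.append((float(confidence), tuple(box.astype("int"))))
    return results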
IV. Deep-learning-based face detection (face and eyes, video)
# Face detection on a camera stream with OpenCV's DNN face detector
import cv2
import numpy as np

capture = cv2.VideoCapture(0)
frame_width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
frame_height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = capture.get(cv2.CAP_PROP_FPS)
# Load the pre-trained TensorFlow face detection model
dnnnet = cv2.dnn.readNetFromTensorflow("opencv_face_detector_uint8.pb", "opencv_face_detector.pbtxt")
if capture.isOpened() is False:
    print('CAMERA ERROR !')
    exit(0)
while capture.isOpened():
    ret, frame = capture.read()
    if ret is True:
        cv2.imshow('FRAME', frame)
        h, w = frame.shape[:2]
        blobs = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), [104., 117., 123.], False, False)
        dnnnet.setInput(blobs)
        detections = dnnnet.forward()
        faces = 0
        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > 0.6:   # keep detections above 60% confidence
                faces += 1
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                x1, y1, x2, y2 = box.astype("int")
                y = y1 - 10 if y1 - 10 > 10 else y1 + 10
                text = "%.3f" % (confidence * 100) + '%'
                cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
                cv2.putText(frame, text, (x1, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        cv2.imshow('faces', frame)
        k = cv2.waitKey(100)
        if k == ord('q'):
            break
    else:
        break
capture.release()
cv2.destroyAllWindows()
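When running the detector on a live stream it is often useful to know how long one forward pass takes, since that limits the achievable frame rate. The lines below are a sketch of timing the forward() call with cv2.getTickCount and cv2.getTickFrequency; the variable names are my own, not part of the original example.

# Hypothetical timing of one forward pass (variable names are assumptions)
t_start = cv2.getTickCount()
detections = dnnnet.forward()
t_end = cv2.getTickCount()
elapsed_ms = (t_end - t_start) / cv2.getTickFrequency() * 1000
print("DNN inference time: %.1f ms" % elapsed_ms)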
V. Using the EigenFaces, FisherFaces, and LBPH face recognizers
# Face recognition with the EigenFaces, FisherFaces, and LBPH recognizers
# (the cv2.face module requires the opencv-contrib-python package)
import cv2
import numpy as np

# Three grayscale training images per person
img11 = cv2.imread("xl11.jpg", cv2.IMREAD_GRAYSCALE)
img12 = cv2.imread("xl12.jpg", cv2.IMREAD_GRAYSCALE)
img13 = cv2.imread("xl13.jpg", cv2.IMREAD_GRAYSCALE)
img21 = cv2.imread("xl21.jpg", cv2.IMREAD_GRAYSCALE)
img22 = cv2.imread("xl22.jpg", cv2.IMREAD_GRAYSCALE)
img23 = cv2.imread("xl23.jpg", cv2.IMREAD_GRAYSCALE)
train_images = [img11, img12, img13,
                img21, img22, img23]
labels = np.array([0, 0, 0,
                   1, 1, 1])
# recognizer = cv2.face.EigenFaceRecognizer_create()
# recognizer.train(train_images, labels)
# recognizer = cv2.face.FisherFaceRecognizer_create()
# recognizer.train(train_images, labels)
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.train(train_images, labels)
test_img = cv2.imread('test1.jpg', cv2.IMREAD_GRAYSCALE)
label, confidence = recognizer.predict(test_img)
print("Matched label:", label)
print("Confidence:", confidence)
The training images used by the program (grayscale):
The unknown face image used for testing:
The program output is as follows:
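One practical note: the EigenFaces and FisherFaces recognizers (the two commented-out options above) require every training and test image to have exactly the same size, while LBPH does not. If you switch to them, the images can be normalized first; the snippet below is a minimal sketch that reuses train_images and test_img from the example, and the 200x200 target size is an arbitrary choice.

# Hypothetical preprocessing for EigenFaces/FisherFaces (200x200 is an arbitrary size)
SIZE = (200, 200)
train_images = [cv2.resize(img, SIZE) for img in train_images]
test_img = cv2.resize(test_img, SIZE)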
VI. OpenCV-Python resource downloads
[OpenCV-Python test images, the Chinese official documentation, and the opencv-4.5.4 source code](https://download.csdn.net/download/weixin_43843069/57802303)
Summary
The above collects the OpenCV-Python code examples for face detection and recognition. Articles on Python, data science, artificial intelligence, and related topics will continue to be published from time to time, so please follow, and don't forget to like, favorite, and share (●'◡'●).