# Face detection
# opencv
import cv2
# mediapipe ai tool kit
import mediapipe as mp
# Progress bar library
from tqdm import tqdm
import time
import matplotlib.pyplot as plt
# Define a visual image function
def look_img(img):
    """Display a BGR image (OpenCV convention) in a matplotlib window.

    Parameters
    ----------
    img : numpy.ndarray
        Image in BGR channel order, as returned by ``cv2.imread``.
    """
    # OpenCV stores images as BGR; matplotlib expects RGB.
    img_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Bug fix: the original referenced `plt.imshow` without calling it and
    # never passed the converted image, so plt.show() displayed nothing.
    plt.imshow(img_RGB)
    plt.show()
# Import BlazeFace Model
# Load Google's BlazeFace face-detection solution from MediaPipe.
mp_face_detection = mp.solutions.face_detection
model = mp_face_detection.FaceDetection(
    # Detections scoring below this confidence are discarded.
    min_detection_confidence=0.5,
    # 0 -> short-range model (faces within ~2 m of the camera),
    # 1 -> full-range model (faces up to ~5 m away).
    model_selection=1,
)

# Drawing helpers used to render detections onto images.
mp_drawing = mp.solutions.drawing_utils
# Style for the facial keypoints (green circles).
keypoint_style = mp_drawing.DrawingSpec(thickness=5, circle_radius=3, color=(0, 255, 0))
# Style for the face bounding box (blue in BGR order).
bbox_style = mp_drawing.DrawingSpec(thickness=5, circle_radius=3, color=(255, 0, 0))
# Read image
# Read the test image (BGR) from disk.
img = cv2.imread('images/img.png')
# Convert BGR -> RGB: MediaPipe models expect RGB input.
img_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Run the face-detection model on the RGB frame.
results = model.process(img_RGB)
# Draw every detection (bounding box + keypoints) on a copy of the original.
annotated_image = img.copy()
# Robustness: `results.detections` is None when no face is found; fall back
# to an empty iteration instead of crashing with a TypeError.
for detection in results.detections or []:
    mp_drawing.draw_detection(annotated_image,
                              detection,
                              keypoint_drawing_spec=keypoint_style,
                              bbox_drawing_spec=bbox_style)
look_img(annotated_image)
# Bug fix: the original line had stray text ("material") fused onto its end,
# which made the whole script a syntax error.
cv2.imwrite('Test.jpg', annotated_image)

# Result: the annotated image is displayed on screen and saved as Test.jpg.










