import time
from math import hypot

import cv2
import dlib
import numpy as np
# Capture frames from the default webcam (device index 0).
cap = cv2.VideoCapture(0)
# dlib's built-in HOG-based frontal face detector.
detector = dlib.get_frontal_face_detector()
# 68-point facial landmark predictor model.
# NOTE(review): hard-coded absolute Windows path — this breaks on any other
# machine; consider taking the model path from a CLI argument or config.
predictor = dlib.shape_predictor(r"C:\Users\KIIT\OneDrive - kiit.ac.in\Documents\TTL-drowsiness-project\shape_predictor_68_face_landmarks.dat")
def mid(p1, p2):
    """Return the integer-truncated midpoint of two landmark points.

    Both arguments only need ``.x`` and ``.y`` attributes (e.g. dlib
    ``point`` objects). Coordinates are averaged with true division and
    then truncated via ``int()``, matching pixel-coordinate usage.
    """
    mid_x = (p1.x + p2.x) / 2
    mid_y = (p1.y + p2.y) / 2
    return int(mid_x), int(mid_y)
def eye_aspect_ratio(eye_landmark, face_roi_landmark):
    """Return the horizontal-to-vertical opening ratio of one eye.

    Note the orientation: this is width / height, so the value GROWS as
    the eye closes (the inverse of the classic EAR). Callers should
    therefore treat large values as "eye closed".

    Args:
        eye_landmark: six landmark indices ordered
            (left corner, top-1, top-2, right corner, bottom-2, bottom-1),
            e.g. [36..41] or [42..47] in the 68-point dlib model.
        face_roi_landmark: dlib full_object_detection providing ``.part(i)``.

    Returns:
        float: width / height; ``math.inf`` when the eye is fully closed
        (zero vertical extent), which previously raised ZeroDivisionError.
    """
    pts = [face_roi_landmark.part(i) for i in eye_landmark]
    left_point = (pts[0].x, pts[0].y)
    right_point = (pts[3].x, pts[3].y)
    # Midpoints of the two upper and the two lower lid landmarks
    # (integer-truncated, matching the mid() helper's behavior).
    center_top = (int((pts[1].x + pts[2].x) / 2), int((pts[1].y + pts[2].y) / 2))
    center_bottom = (int((pts[5].x + pts[4].x) / 2), int((pts[5].y + pts[4].y) / 2))
    hor_line_length = hypot(left_point[0] - right_point[0], left_point[1] - right_point[1])
    ver_line_length = hypot(center_top[0] - center_bottom[0], center_top[1] - center_bottom[1])
    if ver_line_length == 0:
        # Fully closed eye: avoid ZeroDivisionError; inf is consistent
        # with "larger means more closed" for this ratio.
        return float("inf")
    return hor_line_length / ver_line_length
def mouth_aspect_ratio(lips_landmark, face_roi_landmark):
    """Return the vertical-to-horizontal opening ratio of the mouth.

    Args:
        lips_landmark: landmark indices whose first four entries are used,
            in the order (left corner, top center, right corner,
            bottom center).
        face_roi_landmark: dlib full_object_detection providing ``.part(i)``.

    Returns:
        float: mouth opening / mouth width; larger means a wider-open mouth.
    """
    corner_left = face_roi_landmark.part(lips_landmark[0])
    lip_top = face_roi_landmark.part(lips_landmark[1])
    corner_right = face_roi_landmark.part(lips_landmark[2])
    lip_bottom = face_roi_landmark.part(lips_landmark[3])
    mouth_width = hypot(corner_left.x - corner_right.x, corner_left.y - corner_right.y)
    mouth_opening = hypot(lip_top.x - lip_bottom.x, lip_top.y - lip_bottom.y)
    return mouth_opening / mouth_width
# --- Detection thresholds -------------------------------------------------
# eye_aspect_ratio() returns width / height, so the value GROWS as the eye
# closes; 5.7 is the commonly used blink threshold for this orientation.
EYE_ASPECT_RATIO_THRESHOLD = 5.7
# mouth_aspect_ratio() returns opening / width; larger means a wider yawn.
# TODO(review): tune both thresholds against real footage.
MOUTH_ASPECT_RATIO_THRESHOLD = 0.5

yawning_alert = False   # latch: alert once per event, not once per frame
last_alert_time = 0.0   # wall-clock time of the most recent alert

while True:
    ret, img = cap.read()
    if not ret:
        # Camera unplugged or stream ended — exit cleanly instead of
        # crashing on a None frame in cv2.flip below.
        break
    img = cv2.flip(img, 1)  # mirror for a natural selfie view
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    for face_roi in faces:
        landmark_list = predictor(gray, face_roi)
        left_eye_ratio = eye_aspect_ratio([36, 37, 38, 39, 40, 41], landmark_list)
        right_eye_ratio = eye_aspect_ratio([42, 43, 44, 45, 46, 47], landmark_list)
        avg_eye_ratio = (left_eye_ratio + right_eye_ratio) / 2
        # (left corner, top center, right corner, bottom center) of the
        # outer lip in the 68-point model — the order the function expects.
        # The previous list [48..57] fed it four upper-left lip points.
        mouth_ratio = mouth_aspect_ratio([48, 51, 54, 57], landmark_list)
        # '>' for the eyes: the width/height ratio is LARGE when closed.
        if avg_eye_ratio > EYE_ASPECT_RATIO_THRESHOLD or mouth_ratio > MOUTH_ASPECT_RATIO_THRESHOLD:
            if not yawning_alert:
                yawning_alert = True
                last_alert_time = time.time()
                print("Yawning Alert!")
                # implement any necessary action here, such as sounding an alarm
        else:
            yawning_alert = False
        cv2.putText(img, f"EYE RATIO: {avg_eye_ratio:.2f}", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, f"MOUTH RATIO: {mouth_ratio:.2f}", (50, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.imshow('Yawning Detection', img)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

# Release the camera and close all OpenCV windows on exit (previously leaked).
cap.release()
cv2.destroyAllWindows()