# ulvis.paste.net

Paste Search Dynamic
Recent pastes
eye aspect ratio
1. # USAGE
2. # python detect_drowsiness.py --shape-predictor shape_predictor_68_face_landmarks.dat
3. # python detect_drowsiness.py --shape-predictor shape_predictor_68_face_landmarks.dat --alarm alarm.wav
4.
5. # import the necessary packages
6. from PIL import Image
7. from sklearn.naive_bayes import GaussianNB
8. from scipy.spatial import distance as dist
9. from imutils.video import VideoStream
10. from imutils import face_utils
12. import numpy as np
13. import playsound
14. import argparse
15. import imutils
16. import time
17. import dlib
18. import cv2
19.
20.
def eye_aspect_ratio(eye):
    """Compute the eye aspect ratio (EAR) for a single eye.

    `eye` is the 6-point landmark sequence in dlib 68-point order:
    indices 0 and 3 are the horizontal corners; (1, 5) and (2, 4) are
    the two vertical landmark pairs.
    """
    # Sum of the two vertical landmark distances.
    vertical = dist.euclidean(eye[1], eye[5]) + dist.euclidean(eye[2], eye[4])

    # Distance between the horizontal eye corners.
    horizontal = dist.euclidean(eye[0], eye[3])

    # EAR approaches zero as the eyelids close.
    return vertical / (2.0 * horizontal)
36.
37.
def my_functions(frame, shape):
    """Annotate `frame` with eye contours and the EAR value, log the EAR,
    and draw a drowsiness alert once the eyes have stayed closed for
    enough consecutive frames.

    Parameters
    ----------
    frame : BGR image (NumPy array); drawn on in place.
    shape : dlib landmark detection for one face (68-point model).

    Returns
    -------
    (frame, ear) : the annotated frame and the averaged eye aspect ratio.

    Relies on module-level globals: COUNTER, EYE_AR_THRESH,
    EYE_AR_CONSEC_FRAMES, lStart/lEnd, rStart/rEnd and the open `file`
    handle used for EAR logging.
    """
    # BUG FIX: COUNTER was a local reset to 0 on every call, so the
    # consecutive-closed-frame count never accumulated and the alert
    # could never trigger for EYE_AR_CONSEC_FRAMES > 1. Persist the
    # module-level counter instead.
    global COUNTER

    # BUG FIX: the logged frame index was also reset to 0 each call, so
    # every log line was numbered 1. Persist it on the function object.
    my_functions._frame_idx = getattr(my_functions, "_frame_idx", 0) + 1

    # convert the dlib landmark object to a (68, 2) NumPy array
    shape = face_utils.shape_to_np(shape)

    # extract the left and right eye coordinates, then use the
    # coordinates to compute the eye aspect ratio for both eyes
    leftEye = shape[lStart:lEnd]
    rightEye = shape[rStart:rEnd]
    leftEAR = eye_aspect_ratio(leftEye)
    rightEAR = eye_aspect_ratio(rightEye)

    # average the eye aspect ratio together for both eyes
    ear = (leftEAR + rightEAR) / 2.0

    # compute the convex hull for the left and right eye, then
    # visualize each of the eyes
    leftEyeHull = cv2.convexHull(leftEye)
    rightEyeHull = cv2.convexHull(rightEye)
    cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
    cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

    # log "<frame index>;<ear>" for offline threshold tuning
    file.write("\n" + str(my_functions._frame_idx) + ";" + str(ear))

    # check to see if the eye aspect ratio is below the blink
    # threshold, and if so, increment the blink frame counter
    if ear < EYE_AR_THRESH:
        COUNTER += 1

        # if the eyes were closed for a sufficient number of frames,
        # draw an alarm on the frame
        if COUNTER >= EYE_AR_CONSEC_FRAMES:
            cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    else:
        # otherwise, the eye aspect ratio is not below the blink
        # threshold, so reset the counter (the original comment promised
        # this reset but the code for it was missing)
        COUNTER = 0

    # draw the computed eye aspect ratio on the frame to help
    # with debugging and setting the correct eye aspect ratio
    # thresholds and frame counters
    cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    cv2.imshow("\n lalalala", frame)
    return (frame, ear)
96.
97.
# define two constants, one for the eye aspect ratio to indicate
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold for to set off the
# alarm
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 48

# initialize the frame counter as well as a boolean used to
# indicate if the alarm is going off
COUNTER = 0
# ALARM_ON = False

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

# start the video stream thread
print("[INFO] starting video stream thread...")
vs = VideoStream(0).start()
time.sleep(1.0)

# EAR log, written as "<frame index>;<ear>" lines for offline analysis
file = open('ear.txt', 'w')

try:
    # loop over frames from the video stream
    # BUG FIX: was `while true` — a NameError; Python's boolean literal is True
    while True:
        # BUG FIX: `frame` was used without ever being read from the
        # stream; grab it first, and stop cleanly if the stream ends.
        frame = vs.read()
        if frame is None:
            break

        # resize the frame and convert it to grayscale
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        closedEyesFrames = []

        # loop over the face detections
        for rect in rects:
            shape = predictor(gray, rect)
            result, ear = my_functions(frame, shape)
            if ear <= EYE_AR_THRESH:
                # NOTE(review): nothing is ever appended to
                # closedEyesFrames, so this always prints [] — confirm
                # what was meant to be collected here.
                print(closedEyesFrames)

        # show the frame
        cv2.imshow("\n Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
finally:
    # do a bit of cleanup on every exit path (the original only closed
    # the log file on the `q` key and never on an exception)
    file.close()
    cv2.destroyAllWindows()
    vs.stop()
167.
Parsed in 0.040 seconds