Commit 86c2960

Added flask server

1 parent eafe138

8 files changed, +313 −175 lines changed

.env

Lines changed: 2 additions & 0 deletions

@@ -0,0 +1,2 @@
+VITE_OPENAI_API_KEY='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'
+VITE_OPENAI_API_URL='https://api.openai.com/v1/chat/completions'
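
The VITE_ prefix means Vite exposes both values to client-side code via import.meta.env, so anything shipped in the bundle (including this key) is readable by end users. As a rough sketch of the request this configuration targets (the payload shape follows OpenAI's documented chat completions API; the model name and prompt are placeholder assumptions, not taken from this repo):

    # Sketch only: exercises the endpoint configured in .env.
    # The payload shape follows the public chat completions API;
    # the model name and message content are illustrative assumptions.
    import os
    import requests

    resp = requests.post(
        os.environ["VITE_OPENAI_API_URL"],
        headers={"Authorization": "Bearer " + os.environ["VITE_OPENAI_API_KEY"]},
        json={
            "model": "gpt-3.5-turbo",  # assumed model, for illustration
            "messages": [{"role": "user", "content": "Hello"}],
        },
    )
    print(resp.json()["choices"][0]["message"]["content"])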

flask-server/server.py

Lines changed: 35 additions & 0 deletions

@@ -0,0 +1,35 @@
+from flask import Flask, request, jsonify
+from flask_cors import CORS
+from flask_socketio import SocketIO, emit
+import threading
+import time
+
+app = Flask(__name__)
+app.config['SECRET_KEY'] = 'secret!'
+CORS(app)
+socketio = SocketIO(app, cors_allowed_origins="*")  # Allow all origins for SocketIO
+
+def check_sleepiness():
+    print('Sleepiness check thread started')  # Confirm the thread starts
+    while True:
+        time.sleep(5)
+        print('Emitting sleepy notification')  # Confirm it reaches this point
+        socketio.emit('sleepy_notification', {'sleepy': True})
+
+@app.route('/')
+def index():
+    return "Sleepiness Detection Server"
+
+@socketio.on('connect')
+def test_connect():
+    print('Client connected')
+
+@socketio.on('disconnect')
+def test_disconnect():
+    print('Client disconnected')
+
+if __name__ == '__main__':
+    # Start the sleepiness check in a separate thread
+    socketio.start_background_task(check_sleepiness)
+    # Run the Flask app
+    socketio.run(app, port=5000)
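
A client consumes these events over Socket.IO rather than a raw WebSocket. A minimal sketch using the python-socketio client package (illustrative only, not part of this commit; a browser frontend would do the equivalent with socket.io-client):

    # Minimal Socket.IO client sketch for this server; assumes the
    # python-socketio package is installed. Not part of this commit.
    import socketio

    sio = socketio.Client()

    @sio.on('sleepy_notification')
    def on_sleepy(data):
        # The background task emits {'sleepy': True} every 5 seconds
        print('Sleepy notification received:', data)

    sio.connect('http://localhost:5000')
    sio.wait()  # block and keep receiving events

One detail worth knowing: time.sleep(5) inside the background task is fine under Flask-SocketIO's default threading mode, but under eventlet or gevent the portable choice would be socketio.sleep(5), which yields cooperatively.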

index.html

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-<!doctype html>
+<!DOCTYPE html>
 <html lang="en">
 <head>
     <meta charset="UTF-8" />

model_integrated.py

Lines changed: 156 additions & 156 deletions

@@ -1,161 +1,161 @@
-import cv2
-import asyncio
-import websockets
-from imutils import face_utils
-from imutils.video import VideoStream
-
-
-from scipy.spatial import distance as dist
-def eye_aspect_ratio(eye):
-    # Vertical eye landmarks
-    A = dist.euclidean(eye[1], eye[5])
-    B = dist.euclidean(eye[2], eye[4])
-    # Horizontal eye landmarks
-    C = dist.euclidean(eye[0], eye[3])
-
-    # The EAR equation
-    EAR = (A + B) / (2.0 * C)
-    return EAR
-
-def mouth_aspect_ratio(mouth):
-    A = dist.euclidean(mouth[13], mouth[19])
-    B = dist.euclidean(mouth[14], mouth[18])
-    C = dist.euclidean(mouth[15], mouth[17])
-
-    MAR = (A + B + C) / 3.0
-    return MAR
-
-# All eye and mouth aspect ratios, with timestamps
-ear_list = []
-total_ear = []
-mar_list = []
-total_mar = []
-ts = []
-total_ts = []
-
-async def send_message(websocket, message):
-    await websocket.send(message)
-
-async def detect_faces_and_send():
-    # Set up the WebSocket connection
-    async with websockets.connect('ws://localhost:5000') as websocket:
-        # Declare a constant which will work as the threshold for the EAR value, below which it will be regarded as a blink
-        EAR_THRESHOLD = 0.3
-        # Declare another constant to hold the consecutive number of frames to consider for a blink
-        CONSECUTIVE_FRAMES = 20
-        # Another constant which will work as a threshold for the MAR value
-        MAR_THRESHOLD = 14
-
-        # Initialize two counters
-        BLINK_COUNT = 0
-        FRAME_COUNT = 0
-
-        # Now, initialize dlib's face detector model as 'detector' and the landmark predictor model as 'predictor'
-        print("[INFO] Loading the predictor.....")
-        detector = dlib.get_frontal_face_detector()
-        predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
-
-        # Grab the indexes of the facial landmarks for the left and right eye respectively
-        (lstart, lend) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
-        (rstart, rend) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
-        (mstart, mend) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
-
-        # Now start the video stream and allow the camera to warm up
-        print("[INFO] Loading Camera.....")
-        vs = VideoStream(usePiCamera=False).start()
-        time.sleep(2)
-
-        assure_path_exists("dataset/")
-        count_sleep = 0
-        count_yawn = 0
-        # Initialize OpenCV's face detection model
+# import cv2
+# import asyncio
+# import websockets
+# from imutils import face_utils
+# from imutils.video import VideoStream
+
+
+# from scipy.spatial import distance as dist
+# def eye_aspect_ratio(eye):
+#     # Vertical eye landmarks
+#     A = dist.euclidean(eye[1], eye[5])
+#     B = dist.euclidean(eye[2], eye[4])
+#     # Horizontal eye landmarks
+#     C = dist.euclidean(eye[0], eye[3])
+
+#     # The EAR equation
+#     EAR = (A + B) / (2.0 * C)
+#     return EAR
+
+# def mouth_aspect_ratio(mouth):
+#     A = dist.euclidean(mouth[13], mouth[19])
+#     B = dist.euclidean(mouth[14], mouth[18])
+#     C = dist.euclidean(mouth[15], mouth[17])
+
+#     MAR = (A + B + C) / 3.0
+#     return MAR
+
+# # All eye and mouth aspect ratios, with timestamps
+# ear_list = []
+# total_ear = []
+# mar_list = []
+# total_mar = []
+# ts = []
+# total_ts = []
+
+# async def send_message(websocket, message):
+#     await websocket.send(message)
+
+# async def detect_faces_and_send():
+#     # Set up the WebSocket connection
+#     async with websockets.connect('ws://localhost:5000') as websocket:
+#         # Declare a constant which will work as the threshold for the EAR value, below which it will be regarded as a blink
+#         EAR_THRESHOLD = 0.3
+#         # Declare another constant to hold the consecutive number of frames to consider for a blink
+#         CONSECUTIVE_FRAMES = 20
+#         # Another constant which will work as a threshold for the MAR value
+#         MAR_THRESHOLD = 14

+#         # Initialize two counters
+#         BLINK_COUNT = 0
+#         FRAME_COUNT = 0

+#         # Now, initialize dlib's face detector model as 'detector' and the landmark predictor model as 'predictor'
+#         print("[INFO] Loading the predictor.....")
+#         detector = dlib.get_frontal_face_detector()
+#         predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

+#         # Grab the indexes of the facial landmarks for the left and right eye respectively
+#         (lstart, lend) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
+#         (rstart, rend) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
+#         (mstart, mend) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

+#         # Now start the video stream and allow the camera to warm up
+#         print("[INFO] Loading Camera.....")
+#         vs = VideoStream(usePiCamera=False).start()
+#         time.sleep(2)

+#         assure_path_exists("dataset/")
+#         count_sleep = 0
+#         count_yawn = 0
+#         # Initialize OpenCV's face detection model

-        # Capture video from the default camera (change the parameter to your camera index if needed)
-        cap = cv2.VideoCapture(0)
+#         # Capture video from the default camera (change the parameter to your camera index if needed)
+#         cap = cv2.VideoCapture(0)

-        while True:
+#         while True:

-            # Extract a frame
-            frame = vs.read()
-            cv2.putText(frame, "PRESS 'q' TO EXIT", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 3)
-            # Resize the frame
-            frame = imutils.resize(frame, width=500)
-            # Convert the frame to grayscale
-            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-            # Detect faces
-            rects = detector(frame, 1)
-
-            # Now loop over all the face detections and apply the predictor
-            for (i, rect) in enumerate(rects):
-                shape = predictor(gray, rect)
-                # Convert it to a (68, 2) size numpy array
-                shape = face_utils.shape_to_np(shape)
-
-                # Draw a rectangle over the detected face
-                (x, y, w, h) = face_utils.rect_to_bb(rect)
-                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
-                # Put a label
-                cv2.putText(frame, "Driver", (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
-
-                leftEye = shape[lstart:lend]
-                rightEye = shape[rstart:rend]
-                mouth = shape[mstart:mend]
-                # Compute the EAR for both the eyes
-                leftEAR = eye_aspect_ratio(leftEye)
-                rightEAR = eye_aspect_ratio(rightEye)
-
-                # Take the average of both EARs
-                EAR = (leftEAR + rightEAR) / 2.0
-                # Live data write to csv
-                ear_list.append(EAR)
-                # print(ear_list)
+#             # Extract a frame
+#             frame = vs.read()
+#             cv2.putText(frame, "PRESS 'q' TO EXIT", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 3)
+#             # Resize the frame
+#             frame = imutils.resize(frame, width=500)
+#             # Convert the frame to grayscale
+#             gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+#             # Detect faces
+#             rects = detector(frame, 1)

+#             # Now loop over all the face detections and apply the predictor
+#             for (i, rect) in enumerate(rects):
+#                 shape = predictor(gray, rect)
+#                 # Convert it to a (68, 2) size numpy array
+#                 shape = face_utils.shape_to_np(shape)

+#                 # Draw a rectangle over the detected face
+#                 (x, y, w, h) = face_utils.rect_to_bb(rect)
+#                 cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+#                 # Put a label
+#                 cv2.putText(frame, "Driver", (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

+#                 leftEye = shape[lstart:lend]
+#                 rightEye = shape[rstart:rend]
+#                 mouth = shape[mstart:mend]
+#                 # Compute the EAR for both the eyes
+#                 leftEAR = eye_aspect_ratio(leftEye)
+#                 rightEAR = eye_aspect_ratio(rightEye)

+#                 # Take the average of both EARs
+#                 EAR = (leftEAR + rightEAR) / 2.0
+#                 # Live data write to csv
+#                 ear_list.append(EAR)
+#                 # print(ear_list)


-                ts.append(dt.datetime.now().strftime('%H:%M:%S.%f'))
-                # Compute the convex hull for both the eyes and then visualize it
-                leftEyeHull = cv2.convexHull(leftEye)
-                rightEyeHull = cv2.convexHull(rightEye)
-                # Draw the contours
-                cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
-                cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
-                cv2.drawContours(frame, [mouth], -1, (0, 255, 0), 1)
-
-                MAR = mouth_aspect_ratio(mouth)
-                mar_list.append(MAR / 10)
-                # Check if EAR < EAR_THRESHOLD; if so, a blink is taking place
-                # Thus, count the number of frames for which the eye remains closed
-                if EAR < EAR_THRESHOLD:
-                    FRAME_COUNT += 1
-
-                    cv2.drawContours(frame, [leftEyeHull], -1, (0, 0, 255), 1)
-                    cv2.drawContours(frame, [rightEyeHull], -1, (0, 0, 255), 1)
-
-                    if FRAME_COUNT >= CONSECUTIVE_FRAMES:
-                        count_sleep += 1
-                        await send_message(websocket, "Sleepy detected!")
-                        # Add the frame to the dataset as proof of drowsy driving
-                        #cv2.imwrite("dataset/frame_sleep%d.jpg" % count_sleep, frame)
-                        #playsound('sound files/alarm.mp3')
-                        #send_notice()
-                        #cv2.putText(frame, "DROWSINESS ALERT!", (270, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
-                else:
-                    #if FRAME_COUNT >= CONSECUTIVE_FRAMES:
-                        #playsound('sound files/warning.mp3')
-                    FRAME_COUNT = 0
-                    #cv2.putText(frame, "EAR: {:.2f}".format(EAR), (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
-
-                # Check if the person is yawning
-                if MAR > MAR_THRESHOLD:
-                    count_yawn += 1
-                    cv2.drawContours(frame, [mouth], -1, (0, 0, 255), 1)
-                    cv2.putText(frame, "DROWSINESS ALERT!", (270, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
-                    await send_message(websocket, "Sleepy detected!")
-                    # Add the frame to the dataset as proof of drowsy driving
-                    #cv2.imwrite("dataset/frame_yawn%d.jpg" % count_yawn, frame)
-                    #playsound('sound files/alarm.mp3')
-                    #playsound('sound files/warning_yawn.mp3')
-        # Total data collection for plotting
-        cv2.destroyAllWindows()
-        vs.stop()
-# Run the asyncio event loop
-asyncio.run(detect_faces_and_send())
+#                 ts.append(dt.datetime.now().strftime('%H:%M:%S.%f'))
+#                 # Compute the convex hull for both the eyes and then visualize it
+#                 leftEyeHull = cv2.convexHull(leftEye)
+#                 rightEyeHull = cv2.convexHull(rightEye)
+#                 # Draw the contours
+#                 cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
+#                 cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
+#                 cv2.drawContours(frame, [mouth], -1, (0, 255, 0), 1)

+#                 MAR = mouth_aspect_ratio(mouth)
+#                 mar_list.append(MAR / 10)
+#                 # Check if EAR < EAR_THRESHOLD; if so, a blink is taking place
+#                 # Thus, count the number of frames for which the eye remains closed
+#                 if EAR < EAR_THRESHOLD:
+#                     FRAME_COUNT += 1

+#                     cv2.drawContours(frame, [leftEyeHull], -1, (0, 0, 255), 1)
+#                     cv2.drawContours(frame, [rightEyeHull], -1, (0, 0, 255), 1)

+#                     if FRAME_COUNT >= CONSECUTIVE_FRAMES:
+#                         count_sleep += 1
+#                         await send_message(websocket, "Sleepy detected!")
+#                         # Add the frame to the dataset as proof of drowsy driving
+#                         #cv2.imwrite("dataset/frame_sleep%d.jpg" % count_sleep, frame)
+#                         #playsound('sound files/alarm.mp3')
+#                         #send_notice()
+#                         #cv2.putText(frame, "DROWSINESS ALERT!", (270, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
+#                 else:
+#                     #if FRAME_COUNT >= CONSECUTIVE_FRAMES:
+#                         #playsound('sound files/warning.mp3')
+#                     FRAME_COUNT = 0
+#                     #cv2.putText(frame, "EAR: {:.2f}".format(EAR), (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

+#                 # Check if the person is yawning
+#                 if MAR > MAR_THRESHOLD:
+#                     count_yawn += 1
+#                     cv2.drawContours(frame, [mouth], -1, (0, 0, 255), 1)
+#                     cv2.putText(frame, "DROWSINESS ALERT!", (270, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
+#                     await send_message(websocket, "Sleepy detected!")
+#                     # Add the frame to the dataset as proof of drowsy driving
+#                     #cv2.imwrite("dataset/frame_yawn%d.jpg" % count_yawn, frame)
+#                     #playsound('sound files/alarm.mp3')
+#                     #playsound('sound files/warning_yawn.mp3')
+#         # Total data collection for plotting
+#         cv2.destroyAllWindows()
+#         vs.stop()
+# # Run the asyncio event loop
+# asyncio.run(detect_faces_and_send())
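
The commented-out client connected to port 5000 with the plain websockets library; a Socket.IO server speaks its own protocol on top of WebSocket, so that handshake would fail, which is presumably why this path was disabled in favor of the Flask-SocketIO server above. The EAR formula itself is easy to sanity-check in isolation; a small sketch with made-up six-point eye landmarks (coordinates are illustrative only, not from this repo):

    # Standalone check of the EAR formula from model_integrated.py,
    # using made-up 6-point eye landmarks p1..p6 (illustrative values).
    from scipy.spatial import distance as dist

    def eye_aspect_ratio(eye):
        A = dist.euclidean(eye[1], eye[5])  # vertical span p2-p6
        B = dist.euclidean(eye[2], eye[4])  # vertical span p3-p5
        C = dist.euclidean(eye[0], eye[3])  # horizontal span p1-p4
        return (A + B) / (2.0 * C)

    open_eye = [(0, 0), (1, 2), (3, 2), (4, 0), (3, -2), (1, -2)]
    closed_eye = [(0, 0), (1, 0.3), (3, 0.3), (4, 0), (3, -0.3), (1, -0.3)]
    print(eye_aspect_ratio(open_eye))    # 1.0, well above the 0.3 threshold
    print(eye_aspect_ratio(closed_eye))  # 0.15, below 0.3 -> counts toward a blink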
