-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathfacedetection.py
More file actions
137 lines (102 loc) · 4.33 KB
/
facedetection.py
File metadata and controls
137 lines (102 loc) · 4.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import cv2
import numpy as np
import speech_recognition as sr
import threading
# Load Haar cascade for face detection.
# NOTE(review): loaded but never used below — the DNN detector does all detection;
# kept as-is since removing it would change module-level behavior.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
# Load pre-trained deep learning models.
# NOTE(review): hard-coded absolute Windows paths — these must exist on the
# machine running the script, or the readNetFromCaffe calls below will fail at import time.
dnn_model = r"D:\facewebapp\models\deploy.prototxt"
dnn_weights = r"D:\facewebapp\models\res10_300x300_ssd_iter_140000.caffemodel"
age_model = r"D:\facewebapp\models\age_deploy.prototxt"
age_weights = r"D:\facewebapp\models\age_net.caffemodel"
gender_model = r"D:\facewebapp\models\gender_deploy.prototxt"
gender_weights = r"D:\facewebapp\models\gender_net.caffemodel"
# Load DNN models (side effect at import: reads all three model files from disk).
face_net = cv2.dnn.readNetFromCaffe(dnn_model, dnn_weights)
age_net = cv2.dnn.readNetFromCaffe(age_model, age_weights)
gender_net = cv2.dnn.readNetFromCaffe(gender_model, gender_weights)
# Age and gender labels (index order must match the models' output layers).
GENDER_LABELS = ["Male", "Female"]
# Original labels from model (cannot be changed unless retrained)
ORIGINAL_AGE_LABELS = ["(0-2)", "(4-6)", "(8-12)", "(15-20)", "(20-25)", "(25-32)", "(38-43)", "(48-53)", "(60-100)"]
# Custom remapped labels (just visual) — one display string per original age bucket.
AGE_LABELS = ["1", "5", "10", "17", "23", "28", "40", "50", "65+"]
# Speech recognition setup (shared by the background thread in recognize_speech).
recognizer = sr.Recognizer()
speech_text = "" # Store speech text accumulated across the whole session
def detect_faces_dnn(image, net):
    """Detect faces in a BGR image using an OpenCV SSD face-detection DNN.

    Parameters
    ----------
    image : numpy.ndarray
        BGR image of shape (H, W, 3).
    net : cv2.dnn_Net
        Pre-loaded Caffe SSD face detector expecting 300x300 input.

    Returns
    -------
    list[tuple[int, int, int, int]]
        (x, y, x_max, y_max) boxes in pixel coordinates, clamped to the
        image bounds; degenerate (empty) boxes are dropped.
    """
    height, width = image.shape[:2]
    # (104, 177, 123) are the BGR channel means the res10 SSD was trained with.
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
    net.setInput(blob)
    detections = net.forward()
    faces = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.7:  # keep only high-confidence detections
            box = detections[0, 0, i, 3:7] * np.array([width, height, width, height])
            (x, y, x_max, y_max) = box.astype("int")
            # Fix: the SSD can emit boxes that extend past the frame edge
            # (negative or > width/height coords). Unclamped values produced
            # wrong/empty ROI slices downstream, so clamp to the image bounds
            # and skip boxes that collapse to zero area.
            x, y = max(0, x), max(0, y)
            x_max, y_max = min(width, x_max), min(height, y_max)
            if x_max > x and y_max > y:
                faces.append((x, y, x_max, y_max))
    return faces
def predict_age_gender(face_image):
    """Return (gender, age) display strings for a cropped face image.

    Runs the cropped face through the module-level gender and age Caffe
    networks and maps each argmax to its label table.
    """
    # Mean-subtraction values match the Caffe age/gender models' training setup;
    # swapRB=False because the models expect BGR channel order.
    input_blob = cv2.dnn.blobFromImage(
        face_image, 1.0, (227, 227), (78.426337, 87.768914, 114.895847), swapRB=False
    )

    # Gender: pick the label with the highest softmax score.
    gender_net.setInput(input_blob)
    gender_label = GENDER_LABELS[gender_net.forward()[0].argmax()]

    # Age: same idea, using the remapped display labels.
    age_net.setInput(input_blob)
    age_label = AGE_LABELS[age_net.forward()[0].argmax()]

    return gender_label, age_label
def recognize_speech():
    """Continuously capture microphone audio, transcribe it via Google's
    speech API, append each result to the global transcript, and persist
    the full transcript to speech_output.txt after every recognition.
    """
    global speech_text
    with sr.Microphone() as source:
        print("🎤 Listening... Speak now!")
        recognizer.adjust_for_ambient_noise(source)
        while True:
            try:
                recognized = recognizer.recognize_google(
                    recognizer.listen(source, timeout=5)
                )
            except sr.WaitTimeoutError:
                print("⏳ No speech detected... continuing.")
            except sr.UnknownValueError:
                print("❌ Could not understand audio.")
            except sr.RequestError:
                print("⚠ Speech recognition service error.")
            else:
                speech_text += recognized + " "
                # Rewrite the whole transcript so the file always holds the
                # complete session text.
                with open("speech_output.txt", "w") as file:
                    file.write(speech_text)
                print(f"📝 Recognized: {recognized}")
def main():
    """Run the webcam loop: annotate detected faces with predicted gender/age
    while speech transcription runs on a background thread. Press 'q' to quit.
    """
    capture = cv2.VideoCapture(0)

    # Daemon thread: the transcription loop dies automatically with the process.
    threading.Thread(target=recognize_speech, daemon=True).start()

    while True:
        ok, frame = capture.read()
        if not ok:
            break

        for (x, y, x_max, y_max) in detect_faces_dnn(frame.copy(), face_net):
            roi = frame[y:y_max, x:x_max]  # crop the detected face region
            if roi.size == 0:
                continue  # skip degenerate boxes

            gender, age = predict_age_gender(roi)

            # Draw the bounding box and its gender/age caption.
            cv2.rectangle(frame, (x, y), (x_max, y_max), (0, 255, 0), 2)
            cv2.putText(frame, f"{gender}, {age}", (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        cv2.imshow('Age & Gender Prediction', frame)

        # Press 'q' to quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()
# Entry point: only start the webcam/speech loop when run as a script,
# not when imported as a module.
if __name__ == "__main__":
    main()