Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 56 additions & 2 deletions src/face_detection_yolov12.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,9 @@ def __init__(self, model_path):
try:
from ultralytics import YOLO

self.yolo = YOLO(model_path)
print(f"Loaded YOLOv12 model from {model_path}")
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.yolo = YOLO(model_path).to(self.device)
print(f"Loaded YOLOv12 model from {model_path} on {self.device}")
except ImportError:
raise ImportError("ultralytics package not found. pip install ultralytics")
except Exception as e:
Expand Down Expand Up @@ -136,6 +137,59 @@ def draw_faces(self, image, detections, color=(0, 255, 0), thickness=2, show_con
)
return result

def blur_faces(self, image, detections, blur_factor=30):
    """Apply a Gaussian blur to every detected face region for privacy.

    Args:
        image: source image as a numpy array (H x W x C); not modified.
        detections: list of dicts with "x1", "y1", "x2", "y2" box corners.
            Values may be floats; they are cast to int before slicing.
        blur_factor: sigma handed to cv2.GaussianBlur — higher means stronger blur.

    Returns:
        A copy of ``image`` with each detected face region blurred.
    """
    result = image.copy()
    h, w = image.shape[:2]

    for det in detections:
        # Cast to int so float box coordinates cannot break array slicing.
        x1, y1 = int(det["x1"]), int(det["y1"])
        x2, y2 = int(det["x2"]), int(det["y2"])

        # Clamp the box to the image boundaries.
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(w, x2), min(h, y2)

        if x2 <= x1 or y2 <= y1:
            continue  # degenerate box after clamping — nothing to blur

        # Read the ROI from the pristine input so overlapping boxes never
        # re-blur pixels already written into ``result``.
        face_roi = image[y1:y2, x1:x2]

        # Kernel roughly half the face size; GaussianBlur requires odd
        # dimensions (| 1 forces odd), and 25 is the floor so small faces
        # still get a visible blur.
        kw = max(25, ((x2 - x1) // 2) | 1)
        kh = max(25, ((y2 - y1) // 2) | 1)

        result[y1:y2, x1:x2] = cv2.GaussianBlur(face_roi, (kw, kh), blur_factor)

    return result

def get_face_crops(self, image, detections, padding=0.1):
    """Extract cropped face images from detection results.

    Args:
        image: source image as a numpy array (H x W x C); not modified.
        detections: list of dicts with "x1", "y1", "x2", "y2" box corners.
            Values may be floats; they are cast to int before slicing.
        padding: fraction of the face width/height added on each side so the
            crop includes a little context around the face.

    Returns:
        List of numpy array crops (views into ``image``), one per valid
        detection; boxes that collapse to zero area after clamping are skipped.
    """
    crops = []
    h, w = image.shape[:2]

    for det in detections:
        # Cast to int so float box coordinates cannot break array slicing.
        x1, y1 = int(det["x1"]), int(det["y1"])
        x2, y2 = int(det["x2"]), int(det["y2"])
        fw, fh = x2 - x1, y2 - y1

        # Padding proportional to the face size.
        px = int(fw * padding)
        py = int(fh * padding)

        # Clamp the padded box to the image boundaries.
        cx1, cy1 = max(0, x1 - px), max(0, y1 - py)
        cx2, cy2 = min(w, x2 + px), min(h, y2 + py)

        if cx2 > cx1 and cy2 > cy1:
            crops.append(image[cy1:cy2, cx1:cx2])

    return crops


# --- GUI Class (Webcam) ---
class WebcamFaceDetectionGUI:
Expand Down
20 changes: 17 additions & 3 deletions src/web_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,8 +148,10 @@ def detect_image():
if not allowed_file(file.filename) or not is_image(file.filename):
return jsonify({"error": "Only image files allowed"}), 400

# Get model selection
# Get parameters
model = request.form.get("model", "yolov12l-face.pt")
blur = request.form.get("blur") == "true"

if model not in ALLOWED_MODELS:
app.logger.info(f"Invalid model '{model}' requested. Fallback to default.")
model = "yolov12l-face.pt"
Expand All @@ -171,12 +173,23 @@ def detect_image():
# Use standard detection for uploaded files
detections = detector.detect_faces(image, conf_threshold=0.32)

# Draw detections
result_image = detector.draw_faces(image, detections, show_confidence=True)
# Process image: Draw or Blur
if blur:
result_image = detector.blur_faces(image, detections)
else:
result_image = detector.draw_faces(image, detections, show_confidence=True)

if result_image is None:
return jsonify({"error": "Failed to process image"}), 500

# Extract crops for gallery
crops_base64 = []
if len(detections) > 0:
crops = detector.get_face_crops(image, detections)
for crop in crops:
_, buffer = cv2.imencode(".jpg", crop)
crops_base64.append(base64.b64encode(buffer).decode())

# Convert result to base64 for display
_, buffer = cv2.imencode(".jpg", result_image)
img_base64 = base64.b64encode(buffer).decode()
Expand All @@ -197,6 +210,7 @@ def detect_image():
}
for i, det in enumerate(detections)
],
"crops": crops_base64,
},
}

Expand Down
93 changes: 85 additions & 8 deletions web/templates/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,18 @@
<title>YOLOv12 Face Detection - Web</title>
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); min-height: 100vh; padding: 20px; }
body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); min-height: 100vh; padding: 20px; transition: background 0.3s ease; }
body.dark-mode { background: linear-gradient(135deg, #1a1a2e 0%, #16213e 100%); color: #e0e0e0; }
.card.dark-mode { background: #0f3460; color: #e0e0e0; border: 1px solid #16213e; }
.card.dark-mode h2, .card.dark-mode h3 { color: #4ecca3; }
.card.dark-mode label { color: #e0e0e0; }
.card.dark-mode .upload-area { background: #16213e; border-color: #4ecca3; }
.card.dark-mode .upload-area p { color: #4ecca3; }
.card.dark-mode .detection-stats { background: #1a1a2e; }
.card.dark-mode .stat-value { color: #4ecca3; }
.container { max-width: 1200px; margin: 0 auto; }
.header { text-align: center; color: white; margin-bottom: 40px; }
.header { text-align: center; color: white; margin-bottom: 40px; position: relative; }
.theme-toggle { position: absolute; top: 0; right: 0; }
.header h1 { font-size: 2.5em; margin-bottom: 10px; text-shadow: 2px 2px 4px rgba(0,0,0,0.3); }
.header p { font-size: 1.1em; opacity: 0.9; }
.tabs { display: flex; gap: 10px; margin-bottom: 30px; flex-wrap: wrap; }
Expand Down Expand Up @@ -181,6 +190,9 @@
<div class="header">
<h1>🔍 YOLOv12 Face Detection</h1>
<p>Next-Gen face detection with Attention Mechanism</p>
<div class="theme-toggle">
<button class="tab-button" onclick="toggleDarkMode()" id="darkModeBtn">🌙 Dark Mode</button>
</div>
</div>

<div class="tabs">
Expand All @@ -196,11 +208,17 @@ <h2>Detect Faces in Image</h2>

<div id="imageMessage" class="message"></div>

<div class="form-group">
<label for="imageModel">🤖 Select Model:</label>
<select id="imageModel">
<option>Loading models...</option>
</select>
<div class="form-group" style="display: flex; gap: 20px; align-items: center; flex-wrap: wrap;">
<div style="flex: 2; min-width: 200px;">
<label for="imageModel">🤖 Select Model:</label>
<select id="imageModel">
<option>Loading models...</option>
</select>
</div>
<div style="flex: 1; min-width: 220px; display: flex; align-items: center; gap: 12px; margin-top: 25px; padding: 10px; background: rgba(102, 126, 234, 0.1); border-radius: 8px;">
<input type="checkbox" id="blurFaces" style="width: 22px; height: 22px; cursor: pointer;">
<label for="blurFaces" style="margin-bottom: 0; cursor: pointer; font-weight: 600;">🕵️ Privacy Mode (Blur Faces)</label>
</div>
</div>

<div class="form-group">
Expand Down Expand Up @@ -237,6 +255,14 @@ <h2>Detect Faces in Image</h2>

<div class="result-container" id="imageResult">
<img id="resultImage" class="result-image" alt="Detection Result">

<div id="gallerySection" style="display: none; margin-bottom: 20px;">
<h3 style="color: #667eea; margin-bottom: 15px;">👤 Face Gallery</h3>
<div id="cropsContainer" style="display: flex; gap: 10px; flex-wrap: wrap; background: #f0f2ff; padding: 15px; border-radius: 8px;">
<!-- Crops will be added here -->
</div>
</div>

<div class="detection-stats">
<div class="stat-row">
<span class="stat-label">Faces Detected:</span>
Expand Down Expand Up @@ -452,7 +478,7 @@ <h3 style="color: #667eea; margin-top: 30px; margin-bottom: 15px;">🔧 API Endp

<div class="footer text-center mt-5 py-4" style="border-top: 1px solid rgba(255,255,255,0.1);">
<p class="mb-1 text-white-50">
🚀 YOLOv12 Face Detection Web - Version 1.2.0 | Powered by <strong class="text-white">Flask</strong> & <strong class="text-white">Ultralytics</strong>
🚀 YOLOv12 Face Detection Web - Version 1.3.0 | Powered by <strong class="text-white">Flask</strong> & <strong class="text-white">Ultralytics</strong>
</p>
<p class="mb-0 small text-muted">
2026 &copy; Developed by <a href="mailto:luongminhngoc0@gmail.com">Ngoc M. Luong</a> with ❤️
Expand Down Expand Up @@ -724,10 +750,34 @@ <h3 style="color: #667eea; margin-top: 30px; margin-bottom: 15px;">🔧 API Endp
}
}, 150); // Process every 150ms (~6-7 FPS)
}
// --- Theme (dark / light) handling ---
function toggleDarkMode() {
    // classList.toggle returns the resulting state: true when dark mode is now on.
    const isDark = document.body.classList.toggle('dark-mode');
    document.querySelectorAll('.card').forEach(card => card.classList.toggle('dark-mode'));

    const toggleBtn = document.getElementById('darkModeBtn');
    toggleBtn.innerHTML = isDark ? '☀️ Light Mode' : '🌙 Dark Mode';
    localStorage.setItem('theme', isDark ? 'dark' : 'light');
}

// Restore the previously chosen theme on page load.
if (localStorage.getItem('theme') === 'dark') {
    toggleDarkMode();
}

// Detect image
async function detectImage() {
const file = document.getElementById('imageFile').files[0];
const model = document.getElementById('imageModel').value;
const blur = document.getElementById('blurFaces').checked;

if (!file) {
showMessage('imageMessage', 'Please select an image', 'error');
Expand All @@ -737,6 +787,7 @@ <h3 style="color: #667eea; margin-top: 30px; margin-bottom: 15px;">🔧 API Endp
const formData = new FormData();
formData.append('file', file);
formData.append('model', model);
formData.append('blur', blur);

document.getElementById('detectImageBtn').disabled = true;
document.getElementById('imageProgress').style.display = 'block';
Expand Down Expand Up @@ -771,6 +822,32 @@ <h3 style="color: #667eea; margin-top: 30px; margin-bottom: 15px;">🔧 API Endp
// Display image
document.getElementById('resultImage').src = data.image;

// Handle Gallery
const cropsContainer = document.getElementById('cropsContainer');
const gallerySection = document.getElementById('gallerySection');
cropsContainer.innerHTML = '';

if (data.detections.crops && data.detections.crops.length > 0) {
data.detections.crops.forEach((cropBase64, index) => {
const img = document.createElement('img');
img.src = `data:image/jpeg;base64,${cropBase64}`;
img.style.width = '80px';
img.style.height = '80px';
img.style.objectFit = 'cover';
img.style.borderRadius = '4px';
img.style.cursor = 'pointer';
img.title = `Face ${index + 1}`;
img.onclick = () => {
const win = window.open();
win.document.write(`<img src="data:image/jpeg;base64,${cropBase64}">`);
};
cropsContainer.appendChild(img);
});
gallerySection.style.display = 'block';
} else {
gallerySection.style.display = 'none';
}

// Display detection stats
document.getElementById('faceCount').textContent = data.detections.count;

Expand Down
Loading