# Source: Hugging Face Space by chuuhtetnaing — commit "init project" (d06fdab)
import gradio as gr
from huggingface_hub import hf_hub_download
from ultralytics import YOLO
from PIL import Image, ImageDraw, ImageFont
import numpy as np
# Download the pretrained face-detection weights from the Hugging Face Hub
# (cached locally after the first call) and load them into a YOLO model.
# NOTE(review): this runs at import time and requires network access on
# first launch — confirm that is acceptable for the deployment target.
model_path = hf_hub_download(repo_id="chuuhtetnaing/face-detection-yolo26n", filename="best.pt")
model = YOLO(model_path)
def crop_face_with_padding(image, bbox, padding_ratio=0.15):
    """Return a crop of *image* around *bbox*, expanded by *padding_ratio*.

    The padding added on each side is proportional to the box's own width
    and height, and the padded box is clamped to the image bounds so crops
    near an edge never go out of range.

    Args:
        image: PIL image (anything exposing ``.size`` and ``.crop``).
        bbox: ``(x1, y1, x2, y2)`` face box; coordinates may be floats.
        padding_ratio: Fraction of the box size to add on each side.

    Returns:
        The cropped image region.
    """
    width, height = image.size
    left, top, right, bottom = bbox

    # Padding scales with the detected box, not the full image.
    pad_x = int((right - left) * padding_ratio)
    pad_y = int((bottom - top) * padding_ratio)

    crop_box = (
        max(0, int(left - pad_x)),
        max(0, int(top - pad_y)),
        min(width, int(right + pad_x)),
        min(height, int(bottom + pad_y)),
    )
    return image.crop(crop_box)
def detect_faces(image):
    """Detect faces and return (annotated image, list of cropped faces).

    Args:
        image: Input image as a PIL Image or numpy array (Gradio may pass
            either depending on component configuration).

    Returns:
        Tuple of (PIL image with green boxes and confidence labels drawn,
        list of padded face crops), or ``(None, None)`` when no image was
        provided. The gallery value is ``None`` when no face is found.
    """
    if image is None:
        return None, None

    # Normalize to PIL so .copy()/.crop() work regardless of input type.
    image = Image.fromarray(image) if isinstance(image, np.ndarray) else image

    # Run detection; [0] is the result for the single input image.
    results = model(image)[0]

    annotated = image.copy()
    draw = ImageDraw.Draw(annotated)

    # Fix: the font is loop-invariant — load it once, not per detection.
    # NOTE: load_default(size=...) requires Pillow >= 10.1.
    font = ImageFont.load_default(size=200)

    cropped_faces = []
    for box in results.boxes:
        x1, y1, x2, y2 = box.xyxy[0].tolist()
        conf = box.conf[0].item()

        # Draw bounding box and confidence label.
        draw.rectangle([x1, y1, x2, y2], outline="green", width=20)
        # Fix: clamp the label y so it stays on-canvas when the face is
        # within 200 px of the top edge (previously drawn off-image).
        label_y = max(0, y1 - 200)
        draw.text((x1, label_y), f"{conf:.2f}", fill="green", font=font)

        # Crop with padding for the gallery.
        cropped_faces.append(crop_face_with_padding(image, (x1, y1, x2, y2)))

    return annotated, cropped_faces if cropped_faces else None
# Build the Gradio interface: image upload on the left, annotated result
# and a gallery of cropped faces on the right.
with gr.Blocks(css=".main-row { margin-top: 30px !important; }") as demo:
    gr.Markdown("<h1 style='text-align: center; margin-top: 20px'>Face Detection with YOLO26n</h1>")

    with gr.Row(elem_classes="main-row"):
        # Left column: input image and trigger button.
        with gr.Column(scale=1):
            source_image = gr.Image(label="Upload Image", type="pil", height=400)
            run_button = gr.Button("Detect Faces", variant="primary")

        # Right column: annotated detection output plus per-face crops.
        with gr.Column(scale=1):
            with gr.Row():
                result_image = gr.Image(label="Detected Faces", height=400)
                faces_gallery = gr.Gallery(label="Cropped Faces", columns=2, height=400)

    # Wire the button to the detection function.
    run_button.click(
        fn=detect_faces,
        inputs=source_image,
        outputs=[result_image, faces_gallery],
    )

if __name__ == "__main__":
    demo.launch()