# GAP / app.py
# Author: Phani-ISB — "Update app.py" (commit 684baaa, verified)
# Check the requirements.txt file , import necessary libraries
import gradio as gr
import numpy as np
import cv2
from PIL import Image
from ultralytics import YOLO
from transformers import pipeline
# ---- Model loading ----
# YOLOv8-nano handles person detection; two Hugging Face image-classification
# pipelines predict age and gender for each detected crop.
yolo = YOLO("yolov8n.pt")


def _cpu_classifier(model_id):
    """Build an image-classification pipeline on CPU (device=-1) for *model_id*."""
    return pipeline("image-classification", model=model_id, device=-1)


age_model = _cpu_classifier("nateraw/vit-age-classifier")
gender_model = _cpu_classifier("rizvandwiki/gender-classification")
# Live Detection
def live_detect(img):
    """Detect people in a webcam frame and annotate each with gender and age.

    Parameters
    ----------
    img : PIL.Image.Image | None
        Incoming frame from the Gradio webcam stream; ``None`` (no frame yet)
        is passed straight through.

    Returns
    -------
    PIL.Image.Image | None
        The frame, resized to 480x360, with a green box and a
        ``"gender, age"`` label drawn over every detected person.
    """
    if img is None:
        return img
    frame = np.array(img)
    # Downscale so YOLO + two classifier passes keep up with streaming
    # ("mobile optimized" in the original).
    frame = cv2.resize(frame, (480, 360))
    frame_h, frame_w = frame.shape[:2]
    # classes=[0] restricts detection to the COCO "person" class.
    results = yolo(frame, conf=0.5, classes=[0], verbose=False)[0]
    for box in results.boxes:
        x1, y1, x2, y2 = map(int, box.xyxy[0])
        # Clamp to frame bounds: detector boxes can extend slightly outside
        # the image, and a negative index would silently slice the wrong
        # region instead of producing an empty crop.
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(frame_w, x2), min(frame_h, y2)
        if x2 <= x1 or y2 <= y1:
            continue
        crop = frame[y1:y2, x1:x2]
        if crop.size == 0:
            continue
        face = cv2.resize(crop, (96, 96))
        pil_face = Image.fromarray(face)
        # Each pipeline returns results sorted by score; take the top label.
        age = age_model(pil_face)[0]["label"]
        gender = gender_model(pil_face)[0]["label"]
        label = f"{gender}, {age}"
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        # Draw the label above the box, or inside it when the box touches
        # the top edge (otherwise the text would be clipped off-screen).
        text_y = y1 - 6 if y1 - 6 > 10 else y1 + 16
        cv2.putText(
            frame,
            label,
            (x1, text_y),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 255, 0),
            2,
        )
    return Image.fromarray(frame)
# ---- Gradio user interface ----
with gr.Blocks() as demo:
    gr.Markdown(
        """
Human Lens :: Live Detection with Gender & Age
"""
    )
    # The webcam component is both input and output, so each annotated
    # frame replaces the live view in place.
    camera_kwargs = dict(
        sources=["webcam"],
        type="pil",
        streaming=True,
        label="Live Camera",
        elem_id="cam",
    )
    camera = gr.Image(**camera_kwargs)
    camera.stream(fn=live_detect, inputs=camera, outputs=camera, show_progress=False)
    gr.Markdown("<hr><center><b>Developed by Phani</b></center>")

# Queue streamed frames and launch; the CSS rounds the camera widget's corners.
demo.queue().launch(
    css="""
#cam {max-width:100%; border-radius:12px;}
"""
)