File size: 1,971 Bytes
961f04d
4068bd0
6672805
326b8f4
6672805
539dd89
326b8f4
539dd89
961f04d
 
326b8f4
 
 
 
539dd89
326b8f4
 
 
 
bfac2df
539dd89
326b8f4
 
961f04d
539dd89
961f04d
 
326b8f4
539dd89
961f04d
 
539dd89
326b8f4
539dd89
 
 
4068bd0
326b8f4
6672805
4068bd0
539dd89
 
4068bd0
539dd89
 
4068bd0
539dd89
961f04d
 
 
 
 
 
 
 
 
 
6672805
539dd89
961f04d
 
 
684baaa
6672805
961f04d
 
 
 
 
 
 
 
 
 
 
 
 
6672805
326b8f4
539dd89
326b8f4
961f04d
 
6672805
 
326b8f4
6672805
684baaa
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
# Check the requirements.txt file; import the necessary libraries.
import gradio as gr
import numpy as np
import cv2
from PIL import Image
from ultralytics import YOLO
from transformers import pipeline

# Loading Models via Pipeline
# Person detector: YOLOv8-nano weights (fetched automatically on first run).
yolo = YOLO("yolov8n.pt")

# Age classifier: ViT fine-tuned on age buckets; device=-1 forces CPU inference.
age_model = pipeline(
    "image-classification",
    model="nateraw/vit-age-classifier",
    device=-1
)

# Gender classifier: image-classification head; device=-1 forces CPU inference.
gender_model = pipeline(
    "image-classification",
    model="rizvandwiki/gender-classification",
    device=-1
)

# Live Detection
def live_detect(img):
    """Annotate one webcam frame with person boxes and gender/age labels.

    Parameters
    ----------
    img : PIL.Image.Image | None
        Frame from the Gradio webcam stream (RGB). ``None`` before the
        stream produces its first frame.

    Returns
    -------
    PIL.Image.Image | None
        The resized frame with rectangles/labels drawn on it, or the input
        unchanged when it was ``None``.
    """
    if img is None:
        return img

    frame = np.array(img)
    frame = cv2.resize(frame, (480, 360))  # mobile optimized
    h, w = frame.shape[:2]

    # Detect only class 0 ("person") at >= 0.5 confidence; [0] = first image's result.
    results = yolo(frame, conf=0.5, classes=[0], verbose=False)[0]

    for box in results.boxes:
        x1, y1, x2, y2 = map(int, box.xyxy[0])

        # Clamp to the frame: a negative coordinate would WRAP AROUND in
        # numpy slicing and yield a nonsense crop. Skip degenerate boxes.
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(w, x2), min(h, y2)
        if x2 <= x1 or y2 <= y1:
            continue

        crop = frame[y1:y2, x1:x2]

        if crop.size == 0:
            continue

        # Small fixed size keeps CPU classifier inference fast per box.
        face = cv2.resize(crop, (96, 96))
        pil_face = Image.fromarray(face)

        # Each pipeline returns predictions sorted by score; take the top label.
        age = age_model(pil_face)[0]["label"]
        gender = gender_model(pil_face)[0]["label"]

        label = f"{gender}, {age}"

        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(
            frame,
            label,
            (x1, y1 - 6),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 255, 0),
            2,
        )

    return Image.fromarray(frame)

# GradIO User Interface (UI)
# NOTE: custom CSS must be passed to gr.Blocks(css=...); demo.launch() does
# not accept a `css` keyword, so passing it there fails/has no effect.
with gr.Blocks(
    css="""
    #cam {max-width:100%; border-radius:12px;}
    """
) as demo:

    gr.Markdown(
        """
        Human Lens  :: Live Detection with Gender & Age
        """
    )

    # One component serves as both input and output so annotated frames
    # render in place over the live feed.
    cam = gr.Image(
        sources=["webcam"],
        type="pil",
        streaming=True,
        label="Live Camera",
        elem_id="cam"
    )

    # Route every captured frame through live_detect and display the result.
    cam.stream(
        fn=live_detect,
        inputs=cam,
        outputs=cam,
        show_progress=False
    )

    gr.Markdown("<hr><center><b>Developed by Phani</b></center>")

# queue() enables streaming/concurrent request handling before launch.
demo.queue().launch()