eaglelandsonce committed on
Commit
c9e71d3
·
verified ·
1 Parent(s): 4c0a827

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +141 -0
app.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import gradio as gr
3
+ from PIL import Image, ImageDraw, ImageFont
4
+
5
+ from azure.ai.vision.face import FaceClient
6
+ from azure.ai.vision.face.models import (
7
+ FaceDetectionModel,
8
+ FaceRecognitionModel,
9
+ FaceAttributeTypeDetection01,
10
+ )
11
+ from azure.core.credentials import AzureKeyCredential
12
+
13
+ # ✅ Hardcoded endpoint (safe to keep in code)
14
+ AI_SERVICE_ENDPOINT = "https://face59137214.cognitiveservices.azure.com/"
15
+
16
+
17
def pil_to_bytes(img: Image.Image) -> bytes:
    """Serialize a PIL image to high-quality JPEG bytes.

    Images that are not already RGB (e.g. RGBA, palette) are converted
    first, because the JPEG format cannot store an alpha channel.
    """
    rgb = img if img.mode == "RGB" else img.convert("RGB")
    with io.BytesIO() as buffer:
        rgb.save(buffer, format="JPEG", quality=95)
        return buffer.getvalue()
23
+
24
+
25
def analyze_and_annotate(image: Image.Image, api_key: str):
    """Detect faces in *image* with Azure AI Face and annotate them.

    Parameters
    ----------
    image : PIL.Image.Image or None
        Image dropped into the Gradio UI (``None`` when nothing was provided).
    api_key : str
        Azure AI Services key pasted by the user.

    Returns
    -------
    tuple
        ``(report, annotated_image)`` — a human-readable text report and a
        copy of the input image with numbered bounding boxes drawn on each
        detected face.  On early exit the original image (or ``None``) is
        returned unchanged.
    """
    if image is None:
        return "No image provided.", None

    if not api_key or not api_key.strip():
        return "Please paste your AI_SERVICE_KEY in the box.", image

    # Create Face client using hardcoded endpoint + user-pasted key
    face_client = FaceClient(
        endpoint=AI_SERVICE_ENDPOINT,
        credential=AzureKeyCredential(api_key.strip()),
    )

    # Facial features to retrieve (same as the lab)
    features = [
        FaceAttributeTypeDetection01.HEAD_POSE,
        FaceAttributeTypeDetection01.OCCLUSION,
        FaceAttributeTypeDetection01.ACCESSORIES,
    ]

    img_bytes = pil_to_bytes(image)

    detected_faces = face_client.detect(
        image_content=img_bytes,
        detection_model=FaceDetectionModel.DETECTION01,
        recognition_model=FaceRecognitionModel.RECOGNITION01,
        return_face_id=False,
        return_face_attributes=features,
    )

    if not detected_faces:
        return "0 faces detected.", image

    # Prepare annotation image (draw on a copy so the input is untouched)
    annotated = image.copy()
    if annotated.mode != "RGB":
        annotated = annotated.convert("RGB")
    draw = ImageDraw.Draw(annotated)

    # Font (best-effort: arial.ttf is typically absent on Linux hosts)
    try:
        font = ImageFont.truetype("arial.ttf", 24)
    except Exception:
        font = ImageFont.load_default()

    def _occlusion_value(occ, attr_name, json_key):
        # BUG FIX: the original read occlusion values only when `occ` was a
        # dict, but azure-ai-vision-face returns an OcclusionProperties model
        # object (not a dict subclass), so every occlusion field silently
        # reported None.  Support both the model-object shape (snake_case
        # attributes) and a raw-JSON dict shape (camelCase keys).
        if occ is None:
            return None
        if isinstance(occ, dict):
            return occ.get(json_key)
        return getattr(occ, attr_name, None)

    lines = [f"{len(detected_faces)} faces detected.\n"]

    for idx, face in enumerate(detected_faces, start=1):
        r = face.face_rectangle
        left, top, right, bottom = r.left, r.top, r.left + r.width, r.top + r.height

        # Bounding box + face number label
        draw.rectangle([(left, top), (right, bottom)], outline="lime", width=5)

        label = str(idx)
        x0, y0, x1, y1 = draw.textbbox((0, 0), label, font=font)
        tw, th = x1 - x0, y1 - y0
        pad = 6
        # Place the label just above the box, clamped to the image top.
        label_bg = [(left, max(0, top - th - 2 * pad)), (left + tw + 2 * pad, top)]
        draw.rectangle(label_bg, fill="lime")
        draw.text((left + pad, label_bg[0][1] + pad), label, fill="black", font=font)

        # Extract attributes (any of these may be None if not returned)
        attrs = face.face_attributes
        hp = getattr(attrs, "head_pose", None)
        occ = getattr(attrs, "occlusion", None)
        acc = getattr(attrs, "accessories", None)

        yaw = getattr(hp, "yaw", None) if hp else None
        pitch = getattr(hp, "pitch", None) if hp else None
        roll = getattr(hp, "roll", None) if hp else None

        forehead_occ = _occlusion_value(occ, "forehead_occluded", "foreheadOccluded")
        eye_occ = _occlusion_value(occ, "eye_occluded", "eyeOccluded")
        mouth_occ = _occlusion_value(occ, "mouth_occluded", "mouthOccluded")

        accessories_list = []
        if acc:
            for a in acc:
                accessories_list.append(str(a.type))

        # Report
        lines.append(f"Face number {idx}")
        lines.append(f" - Bounding box: left={left}, top={top}, width={r.width}, height={r.height}")
        lines.append(f" - Head Pose (Yaw): {yaw}")
        lines.append(f" - Head Pose (Pitch): {pitch}")
        lines.append(f" - Head Pose (Roll): {roll}")
        lines.append(f" - Forehead occluded?: {forehead_occ}")
        lines.append(f" - Eye occluded?: {eye_occ}")
        lines.append(f" - Mouth occluded?: {mouth_occ}")
        lines.append(" - Accessories:")
        if accessories_list:
            for a in accessories_list:
                lines.append(f" - {a}")
        else:
            lines.append(" - None")
        lines.append("")

    return "\n".join(lines).strip(), annotated
124
+
125
+
126
# Gradio UI wiring: an image + pasted key in, analysis text + annotated
# image out.  Components are named first for readability; the resulting
# Interface is identical to constructing it inline.
_inputs = [
    gr.Image(type="pil", label="Drag & drop an image"),
    gr.Textbox(label="AI_SERVICE_KEY (paste here)", type="password", placeholder="Paste your key..."),
]
_outputs = [
    gr.Textbox(label="Face analysis (numbers + attributes)", lines=18),
    gr.Image(type="pil", label="Annotated image (boxes + numbers)"),
]

demo = gr.Interface(
    fn=analyze_and_annotate,
    inputs=_inputs,
    outputs=_outputs,
    title="Azure AI Face Analysis (Drag & Drop)",
    description=f"Endpoint is hardcoded to: {AI_SERVICE_ENDPOINT}",
)

if __name__ == "__main__":
    demo.launch()