"""Azure AI Face detection demo (Gradio).

Drag-and-drop an image, detect faces with the Azure AI Face service, and
return both a text report (head pose, occlusion, accessories) and an
annotated copy of the image with numbered bounding boxes.
"""

import io
import os
import traceback

import gradio as gr
from azure.ai.vision.face import FaceClient
from azure.ai.vision.face.models import (
    FaceAttributeTypeDetection01,
    FaceDetectionModel,
    FaceRecognitionModel,
)
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError
from PIL import Image, ImageDraw, ImageFont

# ✅ Hardcoded endpoint (you requested this)
HARDCODED_ENDPOINT = "https://face59137214.cognitiveservices.azure.com/"


def _normalize_endpoint(endpoint: str) -> str:
    """Return *endpoint* stripped, validated as https, with a trailing '/'.

    Raises:
        ValueError: if the endpoint does not start with ``https://``.
    """
    endpoint = (endpoint or "").strip()
    if not endpoint.startswith("https://"):
        raise ValueError(f"Endpoint must start with https:// (got: {endpoint})")
    if not endpoint.endswith("/"):
        endpoint += "/"
    return endpoint


def _get_endpoint() -> str:
    """Return the Face endpoint, preferring the AI_SERVICE_ENDPOINT env var."""
    # Allow Spaces Secret override if you want:
    # Settings → Variables and secrets → New secret → AI_SERVICE_ENDPOINT
    env_ep = os.getenv("AI_SERVICE_ENDPOINT", "").strip()
    endpoint = env_ep if env_ep else HARDCODED_ENDPOINT
    return _normalize_endpoint(endpoint)


def _get_key(ui_key: str) -> str:
    """Return the Face API key from the UI box or the AI_SERVICE_KEY env var.

    Raises:
        ValueError: if no plausible key (at least 20 chars) is found.
    """
    # Prefer UI key if provided; else use Spaces Secret AI_SERVICE_KEY
    key = (ui_key or "").strip()
    if not key:
        key = os.getenv("AI_SERVICE_KEY", "").strip()
    if not key or len(key) < 20:
        raise ValueError(
            "Missing AI_SERVICE_KEY.\n\n"
            "Fix:\n"
            "• Best: add a Hugging Face Space Secret named AI_SERVICE_KEY\n"
            " (Settings → Variables and secrets → New secret)\n"
            "• Or paste the full key into the UI box."
        )
    return key


def _make_face_client(ui_key: str) -> FaceClient:
    """Build a FaceClient from the resolved endpoint and key.

    Raises:
        ValueError: propagated from endpoint/key resolution.
    """
    endpoint = _get_endpoint()
    key = _get_key(ui_key)
    # ✅ Safe debug (no secret leakage)
    print(f"[DEBUG] endpoint={endpoint}")
    print(f"[DEBUG] key_len={len(key)} last4={key[-4:]}")
    return FaceClient(endpoint=endpoint, credential=AzureKeyCredential(key))


def _pil_to_bytes(img: Image.Image) -> bytes:
    """Encode a PIL image as JPEG bytes, converting to RGB first if needed."""
    if img.mode != "RGB":
        img = img.convert("RGB")
    buf = io.BytesIO()
    img.save(buf, format="JPEG", quality=95)
    return buf.getvalue()


def _occlusion_flag(occ, attr_name: str, dict_key: str):
    """Read one occlusion flag from either an SDK model object or a raw dict.

    The Face SDK returns an OcclusionProperties model with snake_case
    attributes; a raw REST payload would be a dict with camelCase keys.
    The previous code only handled the dict case, so occlusion flags were
    always None when using the SDK — handle both shapes here.
    """
    if occ is None:
        return None
    if isinstance(occ, dict):
        return occ.get(dict_key)
    return getattr(occ, attr_name, None)


def analyze_and_annotate(image: Image.Image, ui_key: str):
    """Detect faces in *image* and return ``(report_text, annotated_image)``.

    On any failure a human-readable error string is returned together with
    the original (unannotated) image, so the Gradio UI never crashes.
    """
    if image is None:
        return "No image provided.", None
    try:
        face_client = _make_face_client(ui_key)

        # Same feature set as the lab
        features = [
            FaceAttributeTypeDetection01.HEAD_POSE,
            FaceAttributeTypeDetection01.OCCLUSION,
            FaceAttributeTypeDetection01.ACCESSORIES,
        ]

        img_bytes = _pil_to_bytes(image)

        detected_faces = face_client.detect(
            image_content=img_bytes,
            detection_model=FaceDetectionModel.DETECTION01,
            recognition_model=FaceRecognitionModel.RECOGNITION01,
            return_face_id=False,
            return_face_attributes=features,
        )

        if not detected_faces:
            return "0 faces detected.", image

        # Prepare annotation image (work on a copy; never mutate the input)
        annotated = image.copy()
        if annotated.mode != "RGB":
            annotated = annotated.convert("RGB")
        draw = ImageDraw.Draw(annotated)

        # Arial may not exist on the host (e.g. Linux Spaces) — fall back.
        try:
            font = ImageFont.truetype("arial.ttf", 24)
        except Exception:
            font = ImageFont.load_default()

        lines = [f"{len(detected_faces)} faces detected.\n"]

        for idx, face in enumerate(detected_faces, start=1):
            r = face.face_rectangle
            left, top = r.left, r.top
            right, bottom = r.left + r.width, r.top + r.height

            # Draw rectangle
            draw.rectangle([(left, top), (right, bottom)], outline="lime", width=5)

            # Draw label background + number
            label = str(idx)
            x0, y0, x1, y1 = draw.textbbox((0, 0), label, font=font)
            tw, th = x1 - x0, y1 - y0
            pad = 6
            # Clamp the label background to the top edge of the image.
            label_bg = [(left, max(0, top - th - 2 * pad)), (left + tw + 2 * pad, top)]
            draw.rectangle(label_bg, fill="lime")
            draw.text((left + pad, label_bg[0][1] + pad), label, fill="black", font=font)

            # Extract attributes (guarded — any of these may be absent)
            attrs = face.face_attributes
            hp = getattr(attrs, "head_pose", None)
            occ = getattr(attrs, "occlusion", None)
            acc = getattr(attrs, "accessories", None)

            yaw = getattr(hp, "yaw", None) if hp else None
            pitch = getattr(hp, "pitch", None) if hp else None
            roll = getattr(hp, "roll", None) if hp else None

            forehead_occ = _occlusion_flag(occ, "forehead_occluded", "foreheadOccluded")
            eye_occ = _occlusion_flag(occ, "eye_occluded", "eyeOccluded")
            mouth_occ = _occlusion_flag(occ, "mouth_occluded", "mouthOccluded")

            accessories_list = [str(a.type) for a in acc] if acc else []

            # Output lines including bounding box numbers
            lines.append(f"Face number {idx}")
            lines.append(f" - Bounding box: left={left}, top={top}, width={r.width}, height={r.height}")
            lines.append(f" - Head Pose (Yaw): {yaw}")
            lines.append(f" - Head Pose (Pitch): {pitch}")
            lines.append(f" - Head Pose (Roll): {roll}")
            lines.append(f" - Forehead occluded?: {forehead_occ}")
            lines.append(f" - Eye occluded?: {eye_occ}")
            lines.append(f" - Mouth occluded?: {mouth_occ}")
            lines.append(" - Accessories:")
            if accessories_list:
                for a in accessories_list:
                    lines.append(f" - {a}")
            else:
                lines.append(" - None")
            lines.append("")

        return "\n".join(lines).strip(), annotated

    except ClientAuthenticationError:
        # This is your 401 case
        return (
            "AUTH ERROR (401): Invalid key or wrong endpoint for this key.\n\n"
            "Fix checklist:\n"
            "1) In your Hugging Face Space: Settings → Variables and secrets → add secret AI_SERVICE_KEY\n"
            "2) Make sure the key belongs to the same Azure resource as the endpoint.\n"
            "3) Make sure the endpoint is the *resource endpoint* (regional) and ends with '/'.\n"
            "4) If the key was exposed, rotate/regenerate it in Azure.\n",
            image,
        )
    except HttpResponseError as e:
        return (f"Azure request failed:\n{str(e)}", image)
    except Exception as e:
        tb = traceback.format_exc()
        return (f"APP ERROR:\n{e}\n\nTraceback:\n{tb}", image)


demo = gr.Interface(
    fn=analyze_and_annotate,
    inputs=[
        gr.Image(type="pil", label="Drag & drop an image"),
        gr.Textbox(
            label="AI_SERVICE_KEY (paste here) — or set as Spaces Secret AI_SERVICE_KEY",
            type="password",
            placeholder="Paste your key here (recommended: use Spaces Secrets instead)…",
        ),
    ],
    outputs=[
        gr.Textbox(label="Face analysis (numbers + attributes + bounding boxes)", lines=18),
        gr.Image(type="pil", label="Annotated image (boxes + face numbers)"),
    ],
    title="Azure AI Face Detection (Drag & Drop)",
    description=(
        "If you get 401, your key and endpoint don’t match. "
        "Best practice on Spaces: set AI_SERVICE_KEY as a Secret."
    ),
)

if __name__ == "__main__":
    # On Spaces you can also do demo.launch(server_name="0.0.0.0", server_port=7860)
    demo.launch()