"""Gradio app for face detection and age estimation.

This app embeds the face-age-inference engine directly for deployment
on HuggingFace Spaces (which cannot make external HTTP calls).
"""
import cv2
import gradio as gr
import numpy as np
import spaces
from face_age_inference import (
    FaceAgeInferenceEngine,
    InferenceError,
)
# Module-level singleton slot for the inference engine.
# Kept as None until first use so import stays cheap; get_engine()
# performs the actual (potentially slow) model construction lazily.
engine: FaceAgeInferenceEngine | None = None
def get_engine() -> FaceAgeInferenceEngine:
    """Return the shared inference engine, constructing it on first call.

    The engine is stored in the module-level ``engine`` global so that
    the (expensive) model load happens at most once per process.
    """
    global engine
    if engine is not None:
        return engine
    engine = FaceAgeInferenceEngine()
    return engine
def predict(image: np.ndarray | None) -> tuple[np.ndarray | None, str]:
    """Run face detection and age estimation on an image.

    Args:
        image: Input image as an RGB numpy array from Gradio, or None
            when no image was provided.

    Returns:
        Tuple of (annotated image, results text). The annotated image
        is None when input is missing or an error occurred.
    """
    # NOTE(review): `spaces` is imported at module level but unused here;
    # on ZeroGPU Spaces this function is usually decorated with
    # @spaces.GPU — confirm against the Space's hardware configuration.
    if image is None:
        return None, "Please upload an image."
    try:
        # OpenCV works in BGR channel order; Gradio hands us RGB.
        bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        result = get_engine().predict(bgr)
        # Convert the annotated frame back to RGB so Gradio displays it.
        annotated = cv2.cvtColor(result.annotated_image, cv2.COLOR_BGR2RGB)
        n_faces = len(result.ages)
        if n_faces == 0:
            return annotated, "No faces detected."
        # Ages reported as NaN are unknown and are never counted as minors.
        n_minors = sum(1 for a in result.ages if not np.isnan(a) and a < 18)
        summary = (
            f"Detected {n_faces} {'face' if n_faces == 1 else 'faces'}.\n"
            f"Estimated {n_minors} "
            f"{'person' if n_minors == 1 else 'people'} under 18."
        )
        return annotated, summary
    except InferenceError as e:
        # Expected engine failures: show the message in the UI.
        return None, f"Error: {e}"
    except Exception as e:
        # UI boundary: surface anything unexpected rather than crash the app.
        return None, f"Unexpected error: {e}"
# Create Gradio interface: one image in, annotated image + summary text out.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(
        label="Upload Image",
        # "numpy" delivers the image to predict() as an RGB ndarray.
        type="numpy",
        sources=["upload", "webcam", "clipboard"],
    ),
    outputs=[
        gr.Image(label="Annotated Image", type="numpy"),
        gr.Textbox(label="Results", lines=2),
    ],
    title="Face Detection & Age Estimation",
    description=(
        "Upload an image to detect faces and estimate ages. "
        "Faces are highlighted with bounding boxes: "
        "**green** for minors (under 18), **blue** for adults."
    ),
    # No bundled example images; caching disabled accordingly.
    examples=[],
    cache_examples=False,
)
if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from outside the
    # container (required on HuggingFace Spaces / Docker deployments).
    demo.launch(server_name="0.0.0.0")