import gradio as gr
import spaces
import numpy as np
from PIL import Image
import mediapipe as mp

# Initialize MediaPipe solution modules once at import time. The Holistic
# graph itself is still constructed per request below, which is the
# recommended pattern for static_image_mode processing.
mp_holistic = mp.solutions.holistic
mp_drawing = mp.solutions.drawing_utils


@spaces.GPU
def extract_pose(image: Image.Image) -> Image.Image:
    """Extract a white-on-black skeleton (body, hands, face mesh) from a photo.

    Args:
        image: The uploaded photo. Gradio passes ``None`` when the user
            submits without uploading anything.

    Returns:
        A black canvas of the same size as the input with all detected
        landmarks drawn in white. If no person is detected, the canvas
        is returned empty (all black).

    Raises:
        gr.Error: If no image was provided.
    """
    # Guard: the original code crashed with AttributeError on image.convert
    # when the user clicked Submit without an image. Surface a clear UI error.
    if image is None:
        raise gr.Error("Please upload an image first.")

    # Convert PIL -> RGB numpy array (MediaPipe expects RGB uint8 frames).
    img = np.array(image.convert("RGB"))
    h, w, _ = img.shape

    # Run Holistic. static_image_mode=True treats the input as a single
    # unrelated frame; model_complexity=2 selects the most accurate model.
    with mp_holistic.Holistic(
        static_image_mode=True,
        model_complexity=2,
        enable_segmentation=False,
        refine_face_landmarks=True,
    ) as holistic:
        results = holistic.process(img)

    # Blank (black) canvas matching the input dimensions.
    canvas = np.zeros((h, w, 3), dtype=np.uint8)

    # Drawing specs: white lines, small circles. The finer spec is shared
    # by the hands and the dense face tesselation.
    pose_spec = mp_drawing.DrawingSpec(
        color=(255, 255, 255), thickness=2, circle_radius=2
    )
    fine_spec = mp_drawing.DrawingSpec(
        color=(255, 255, 255), thickness=1, circle_radius=1
    )

    # (landmarks, connection set, spec) for each body part; skip parts the
    # model did not detect.
    parts = (
        (results.pose_landmarks, mp_holistic.POSE_CONNECTIONS, pose_spec),
        (results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS, fine_spec),
        (results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS, fine_spec),
        (results.face_landmarks, mp_holistic.FACEMESH_TESSELATION, fine_spec),
    )
    for landmarks, connections, spec in parts:
        if landmarks:
            mp_drawing.draw_landmarks(canvas, landmarks, connections, spec, spec)

    # Convert back to PIL and return.
    return Image.fromarray(canvas)


# Gradio UI
demo = gr.Interface(
    fn=extract_pose,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=gr.Image(label="Pure Pose Overlay"),
    title="🕺 Pure Skeleton Extractor",
    description="Uploads your photo → returns only the pose lines on a dark background.",
)

if __name__ == "__main__":
    demo.launch()