Update app.py
app.py CHANGED
@@ -2,12 +2,52 @@ import gradio as gr
 import cv2
 import mediapipe as mp
 import numpy as np
+from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation
+import torch
 
 # Initialize Mediapipe Pose Estimation
 mp_pose = mp.solutions.pose
 pose = mp_pose.Pose(static_image_mode=True, model_complexity=2)
 mp_drawing = mp.solutions.drawing_utils
 
+# Initialize Segformer Model for Segmentation
+feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
+model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
+
+# Define body part mapping with unique colors
+PART_COLORS = {
+    "head": (0, 255, 0),
+    "shoulders": (255, 0, 0),
+    "upper_body": (0, 0, 255),
+    "arms": (255, 255, 0),
+    "lower_body": (255, 0, 255)
+}
+
+PART_LABELS = {
+    "head": [0],           # Face class in Segformer
+    "shoulders": [2],      # Upper body classes (may include neck, shoulders)
+    "upper_body": [3, 4],  # Torso classes
+    "arms": [5, 6],        # Arms
+    "lower_body": [7, 8]   # Legs
+}
+
+def segment_image(image):
+    # Preprocess the image for Segformer
+    inputs = feature_extractor(images=image, return_tensors="pt")
+    outputs = model(**inputs)
+    logits = outputs.logits
+    segmentation = torch.argmax(logits, dim=1).squeeze().cpu().numpy()
+
+    # Create a blank mask image
+    segmented_image = np.zeros_like(image)
+
+    # Color each part with unique colors
+    for part, color in PART_COLORS.items():
+        mask = np.isin(segmentation, PART_LABELS[part])
+        segmented_image[mask] = color
+
+    return segmented_image
+
 def estimate_pose(image):
     # Convert image from BGR (OpenCV) to RGB
     image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
@@ -15,7 +55,7 @@ def estimate_pose(image):
     results = pose.process(image_rgb)
 
     if not results.pose_landmarks:
-        return image  #
+        return image, segment_image(image)  # Return original image and segmented image if no pose found
 
     # Draw pose landmarks on the image
     annotated_image = image.copy()
@@ -27,15 +67,18 @@ def estimate_pose(image):
         connection_drawing_spec=mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2, circle_radius=2),
     )
 
-    return annotated_image
+    return annotated_image, segment_image(image)
 
 # Gradio Interface
 interface = gr.Interface(
     fn=estimate_pose,
     inputs=gr.Image(type="numpy", label="Upload an Image"),
-    outputs=
-
-
+    outputs=[
+        gr.Image(type="numpy", label="Pose Landmarks Image"),
+        gr.Image(type="numpy", label="Segmented Body Parts"),
+    ],
+    title="Human Pose Estimation and Segmentation",
+    description="Upload an image to detect and visualize human pose landmarks and segment body parts (head, shoulders, upper body, arms, lower body) with different colors.",
 )
 
 # Launch the Gradio app
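
Note: SegformerForSemanticSegmentation returns logits at one quarter of the preprocessed input resolution, so the argmax map built in segment_image will generally not match the original image's height and width, and segmented_image[mask] = color will then fail with a shape mismatch. The nvidia/segformer-b0-finetuned-ade-512-512 checkpoint is also trained on ADE20K scene categories, which segment a whole person rather than individual body parts, so the class indices in PART_LABELS are assumptions rather than real head/shoulder/arm labels. A minimal sketch of the resolution fix, upsampling the logits to the image size before taking the argmax and keeping the label indices as the assumptions above:

import torch.nn.functional as F

def segment_image(image):
    # Preprocess the image for Segformer (the feature extractor resizes it internally)
    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Upsample the low-resolution logits (1, num_classes, H/4, W/4 of the resized
    # input) back to the original image's height and width before the argmax
    logits = F.interpolate(
        outputs.logits,
        size=image.shape[:2],
        mode="bilinear",
        align_corners=False,
    )
    segmentation = logits.argmax(dim=1).squeeze().cpu().numpy()

    # Color each configured part; the indices in PART_LABELS remain placeholders,
    # since ADE20K has no separate head/shoulder/arm classes
    segmented_image = np.zeros_like(image)
    for part, color in PART_COLORS.items():
        mask = np.isin(segmentation, PART_LABELS[part])
        segmented_image[mask] = color

    return segmented_image

Separately, gr.Image(type="numpy") already passes an RGB array to estimate_pose, so the cv2.COLOR_BGR2RGB call effectively hands MediaPipe a channel-swapped image; dropping that conversion may improve detection.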