# app.py — Human pose estimation demo (Hugging Face Space by vu0018,
# revision 956cfce, 4.58 kB). Header reconstructed from file-viewer chrome
# that was scraped into the source and broke it as Python.
import cv2
import mediapipe as mp
import numpy as np
import gradio as gr
import tempfile
import shutil
import os
# Initialize MediaPipe Pose once at module load so the same tracker is
# reused across frames (and across Gradio requests).
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
# static_image_mode=False enables cross-frame landmark tracking for video;
# 0.5 is MediaPipe's default confidence threshold for both stages.
pose = mp_pose.Pose(static_image_mode=False,
min_detection_confidence=0.5,
min_tracking_confidence=0.5)
def calculate_angle(a, b, c):
    """Return the angle (in degrees) at vertex *b* formed by points a-b-c.

    Args:
        a, b, c: 2D (or nD) point coordinates, any array-like of floats.

    Returns:
        Angle in degrees in [0, 180]. Returns 0.0 for the degenerate case
        where a or c coincides with b (the original divided by zero there,
        producing nan and a RuntimeWarning).
    """
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    c = np.asarray(c, dtype=float)
    ba = a - b
    bc = c - b
    denom = np.linalg.norm(ba) * np.linalg.norm(bc)
    if denom == 0.0:
        # Degenerate joint (coincident landmarks): no meaningful angle.
        return 0.0
    cosine_angle = np.dot(ba, bc) / denom
    # Clip guards against |cos| slightly exceeding 1 from float rounding.
    angle = np.arccos(np.clip(cosine_angle, -1.0, 1.0))
    return float(np.degrees(angle))
# Main video processing function
def detect_pose_video(video_path, max_duration=20):
    """Annotate up to *max_duration* seconds of a video with pose landmarks.

    Runs MediaPipe Pose on each frame, draws the skeleton, overlays the
    left-elbow angle, and writes the result to a temporary .mp4.

    Args:
        video_path: Path to the uploaded video file.
        max_duration: Maximum number of seconds to process (default 20).

    Returns:
        Tuple ``(output_path, status_message)``; ``output_path`` is None
        on failure.
    """
    temp_input_name = None
    cap = None
    out = None
    try:
        if not video_path or not os.path.exists(video_path):
            return None, "Error: Video file does not exist."
        # Work from a private copy so Gradio's temp-file lifecycle cannot
        # remove the input out from under us mid-read.
        temp_input = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
        temp_input.close()
        temp_input_name = temp_input.name
        shutil.copy(video_path, temp_input_name)

        cap = cv2.VideoCapture(temp_input_name)
        if not cap.isOpened():
            return None, "Error: Cannot open video file."

        fps = cap.get(cv2.CAP_PROP_FPS) or 20.0
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) or 640)
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) or 480)
        max_frames = int(fps * max_duration)

        # Decide the output size ONCE, before creating the writer. The
        # original created the VideoWriter with the source (width, height)
        # but wrote frames downscaled to <=640 px, so any large video
        # produced a size-mismatched, unplayable output file.
        max_dim = 640
        longest = max(width, height)
        scale = min(1.0, max_dim / longest) if longest else 1.0
        out_w, out_h = int(width * scale), int(height * scale)

        temp_output = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
        temp_output.close()
        out_path = temp_output.name
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(out_path, fourcc, fps, (out_w, out_h))

        frame_count = 0
        while frame_count < max_frames:
            ret, frame = cap.read()
            if not ret:
                break
            if scale < 1.0:
                # Resize to the exact writer dimensions so every written
                # frame matches the VideoWriter's declared size.
                frame = cv2.resize(frame, (out_w, out_h))

            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results = pose.process(frame_rgb)
            if results.pose_landmarks:
                mp_drawing.draw_landmarks(
                    frame,
                    results.pose_landmarks,
                    mp_pose.POSE_CONNECTIONS,
                    mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
                    mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2)
                )
                # Example metric: left elbow angle from shoulder-elbow-wrist
                # (landmark coords are normalized; scale to pixel space).
                landmarks = results.pose_landmarks.landmark
                fh, fw = frame.shape[:2]
                shoulder = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x * fw,
                            landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y * fh]
                elbow = [landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].x * fw,
                         landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].y * fh]
                wrist = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x * fw,
                         landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y * fh]
                angle = calculate_angle(shoulder, elbow, wrist)
                cv2.putText(frame, f"Left Elbow: {int(angle)} deg", (20, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)

            out.write(frame)
            frame_count += 1

        return out_path, "Video processed successfully."
    except Exception as e:
        return None, f"Runtime Error: {str(e)}"
    finally:
        # Release handles and delete the input copy on EVERY path; the
        # original leaked the temp file and left cap/out open whenever it
        # returned early or raised.
        if cap is not None:
            cap.release()
        if out is not None:
            out.release()
        if temp_input_name and os.path.exists(temp_input_name):
            os.remove(temp_input_name)
# Gradio interface: the Submit button stays disabled until a video exists.
with gr.Blocks() as demo:
    gr.Markdown("## Human Pose Estimation on Video\nUpload a video (max 20 seconds). Submit is enabled only after upload.")
    video_input = gr.Video(label="Upload Video")
    status_output = gr.Textbox(label="Status")
    video_output = gr.Video(label="Annotated Video")
    submit_btn = gr.Button("Submit", interactive=False)

    def enable_submit(video):
        # A truthy upload value unlocks the button; clearing it re-locks.
        return bool(video)

    video_input.change(enable_submit, inputs=[video_input], outputs=[submit_btn])
    submit_btn.click(detect_pose_video,
                     inputs=[video_input],
                     outputs=[video_output, status_output])

demo.launch()