| import cv2 |
| import numpy as np |
| import mediapipe as mp |
| import gradio as gr |
| import tempfile |
| import os |
|
|
# Module-level aliases for the MediaPipe drawing helpers and the Pose solution.
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
|
|
| |
def draw_pose_landmarks(image, results):
    """Return a copy of *image* with the detected pose skeleton drawn on it.

    Args:
        image: Frame (numpy array, BGR as decoded by OpenCV) to annotate.
        results: MediaPipe Pose output; ``results.pose_landmarks`` may be
            empty/None when no person was detected.

    Returns:
        A new annotated image; an unmodified copy when nothing was detected.
    """
    canvas = image.copy()
    landmarks = results.pose_landmarks
    if landmarks:
        # Green dots for joints, blue lines (BGR order) for the connections.
        joint_spec = mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2)
        bone_spec = mp_drawing.DrawingSpec(color=(255, 0, 0), thickness=2)
        mp_drawing.draw_landmarks(
            canvas,
            landmarks,
            mp_pose.POSE_CONNECTIONS,
            landmark_drawing_spec=joint_spec,
            connection_drawing_spec=bone_spec,
        )
    return canvas
|
|
| |
def process_video(video_file):
    """Run MediaPipe pose detection over a video and write an annotated copy.

    Args:
        video_file: Path to the input video (as supplied by ``gr.Video``).

    Returns:
        Path to a temporary ``.mp4`` file with the stick-figure overlay
        drawn on every frame.

    Raises:
        ValueError: If the input video cannot be opened.
    """
    # Reserve an output path; the handle is closed immediately so VideoWriter
    # can reopen it by name.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_output:
        output_path = temp_output.name

    cap = cv2.VideoCapture(video_file)
    if not cap.isOpened():
        cap.release()
        raise ValueError(f"Could not open video file: {video_file}")

    try:
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        # Some containers report 0 (or NaN) fps; fall back to a sane default
        # so the writer does not produce an unplayable file.
        if not fps or fps != fps:
            fps = 30.0

        out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
        try:
            with mp_pose.Pose(static_image_mode=False,
                              min_detection_confidence=0.5,
                              min_tracking_confidence=0.5) as pose:
                while cap.isOpened():
                    ret, frame = cap.read()
                    if not ret:
                        break

                    # MediaPipe expects RGB input; OpenCV decodes frames as BGR.
                    image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    results = pose.process(image_rgb)

                    out.write(draw_pose_landmarks(frame, results))
        finally:
            # Release the writer even if processing raises, so the partial
            # file is finalized and the handle is not leaked.
            out.release()
    finally:
        cap.release()

    return output_path
|
|
| |
# Gradio UI: one video in, one annotated video out.
# (Labels/title/description are user-facing Japanese strings: "upload a
# video" / "video after pose recognition" / "stick-figure pose recognition
# with MediaPipe".)
interface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="動画をアップロードしてください"),
    outputs=gr.Video(label="ポーズ認識後の動画"),
    title="MediaPipeによる棒人間ポーズ認識",
    description="アップロードした動画に対してMediaPipeでポーズ検出し、棒人間を描画します。"
)
|
|
# Launch the Gradio app only when run as a script, not on import.
if __name__ == "__main__":
    interface.launch()
|
|