| import cv2 |
| from cvzone.PoseModule import PoseDetector |
| import gradio as gr |
| import tempfile |
| import os |
|
|
# Single shared detector instance, created once at import time so the
# underlying pose model is not re-initialized for every processed frame.
poseDetector = PoseDetector()
|
|
| |
def process_video(video_path):
    """Run pose detection on every frame of *video_path*.

    Each frame is annotated with the detected pose skeleton and written to a
    temporary ``.mp4`` file. The per-frame landmark coordinates are also
    dumped to ``AnimationFile.txt`` in the current working directory.

    Args:
        video_path: Path to a video file readable by OpenCV.

    Returns:
        Tuple ``(temp_path, [])`` — the path of the annotated video, plus an
        empty list kept for caller compatibility.

    Raises:
        ValueError: If the video cannot be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError("Could not open video file.")

    fps = int(cap.get(cv2.CAP_PROP_FPS))
    # Some containers/streams report an fps of 0; cv2.VideoWriter cannot be
    # initialized with fps=0, so fall back to a sane default.
    if fps <= 0:
        fps = 30
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Named temp file (delete=False) so the path outlives this function and
    # Gradio can serve the finished video from it.
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
    temp_path = temp_file.name
    temp_file.close()

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(temp_path, fourcc, fps, (width, height))

    posList = []
    try:
        while True:
            success, img = cap.read()
            if not success:
                break

            img = poseDetector.findPose(img)
            lmList, bboxInfo = poseDetector.findPosition(img)

            if bboxInfo:
                # y is flipped (img.shape[0] - y) so exported coordinates use
                # a bottom-left origin — presumably for a 3D/animation
                # consumer of AnimationFile.txt; confirm with that consumer.
                lmString = ''.join(
                    f'{lm[0]},{img.shape[0] - lm[1]},{lm[2]},' for lm in lmList
                )
                posList.append(lmString)

            out.write(img)
    finally:
        # Release the capture and writer even if a frame raises mid-loop,
        # so file handles/codecs are not leaked.
        cap.release()
        out.release()

    with open("AnimationFile.txt", "w") as f:
        f.writelines(["%s\n" % item for item in posList])

    return temp_path, []
|
|
| |
def gradio_interface(video):
    """Gradio callback: return the pose-annotated version of *video*.

    Delegates to ``process_video`` and discards its (empty) landmark list,
    since the UI only displays the processed video file.
    """
    annotated_path, _ignored = process_video(video)
    return annotated_path
|
|
| |
# Gradio UI: one video in, one annotated video out.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Video(label="Input Video"),
    outputs=gr.Video(label="Processed Video"),
    title="Pose Detection with MediaPipe",
    description="Upload a video to detect human poses using MediaPipe and OpenCV.",
)

# Guard the launch so importing this module does not start a web server
# (and open a public share tunnel) as a side effect.
if __name__ == "__main__":
    iface.launch(share=True)