# poseDetectionHugging.py — PoseTest Hugging Face Space (uploaded by odeconto, commit f3f589c)
import cv2
from cvzone.PoseModule import PoseDetector
import gradio as gr
# Initialize pose detector once at module load; reused across every
# processed frame (cvzone wraps MediaPipe Pose under the hood).
poseDetector = PoseDetector()
# Function to process video and detect poses
def process_video(video_path):
    """Run pose detection on every frame of a video.

    Draws the detected pose skeleton on each frame, dumps the landmark
    coordinates to ``AnimationFile.txt`` (one comma-separated line per
    frame, y-axis flipped to image height minus y), and encodes the
    annotated frames into ``processed_output.mp4``.

    Args:
        video_path: Path to the input video file readable by OpenCV.

    Returns:
        Path to the annotated output video (a string usable by
        ``gr.Video``), or ``None`` if no frame could be read.
    """
    cap = cv2.VideoCapture(video_path)
    pos_list = []          # one landmark string per frame with a detection
    writer = None          # created lazily once the first frame's size is known
    output_path = "processed_output.mp4"
    try:
        while True:
            success, img = cap.read()
            if not success:
                break
            # Detect and draw the pose on the frame in place.
            img = poseDetector.findPose(img)
            lmList, bboxInfo = poseDetector.findPosition(img)
            if bboxInfo:
                # Format matches the original: "x,h-y,z," per landmark,
                # including the trailing comma. The y flip converts from
                # image coordinates (origin top-left) to a bottom-left origin.
                height = img.shape[0]
                pos_list.append(
                    ','.join(f'{lm[0]},{height - lm[1]},{lm[2]}' for lm in lmList) + ','
                )
            if writer is None:
                fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # fall back if FPS metadata is missing
                frame_h, frame_w = img.shape[:2]
                writer = cv2.VideoWriter(
                    output_path,
                    cv2.VideoWriter_fourcc(*"mp4v"),
                    fps,
                    (frame_w, frame_h),
                )
            writer.write(img)
    finally:
        # Release capture/encoder even if detection raises mid-loop.
        cap.release()
        if writer is not None:
            writer.release()
    # Save pose data to a file (same filename/format as before).
    with open("AnimationFile.txt", "w") as f:
        f.writelines(f"{item}\n" for item in pos_list)
    # gr.Video cannot render a raw list of numpy frames — it needs a file
    # path, so return the encoded video (None if the input had no frames).
    return output_path if writer is not None else None
# Gradio interface
def gradio_interface(video):
    """Gradio callback: forward the uploaded video to the pose pipeline."""
    return process_video(video)
# Create Gradio app: a single video-in / video-out interface around the
# pose-detection pipeline.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Video(label="Input Video"),
    outputs=gr.Video(label="Processed Video"),
    title="Pose Detection with MediaPipe",
    description="Upload a video to detect human poses using MediaPipe and OpenCV.",
)
# Launch the app at import time — this file is the Hugging Face Space
# entry point, so launching unconditionally is intentional.
iface.launch()