| import cv2
|
| from cvzone.PoseModule import PoseDetector
|
| import gradio as gr
|
|
|
|
|
# Single shared detector instance: PoseDetector loads its model once, so it is
# created at module level rather than per request/per frame.
poseDetector = PoseDetector()
|
|
|
|
|
def process_video(video_path, output_txt="AnimationFile.txt"):
    """Run pose detection on every frame of a video.

    For each frame where a person is detected, the landmark list is flattened
    into a comma-separated string (with the y coordinate flipped so that the
    origin is at the bottom of the frame, as expected by downstream animation
    tools) and collected. All collected lines are written to *output_txt*.

    Args:
        video_path: Path to the input video file.
        output_txt: Destination text file for the landmark strings.
            Defaults to "AnimationFile.txt" for backward compatibility.

    Returns:
        List of processed frames in RGB order (annotated by the detector).
    """
    cap = cv2.VideoCapture(video_path)
    pos_list = []
    output_frames = []

    try:
        while True:
            success, img = cap.read()
            if not success:
                break

            img = poseDetector.findPose(img)
            lm_list, bbox_info = poseDetector.findPosition(img)

            # Only record landmarks when a person was actually found.
            if bbox_info:
                # Flip y (img.shape[0] - y) so the coordinate system is
                # bottom-left origin instead of OpenCV's top-left origin.
                line = ",".join(
                    f"{lm[0]},{img.shape[0] - lm[1]},{lm[2]}" for lm in lm_list
                )
                # Original format kept a trailing comma after the last value.
                pos_list.append(line + ",")

            # OpenCV frames are BGR; convert to RGB for display consumers.
            output_frames.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    finally:
        # Release the capture even if detection raises mid-loop.
        cap.release()

    with open(output_txt, "w") as f:
        f.writelines(f"{item}\n" for item in pos_list)

    return output_frames
|
|
|
|
|
def gradio_interface(video):
    """Gradio callback: run pose detection and return a playable video path.

    The output component is ``gr.Video``, which expects a video file path —
    not a list of numpy frames — so the processed frames are encoded to an
    mp4 file and its path is returned.

    Args:
        video: Path to the uploaded input video (supplied by gr.Video).

    Returns:
        Path to the annotated mp4 file, or None if no frames were produced.
    """
    import tempfile

    frames = process_video(video)
    if not frames:
        return None

    # Recover the source frame rate so the output plays at the right speed;
    # fall back to 25 fps if the container does not report one.
    cap = cv2.VideoCapture(video)
    fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
    cap.release()

    height, width = frames[0].shape[:2]
    out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    writer = cv2.VideoWriter(
        out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height)
    )
    try:
        for frame in frames:
            # process_video returns RGB frames; VideoWriter expects BGR.
            writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    finally:
        writer.release()

    return out_path
|
|
|
|
|
# Wire the processing callback into a simple upload-video / show-video UI.
# gr.Video as output requires the callback to return a video file path.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Video(label="Input Video"),
    outputs=gr.Video(label="Processed Video"),
    title="Pose Detection with MediaPipe",
    description="Upload a video to detect human poses using MediaPipe and OpenCV.",
)

# Start the local Gradio server (blocks until interrupted).
iface.launch()