import cv2
from cvzone.PoseModule import PoseDetector
import gradio as gr
import tempfile
import os

# Initialize the pose detector once at module load so repeated requests reuse it.
poseDetector = PoseDetector()


def process_video(video_path):
    """Run pose detection on every frame of *video_path*.

    Writes an annotated copy of the video to a temporary ``.mp4`` file and
    dumps per-frame landmark coordinates to ``AnimationFile.txt`` in the
    working directory (comma-separated ``x,flipped_y,z`` triplets per
    landmark, one line per frame with a detected pose).

    Args:
        video_path: Path to the input video file.

    Returns:
        tuple: ``(annotated_video_path, [])`` — the second element is a
        placeholder kept for interface compatibility.

    Raises:
        ValueError: If the video file cannot be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError("Could not open video file.")

    # Video properties for the writer. Some containers report 0 FPS; fall
    # back to a sane default so VideoWriter does not emit a broken file.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # delete=False so the path outlives this handle and can be handed back
    # to Gradio for serving after the writer finishes.
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
    temp_path = temp_file.name
    temp_file.close()

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(temp_path, fourcc, fps, (width, height))

    posList = []
    try:
        while True:
            success, img = cap.read()
            if not success:
                break

            # Draw the skeleton on the frame and collect landmark positions.
            img = poseDetector.findPose(img)
            lmList, bboxInfo = poseDetector.findPosition(img)
            if bboxInfo:
                # Flip y (img.shape[0] - y) so exported coordinates use a
                # bottom-left origin — presumably for a 3D consumer such as
                # Unity; confirm against the downstream reader.
                lmString = ''.join(
                    f'{lm[0]},{img.shape[0] - lm[1]},{lm[2]},' for lm in lmList
                )
                posList.append(lmString)

            out.write(img)
    finally:
        # Always release capture and writer so the output file is finalized
        # even if a frame raises mid-loop.
        cap.release()
        out.release()

    # Persist one line of landmark data per detected frame.
    with open("AnimationFile.txt", "w") as f:
        f.writelines("%s\n" % item for item in posList)

    return temp_path, []


def gradio_interface(video):
    """Gradio callback: process the uploaded video, return the result path."""
    processed_video_path, _ = process_video(video)
    return processed_video_path


iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Video(label="Input Video"),
    outputs=gr.Video(label="Processed Video"),
    title="Pose Detection with MediaPipe",
    description="Upload a video to detect human poses using MediaPipe and OpenCV.",
)

# Guard the launch so importing this module does not start a server.
if __name__ == "__main__":
    iface.launch(share=True)