odeconto commited on
Commit
f3f589c
·
verified ·
1 Parent(s): 4ae3e8a

Upload poseDetectionHugging.py

Browse files
Files changed (1) hide show
  1. poseDetectionHugging.py +58 -0
poseDetectionHugging.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
from cvzone.PoseModule import PoseDetector
import gradio as gr

# Initialize pose detector once at module load; it is shared by every call
# to process_video. (cvzone's PoseModule is presumably backed by MediaPipe,
# per this app's title — confirm against the cvzone docs.)
poseDetector = PoseDetector()
7
+
8
# Function to process video and detect poses
def process_video(video_path):
    """Run pose detection on every frame of a video and return the result.

    Opens ``video_path``, runs the module-level cvzone ``poseDetector`` on
    each frame, writes the per-frame landmark coordinates to
    ``AnimationFile.txt`` (one comma-separated line per frame, with the
    y-coordinate flipped so it is measured from the bottom of the image),
    and encodes the annotated frames into a temporary ``.mp4`` file.

    Parameters
    ----------
    video_path : str
        Path of the input video file.

    Returns
    -------
    str
        Path of the annotated output video. A file path is returned
        (rather than a list of frames, as before) because the app's
        ``gr.Video`` output component requires a playable video file.

    Raises
    ------
    ValueError
        If the input video cannot be opened.
    """
    import tempfile  # local import: only needed for the output file

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Could not open video: {video_path}")

    # Fall back to 30 fps when the container carries no FPS metadata (0.0).
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    writer = None
    pos_list = []

    try:
        while True:
            success, img = cap.read()
            if not success:
                break

            # Draw the skeleton onto the frame and collect landmark positions.
            img = poseDetector.findPose(img)
            lm_list, bbox_info = poseDetector.findPosition(img)

            if bbox_info:
                # Flip y (img.shape[0] - y) so the origin is at the bottom of
                # the frame, matching the original animation-file convention.
                pos_list.append(
                    "".join(
                        f"{lm[0]},{img.shape[0] - lm[1]},{lm[2]},"
                        for lm in lm_list
                    )
                )

            if writer is None:
                # Lazily create the writer once the frame size is known.
                h, w = img.shape[:2]
                writer = cv2.VideoWriter(
                    out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)
                )
            # VideoWriter expects BGR frames, which cap.read() already yields,
            # so no color conversion is needed here.
            writer.write(img)
    finally:
        # Release capture/encoder even if detection raises mid-stream.
        cap.release()
        if writer is not None:
            writer.release()

    # Persist the landmark data for downstream animation tooling.
    with open("AnimationFile.txt", "w") as f:
        f.writelines(f"{item}\n" for item in pos_list)

    # Return the processed frames as a video file path (what gr.Video needs).
    return out_path
42
+
43
# Gradio interface
def gradio_interface(video):
    """Thin adapter between the Gradio widget and ``process_video``.

    Hands the uploaded video straight to ``process_video`` and returns
    its result unchanged to the output component.
    """
    return process_video(video)
47
+
48
# Create Gradio app: one video in, one video out, handled by gradio_interface.
input_video = gr.Video(label="Input Video")
output_video = gr.Video(label="Processed Video")

iface = gr.Interface(
    fn=gradio_interface,
    inputs=input_video,
    outputs=output_video,
    title="Pose Detection with MediaPipe",
    description="Upload a video to detect human poses using MediaPipe and OpenCV.",
)

# Launch the app
iface.launch()