AmirKaseb committed on
Commit
b13445f
·
verified ·
1 Parent(s): 3784a05

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -15
app.py CHANGED
@@ -1,43 +1,40 @@
1
  import cv2
2
  import mediapipe as mp
3
  import gradio as gr
4
- import numpy as np
5
 
6
# MediaPipe helpers: drawing utilities and the Pose solution module.
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
8
 
9
def detect_pose(image):
    """Run MediaPipe pose estimation on an RGB image and draw the skeleton in place.

    Returns the same image array, annotated when a pose was found and
    untouched otherwise.
    """
    with mp_pose.Pose(min_detection_confidence=0.5,
                      min_tracking_confidence=0.5) as pose:
        results = pose.process(image)

    if results.pose_landmarks:
        # Red joints, green bones.
        landmark_spec = mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2, circle_radius=4)
        connection_spec = mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2)
        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                  landmark_drawing_spec=landmark_spec,
                                  connection_drawing_spec=connection_spec)
    return image
18
 
19
def process_frame(frame):
    """Convert a BGR camera frame to RGB and run pose detection on it."""
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return detect_pose(rgb)
23
 
24
def capture_video():
    """Generator yielding pose-annotated BGR frames from the default webcam.

    Opens camera 0 and, for each captured frame, runs the BGR->RGB +
    pose-detection pipeline (process_frame) and converts the annotated
    result back to BGR before yielding it. Stops when the camera returns
    no frame.
    """
    cap = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frame = process_frame(frame)  # returns an RGB frame with landmarks drawn
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            yield frame
    finally:
        # Fix: the capture device was never released — leaked the camera
        # handle when the loop ended or the generator was closed early.
        cap.release()
33
 
34
# NOTE(review): fn is the identity, so this interface never runs the pose
# pipeline; capture_video is defined above but never wired in — confirm intent.
iface = gr.Interface(
    fn=lambda frame: frame,
    # gr.inputs.* is the legacy (pre-3.x) Gradio namespace — removed in newer releases.
    inputs=gr.inputs.Video(source="webcam", type="numpy"),
    outputs="video",
    live=True,
    title="Live Pose Detection",
    allow_flagging="never"
)

iface.launch()
 
1
  import cv2
2
  import mediapipe as mp
3
  import gradio as gr
 
4
 
5
# MediaPipe helpers: drawing utilities and the Pose solution module.
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
7
 
8
def detect_pose(frame):
    """Run MediaPipe pose estimation on an RGB frame and draw landmarks in place.

    Args:
        frame: RGB image array (MediaPipe's Pose.process expects RGB, not BGR —
            the caller converts before invoking this).

    Returns:
        The same frame array, annotated when a pose was found and
        untouched otherwise.
    """
    # Fix: the original constructed (and closed) a fresh mp_pose.Pose for every
    # frame. That is slow for live video and discards the tracker between
    # frames, making min_tracking_confidence ineffective. Lazily create one
    # instance and reuse it across calls for the life of the process.
    pose = getattr(detect_pose, "_pose", None)
    if pose is None:
        pose = mp_pose.Pose(min_detection_confidence=0.5,
                            min_tracking_confidence=0.5)
        detect_pose._pose = pose

    results = pose.process(frame)
    if results.pose_landmarks:
        # Red joints, green bones.
        mp_drawing.draw_landmarks(
            frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
            landmark_drawing_spec=mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2, circle_radius=4),
            connection_drawing_spec=mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2))
    return frame
16
 
17
def process_video(frame):
    """Apply pose detection to a single RGB frame and return the annotated frame."""
    return detect_pose(frame)
20
 
21
def video_stream():
    """Generator yielding pose-annotated BGR frames from the default webcam.

    Opens camera 0; for each captured frame: convert BGR->RGB (MediaPipe
    expects RGB), detect and draw the pose, convert back to BGR, yield.
    Stops when the camera returns no frame.
    """
    cap = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = process_video(frame)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            yield frame
    finally:
        # Fix: the capture device was never released — leaked the camera
        # handle when the loop ended or the generator was closed early.
        cap.release()
31
 
32
# NOTE(review): fn is the identity (lambda x: x), so the deployed interface
# performs no pose detection at all; video_stream/process_video above are
# never wired in. Presumably fn should invoke the detection pipeline —
# confirm intent before release.
iface = gr.Interface(
    fn=lambda x: x,
    # NOTE(review): gr.Video(source=...) is Gradio 3.x syntax; Gradio 4 renamed
    # it to sources=["webcam"] — verify against the pinned gradio version.
    inputs=gr.Video(source="webcam", type="numpy"),
    outputs="video",
    live=True,
    title="Live Pose Detection"
)

iface.launch()