# HackLYTDEMO — app.py (Hugging Face Space)
# Author: pn23 — commit "Update app.py" (5ea36be, verified)
import io
import math
import os
from pathlib import Path

import cv2
import gradio as gr
import matplotlib.pyplot as plt
import mediapipe as mp
import numpy as np
import plotly.express as px
# Shorthand aliases for the MediaPipe pose-estimation solution and its
# drawing helpers, used by video_identity() below.
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
# NOTE(review): this dropdown is never passed to the gr.Interface below
# (inputs="video" only) — confirm whether it should be part of `inputs`.
text_options = gr.Dropdown(['Knee Extension', 'Dumbbell Thrust', 'Lying Down External Rotation', 'Internal & External Rotation', 'Deadlift', 'Planks'], label="Select An Exercise")
def video_identity(video):
    """Run MediaPipe pose estimation on every frame of *video*.

    Each frame is converted to RGB, fed through the MediaPipe Pose model,
    and the detected 3-D world landmarks are rendered with
    ``mp_drawing.plot_landmarks`` (a matplotlib figure). The rendered
    plots are encoded back into frames and written to ``out.mp4``.

    Args:
        video: Filesystem path to the input video (as supplied by Gradio).

    Returns:
        Path to the rendered landmark video (``"out.mp4"``) when at least
        one frame was processed, otherwise the original *video* path.
    """
    cap = cv2.VideoCapture(video)
    # Fall back to 30 fps if the container does not report a frame rate.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    output = None

    # Create the (expensive) pose model ONCE, not once per frame.
    with mp_pose.Pose(static_image_mode=True,
                      min_detection_confidence=0.5,
                      model_complexity=2) as pose:
        while cap.isOpened():
            ret, frame = cap.read()
            # if the frame is read correctly, ret is True
            if not ret:
                print("Can't receive frame (stream end?). Exiting ...")
                break

            # MediaPipe expects RGB input; OpenCV decodes frames as BGR.
            results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            if results.pose_world_landmarks is None:
                continue  # no person detected in this frame

            # Render the 3-D landmarks into the current matplotlib figure,
            # then round-trip it through an in-memory PNG to get pixels.
            mp_drawing.plot_landmarks(results.pose_world_landmarks,
                                      mp_pose.POSE_CONNECTIONS)
            buffer = io.BytesIO()
            plt.savefig(buffer, format='png')
            plt.close('all')  # avoid leaking one figure per frame
            buffer.seek(0)
            image = cv2.imdecode(np.frombuffer(buffer.read(), np.uint8), -1)
            if image.ndim == 3 and image.shape[2] == 4:
                # PNGs may carry an alpha channel; VideoWriter wants BGR.
                image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)

            # Lazily open the writer with the rendered frame's dimensions.
            if output is None:
                height, width = image.shape[:2]
                output = cv2.VideoWriter('out.mp4',
                                         cv2.VideoWriter_fourcc(*'mp4v'),
                                         fps, (width, height))
            output.write(image)

    cap.release()
    if output is not None:
        output.release()
        return 'out.mp4'
    return video
# Gradio app: upload a video, run pose estimation on it, get a video back.
demo = gr.Interface(
    fn=video_identity,
    inputs="video",
    outputs="video",
    # examples=[
    #     os.path.join(os.path.dirname(__file__),
    #                  "video/video_sample.mp4")],
    # cache_examples only applies once `examples` above is re-enabled;
    # True with no examples is rejected by newer Gradio versions.
    cache_examples=False,
)

if __name__ == "__main__":
    demo.launch()