Spaces:
Sleeping
Sleeping
# Standard library
import os
import tempfile

# Third-party
import cv2
import gradio as gr
from openai import OpenAI

# OpenAI client; the API key comes from the environment (never hard-coded).
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
def extract_key_frames(video_path, frame_interval=30, max_frames=5):
    """Sample frames from a video and return simple text observations.

    Reads the video at *video_path*, inspecting every *frame_interval*-th
    frame until *max_frames* observations have been collected or the video
    ends. Each observation guesses body orientation purely from the frame's
    aspect ratio — it is a crude heuristic, not real pose detection.

    Args:
        video_path: Path to a video file readable by OpenCV.
        frame_interval: Inspect every Nth frame (default 30 — roughly one
            frame per second for 30 fps footage).
        max_frames: Stop after this many observations (default 5, matching
            the previously hard-coded cap).

    Returns:
        list[str]: Human-readable notes, one per sampled frame.
    """
    cap = cv2.VideoCapture(video_path)
    notes = []
    count = 0
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret or len(notes) >= max_frames:
                break
            if count % frame_interval == 0:
                height, width = frame.shape[:2]
                # Wide frames suggest a horizontal (streamlined) body line.
                aspect_ratio = width / height
                body_position = "horizontal" if aspect_ratio > 1.3 else "vertical or turning"
                notes.append(f"Frame {count}: Swimmer appears {body_position}, arms possibly extended.")
            count += 1
    finally:
        # Always release the capture handle, even if frame decoding raises
        # (the original only released it on the normal exit path).
        cap.release()
    return notes
def analyze_swim_uploaded(video_input, stroke, distance, lane):
    """Analyze an uploaded swim video with GPT-3.5 and return coach feedback.

    Args:
        video_input: Either a filesystem path string (what gr.Video provides)
            or a file-like object with a ``.read()`` method; falsy when
            nothing was uploaded.
        stroke: Stroke name selected in the UI (e.g. "Freestyle").
        distance: Race distance label (e.g. "100m").
        lane: Lane label (e.g. "Lane 4").

    Returns:
        str: Formatted feedback text on success, or an error message —
        the function never raises to the Gradio caller.
    """
    if not video_input:
        return "β Please upload a video."
    video_path = None
    is_temp = False
    try:
        if isinstance(video_input, str):
            video_path = video_input
        else:
            # Persist the uploaded bytes so OpenCV can open them by path.
            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
                tmp.write(video_input.read())
                video_path = tmp.name
            is_temp = True
        notes = extract_key_frames(video_path)
        prompt = (
            f"You are an expert swim coach. The swimmer is in {lane}, swimming a {distance} {stroke}.\n\n"
            "Here are some frame observations:\n"
            + "\n".join(notes) + "\n\n"
            "Based on these, please:\n"
            "1. Give detailed technique feedback.\n"
            "2. Provide a technique score from 1 to 10.\n"
            "3. List the top 3 areas for improvement.\n"
        )
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                # Fixed "proffesional" -> "professional" in the system prompt.
                {"role": "system", "content": "You are a professional swim coach who analyzes technique."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.5,
            max_tokens=500
        )
        feedback_text = response.choices[0].message.content
        return f"π {distance} {stroke} β {lane}\n\nπ― AI Score & Feedback:\n{feedback_text}"
    except Exception as e:
        # Surface the error in the UI rather than crashing the Gradio app.
        return f"β Error: {str(e)}"
    finally:
        # Remove the temp file even when extraction or the API call fails —
        # the original deleted it only on the success path, leaking on error.
        if is_temp and video_path and os.path.exists(video_path):
            os.remove(video_path)
# Build the Gradio UI: one video upload plus three dropdowns feeding the analyzer.
stroke_choices = ["Freestyle", "Backstroke", "Breaststroke", "Butterfly", "IM"]
distance_choices = ["50m", "100m", "200m", "400m", "800m", "1500m"]
lane_choices = [f"Lane {n}" for n in range(1, 11)]

iface = gr.Interface(
    fn=analyze_swim_uploaded,
    inputs=[
        gr.Video(label="Upload Swimming Video"),
        gr.Dropdown(stroke_choices, label="Stroke"),
        gr.Dropdown(distance_choices, label="Distance"),
        gr.Dropdown(lane_choices, label="Lane"),
    ],
    outputs=gr.Textbox(label="AI Swim Feedback"),
    title="π AI Swim Coach",
    description="Upload a swim video, select stroke, distance, and lane. Get GPT-3.5-powered feedback, score, and top 3 fixes!",
)
iface.launch()