"""Interactive 3D surgical-training demo.

A Gradio app that displays a 3D mannequin, accepts either a typed
description of a surgical action or an uploaded hand-movement video, and
returns short feedback messages (user-facing text is in Kinyarwanda).
Video analysis uses MediaPipe Hands wrist landmarks.
"""

import gradio as gr
import cv2
import mediapipe as mp

mp_hands = mp.solutions.hands
# Shared detector instance: tracking mode (static_image_mode=False) reuses
# landmarks between frames, which is cheaper than re-detecting every frame.
# NOTE(review): because this object is global and stateful, tracking state
# carries over between uploaded videos — acceptable for a demo, but confirm.
hands = mp_hands.Hands(
    static_image_mode=False,
    max_num_hands=2,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5,
)


def surgical_feedback_webcam(video_path):
    """Analyze hand positions in a video and return recent feedback lines.

    For every frame in which MediaPipe detects hands, one message per hand
    is appended based on the wrist's normalized y coordinate (y < 0.3 means
    the hand is high in the frame — "lower the instrument", otherwise
    "hand position is good").

    Args:
        video_path: Path to the video file as supplied by the Gradio Video
            component. May be None when the user clears the input.

    Returns:
        The last 5 feedback messages joined with newlines; an empty string
        when no video was provided or no hands were detected.
    """
    # Guard: Gradio passes None when the video input is cleared;
    # cv2.VideoCapture(None) would raise a TypeError.
    if not video_path:
        return ""

    cap = cv2.VideoCapture(video_path)
    feedback_messages = []
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            # MediaPipe expects RGB; OpenCV decodes frames as BGR.
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            result = hands.process(frame_rgb)
            if result.multi_hand_landmarks:
                for hand_landmarks in result.multi_hand_landmarks:
                    # Normalized y: 0.0 is the top of the frame, 1.0 the bottom.
                    wrist_y = hand_landmarks.landmark[mp_hands.HandLandmark.WRIST].y
                    if wrist_y < 0.3:
                        feedback_messages.append("Gabanya height y'igikoresho.")
                    else:
                        feedback_messages.append("Position y'amaboko ni nziza.")
    finally:
        # Always release the capture, even if frame processing raises.
        cap.release()
    return "\n".join(feedback_messages[-5:])  # last 5 feedbacks


def _text_feedback(x):
    """Return a canned reply for the typed-action textbox.

    NOTE(review): the condition looks inverted — an *empty* input receives
    the praise message ("Byiza! Komeza gutya." = "Good! Keep going."),
    while actual input is merely echoed. Preserved as-is because the
    intended replacement text cannot be determined from this file; confirm
    with the author.
    """
    return "Byiza! Komeza gutya." if x == "" else "AI Feedback: " + x


with gr.Blocks() as demo:
    gr.Markdown("# Full Interactive 3D Surgical Training AI")
    gr.Markdown(
        "Reba 3D mannequin, ukoreshe webcam cyangwa wandike surgical action, "
        "AI izaguha feedback ako kanya."
    )
    with gr.Row():
        # Left column: 3D mannequin rendered via the <model-viewer> web component.
        gr.HTML(
            """
            <model-viewer src="assets/mannequin.glb" alt="3D Mannequin"
                auto-rotate camera-controls
                style="width: 400px; height: 400px;">
            </model-viewer>
            """
        )
        # Right column: user input + feedback display.
        with gr.Column():
            user_input = gr.Textbox(label="Describe your surgical action")
            video_input = gr.Video(label="Or upload hand movement video")
            feedback = gr.Textbox(label="AI Feedback")

    # Text input feedback (Enter key in the textbox).
    user_input.submit(_text_feedback, user_input, feedback)
    # Video input feedback (fires on upload completion).
    video_input.upload(surgical_feedback_webcam, video_input, feedback)

# Guard the launch so importing this module (e.g. for testing) does not
# start the web server.
if __name__ == "__main__":
    demo.launch()