"""Gesture-controlled slide viewer.

Move a wrist to the left/right third of the webcam frame to flip
backward/forward through the images in the ``slides/`` directory,
served through a live Gradio interface.
"""
import os

import cv2
import gradio as gr
import mediapipe as mp
import numpy as np

mp_hands = mp.solutions.hands
hands = mp_hands.Hands(max_num_hands=1)

# Filter to image files so a stray .DS_Store / README in the directory
# doesn't reach cv2.imread.
_IMAGE_EXTS = (".png", ".jpg", ".jpeg", ".bmp", ".gif", ".webp")
slides = sorted(
    f"slides/{img}"
    for img in os.listdir("slides")
    if img.lower().endswith(_IMAGE_EXTS)
)

index = 0

# Frames to ignore after a flip. Without a cooldown, a live stream advances
# one slide on EVERY frame while the hand is held at the edge, racing
# through the whole deck almost instantly.
_COOLDOWN_FRAMES = 15
_cooldown = 0


def detect(frame):
    """Update the slide index from the wrist position and return the slide.

    Args:
        frame: RGB numpy array from the Gradio webcam stream, or None
            before the first frame arrives.

    Returns:
        The current slide as an RGB numpy array, or None when no slide
        can be shown (empty/unreadable slides directory).
    """
    global index, _cooldown

    if not slides:
        # Empty slides/ directory — nothing to show, fail soft.
        return None

    if frame is not None:
        w = frame.shape[1]
        # Gradio supplies RGB frames, which is exactly what MediaPipe
        # expects — a BGR2RGB conversion here would wrongly swap channels.
        result = hands.process(frame)
        if _cooldown > 0:
            _cooldown -= 1
        elif result.multi_hand_landmarks:
            lm = result.multi_hand_landmarks[0]
            x = lm.landmark[mp_hands.HandLandmark.WRIST].x * w
            if x < w * 0.3:
                # Wrist in left third -> previous slide.
                index = max(0, index - 1)
                _cooldown = _COOLDOWN_FRAMES
            elif x > w * 0.7:
                # Wrist in right third -> next slide.
                index = min(len(slides) - 1, index + 1)
                _cooldown = _COOLDOWN_FRAMES

    slide = cv2.imread(slides[index])
    if slide is None:
        # Unreadable file: imread returns None rather than raising;
        # guard it instead of crashing cvtColor.
        return None
    # imread decodes BGR; Gradio displays RGB.
    return cv2.cvtColor(slide, cv2.COLOR_BGR2RGB)


demo = gr.Interface(
    fn=detect,
    live=True,
    # NOTE(review): Gradio 4.x renamed `source=` to `sources=["webcam"]` —
    # keep as-is for the installed version; confirm against requirements.
    inputs=gr.Image(source="webcam", streaming=True),
    outputs="image",
)

if __name__ == "__main__":
    demo.launch()