Commit History

Create index.js
ff394a4
verified

VIATEUR-AI committed on

Create yarn.lock
f44f751
verified

VIATEUR-AI committed on

Create vite.config.js
3d27550
verified

VIATEUR-AI committed on

Create package.json
d6e4ffb
verified

VIATEUR-AI committed on

Create package-lock.json
fa66939
verified

VIATEUR-AI committed on

Create nginx.conf
ae89f36
verified

VIATEUR-AI committed on

Create index.html
a77ebb2
verified

VIATEUR-AI committed on

Create eslint.config.js
05f9b18
verified

VIATEUR-AI committed on

Update README.md
084956d
verified

VIATEUR-AI committed on

Update README.md
f4f5667
verified

VIATEUR-AI committed on

Create Dockerfile
37122d8
verified

VIATEUR-AI committed on

Create .gitignore
a0e80ec
verified

VIATEUR-AI committed on

Update .gitattributes
eada46c
verified

VIATEUR-AI committed on

Delete requirements.txt
0ae033a
verified

VIATEUR-AI committed on

Delete app.py
15d28f0
verified

VIATEUR-AI committed on

Update requirements.txt
d501a5f
verified

VIATEUR-AI committed on

Update requirements.txt
9d81c60
verified

VIATEUR-AI committed on

Update requirements.txt
cfa9537
verified

VIATEUR-AI committed on

"""Interactive 3D surgical-training demo.

Shows a 3D mannequin (model-viewer) next to a text box and a video upload;
uploaded hand-movement videos are analyzed frame-by-frame with MediaPipe
Hands and turned into short coaching messages.
"""

import gradio as gr
import cv2
import mediapipe as mp

# MediaPipe hand-landmark detector, created once and shared across requests.
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
    static_image_mode=False,
    max_num_hands=2,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5,
)


# AI feedback logic based on hand positions
def surgical_feedback_webcam(video_path):
    """Analyze a recorded hand-movement video and return feedback text.

    Args:
        video_path: Path to the uploaded video file. May be None/empty when
            the Gradio Video component is cleared.

    Returns:
        The last 5 feedback messages joined by newlines (empty string when
        no video is given or no hands are detected).
    """
    # Robustness fix: the original passed None straight into
    # cv2.VideoCapture when no file was supplied.
    if not video_path:
        return ""
    cap = cv2.VideoCapture(video_path)
    feedback_messages = []
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            # MediaPipe expects RGB input; OpenCV decodes frames as BGR.
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            result = hands.process(frame_rgb)
            if result.multi_hand_landmarks:
                for hand_landmarks in result.multi_hand_landmarks:
                    wrist_y = hand_landmarks.landmark[mp_hands.HandLandmark.WRIST].y
                    # Normalized y grows downward, so y < 0.3 means the
                    # wrist sits high in the frame.
                    if wrist_y < 0.3:
                        feedback_messages.append("Gabanya height y'igikoresho.")
                    else:
                        feedback_messages.append("Position y'amaboko ni nziza.")
    finally:
        # Leak fix: always release the capture, even if processing raises.
        cap.release()
    return "\n".join(feedback_messages[-5:])  # last 5 feedbacks


with gr.Blocks() as demo:
    gr.Markdown("# Full Interactive 3D Surgical Training AI")
    gr.Markdown("Reba 3D mannequin, ukoreshe webcam cyangwa wandike surgical action, AI izaguha feedback ako kanya.")
    with gr.Row():
        # Left: 3D mannequin
        gr.HTML(
            """
            <model-viewer src="assets/mannequin.glb" alt="3D Mannequin" auto-rotate camera-controls style="width: 400px; height: 400px;"> </model-viewer>
            """
        )
        # Right: User input + feedback
        with gr.Column():
            user_input = gr.Textbox(label="Describe your surgical action")
            video_input = gr.Video(label="Or upload hand movement video")
            feedback = gr.Textbox(label="AI Feedback")
    # Text input feedback
    user_input.submit(lambda x: "Byiza! Komeza gutya." if x == "" else "AI Feedback: " + x, user_input, feedback)
    # Video input feedback
    video_input.upload(surgical_feedback_webcam, video_input, feedback)

demo.launch()
a66209f
verified

VIATEUR-AI committed on

"""3D surgical-training prototype: text description in, canned feedback out."""

import gradio as gr


# Simple AI feedback
def surgical_feedback(action_description):
    """Return a coaching message for a described surgical action.

    Matches known problem phrases (case-insensitively) against the
    description; the first matching rule wins, otherwise a default
    encouragement is returned.
    """
    normalized = action_description.lower()
    # Ordered (phrase, message) rules — first match wins.
    rules = (
        ("scalpel too high", "Gabanya umuvuduko w'igikoresho."),
        ("cut too deep", "Gabanya depth y'igikata."),
        ("incorrect angle", "Hindura angle y'igikoresho."),
    )
    for phrase, message in rules:
        if phrase in normalized:
            return message
    return "Byiza! Komeza gutya."


with gr.Blocks() as demo:
    gr.Markdown("# 3D Surgical Training AI Prototype")
    gr.Markdown(
        "Reba mannequin hasi, wandike uko wakoze igikorwa, AI izaguha feedback."
    )
    with gr.Row():
        # 3D mannequin
        gr.HTML(
            """
            <model-viewer src="assets/mannequin.glb" alt="3D Mannequin" auto-rotate camera-controls style="width: 400px; height: 400px;"> </model-viewer>
            """
        )
        # User input + AI feedback
        with gr.Column():
            user_input = gr.Textbox(label="Describe your surgical action")
            feedback = gr.Textbox(label="AI Feedback")
    user_input.submit(surgical_feedback, user_input, feedback)

demo.launch()
a01cf00
verified

VIATEUR-AI committed on

"""Surgical-training prototype UI: describe an action, get canned feedback."""

import gradio as gr


# Simple AI feedback logic
def surgical_feedback(action_description):
    """Return canned AI feedback for a described surgical action.

    The description is lowercased and scanned for known problem phrases;
    if none match, a default encouragement is returned.
    """
    text = action_description.lower()
    if "scalpel too high" in text:
        return "Gabanya umuvuduko w'igikoresho."
    if "cut too deep" in text:
        return "Gabanya depth y'igikata."
    if "incorrect angle" in text:
        return "Hindura angle y'igikoresho."
    return "Byiza! Komeza gutya."


with gr.Blocks() as demo:
    gr.Markdown("# Surgical Training AI Prototype")
    gr.Markdown(
        "Reba mannequin hasi, wandike uko wakoze igikorwa, AI izaguha feedback."
    )
    with gr.Row():
        # Left: 3D mannequin embed
        gr.HTML(
            """
            <model-viewer src="assets/mannequin.glb" alt="3D Mannequin" auto-rotate camera-controls style="width: 400px; height: 400px;"> </model-viewer>
            """
        )
        # Right: User input + AI feedback
        with gr.Column():
            user_input = gr.Textbox(label="Describe your surgical action")
            feedback = gr.Textbox(label="AI Feedback")
    user_input.submit(surgical_feedback, user_input, feedback)

demo.launch()
1367b8e
verified

VIATEUR-AI committed on

initial commit
2dcdbfe
verified

VIATEUR-AI committed on