Spaces:
Sleeping
Sleeping
| import streamlit as st | |
| from PIL import Image | |
| from ultralytics import YOLO | |
| import cv2, os | |
| from datetime import datetime | |
| import numpy as np | |
| import mediapipe as mp | |
# ---------------- General configuration ----------------
MODEL_PATH = "best.pt"  # YOLO weights, expected next to the app
SAVE_DIR = os.path.join("/tmp", "results")  # annotated outputs are written here
os.makedirs(SAVE_DIR, exist_ok=True)
# Load the YOLO model once at import time; reused by both predictors below
model = YOLO(MODEL_PATH)
# ---------------- MediaPipe Face Detection ----------------
mp_face_detection = mp.solutions.face_detection
def _largest_face_bbox(np_img, min_conf: float = 0.6):
    """Return the largest detected face bbox as (x1, y1, x2, y2), or None.

    Args:
        np_img: RGB image as a numpy array of shape (H, W, 3).
        min_conf: minimum MediaPipe detection confidence.

    Returns:
        Pixel-coordinate bounding box of the largest face, or None when no
        face is detected.
    """
    h, w = np_img.shape[:2]
    with mp_face_detection.FaceDetection(min_detection_confidence=min_conf) as fd:
        # FIX: MediaPipe expects RGB input. The previous RGB->BGR conversion
        # fed it channel-swapped data, degrading detection quality.
        results = fd.process(np_img)
    if not results.detections:
        return None
    boxes = []
    for det in results.detections:
        rel = det.location_data.relative_bounding_box
        # Clamp relative coordinates to [0, 1] before scaling to pixels
        # (MediaPipe can emit slightly negative xmin/ymin near the border).
        x1 = int(max(0.0, rel.xmin) * w)
        y1 = int(max(0.0, rel.ymin) * h)
        x2 = int(min(1.0, rel.xmin + rel.width) * w)
        y2 = int(min(1.0, rel.ymin + rel.height) * h)
        boxes.append((x1, y1, x2, y2))
    # Largest face by pixel area; boxes is non-empty here.
    return max(boxes, key=lambda b: (b[2] - b[0]) * (b[3] - b[1]))
# ---------------- Prediction functions ----------------
def predict_image(image, conf=0.85, show_labels=True):
    """Run stroke detection on a PIL image.

    Args:
        image: a PIL.Image (any mode: RGB, RGBA, L, P, ...).
        conf: YOLO confidence threshold.
        show_labels: whether to draw class labels on the annotation.

    Returns:
        Path to the annotated PNG, or None when no face / no detection.
    """
    # FIX: normalize to 3-channel RGB up front. The previous code crashed on
    # RGBA PNGs (4-channel array reaching a 3-channel cv2 conversion in the
    # face-detection path) and on grayscale/palette images, where the 2-D
    # array made `np_img.shape[2]` raise IndexError.
    np_img = np.array(image.convert("RGB"))
    # 1) A human face is mandatory before running the model
    face_bbox = _largest_face_bbox(np_img)
    if face_bbox is None:
        st.warning("⚠️ Aucun visage humain détecté. Veuillez centrer le visage.")
        return None
    # YOLO/cv2 work in BGR
    bgr_img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)
    results = model.predict(source=bgr_img, conf=conf, verbose=False)
    if len(results[0].boxes) == 0:
        return None
    annotated_image = results[0].plot(labels=show_labels)
    out_path = os.path.join(SAVE_DIR, f"image_result_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png")
    cv2.imwrite(out_path, annotated_image)
    return out_path
def predict_video(video_path, conf=0.85, show_labels=True):
    """Run stroke detection frame-by-frame on a video file.

    Frames without a detected human face are skipped entirely; annotated
    frames are written only when YOLO finds at least one box.

    Args:
        video_path: path to the input video.
        conf: YOLO confidence threshold.
        show_labels: whether to draw class labels on annotations.

    Returns:
        Path to the annotated mp4, or None when the video cannot be opened
        or no detection occurred on any frame.
    """
    cap = cv2.VideoCapture(video_path)
    # FIX: bail out early on an unreadable file instead of creating a
    # VideoWriter against 0x0 frames.
    if not cap.isOpened():
        return None
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out_path = os.path.join(SAVE_DIR, f"video_result_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4")
    fps = cap.get(cv2.CAP_PROP_FPS) or 30  # some containers report 0 fps
    width, height = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
    detections = 0
    # FIX: release capture/writer even if an exception interrupts the loop.
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            # Face detection on each frame (MediaPipe wants RGB)
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            face_bbox = _largest_face_bbox(frame_rgb)
            if face_bbox is None:
                continue  # skip this frame if no face
            results = model.predict(frame, conf=conf, verbose=False)
            if len(results[0].boxes) > 0:
                detections += 1
                annotated = results[0].plot(labels=show_labels)
                out.write(annotated)
    finally:
        cap.release()
        out.release()
    if detections == 0:
        return None
    return out_path
# ---------------- Streamlit interface ----------------
st.title("🧠 Stroke-IA – Détection AVC par IA")

# Shared result message, shown whenever no stroke (or no face) was found.
# FIX: was duplicated four times as an f-string with no placeholders (F541).
NO_DETECTION_MSG = "✅ No stroke detected or no face detected."

# Sidebar controls
st.sidebar.header("⚙️ Paramètres")
conf_threshold = st.sidebar.slider("Seuil de confiance", 0.1, 1.0, 0.85, 0.05)
show_labels = st.sidebar.checkbox("Afficher les labels", value=True)

# Quick examples
st.sidebar.header("📂 Exemples rapides")
if st.sidebar.button("Tester une image exemple"):
    if os.path.exists("example.jpg"):
        img = Image.open("example.jpg")
        path = predict_image(img, conf=conf_threshold, show_labels=show_labels)
        if path is None:
            st.success(NO_DETECTION_MSG)
        else:
            st.image(path, caption="Exemple annoté", use_container_width=True)
    else:
        st.warning("⚠️ Aucun fichier example.jpg trouvé.")
if st.sidebar.button("Tester une vidéo exemple"):
    if os.path.exists("example.mp4"):
        path = predict_video("example.mp4", conf=conf_threshold, show_labels=show_labels)
        if path is None:
            st.success(NO_DETECTION_MSG)
        else:
            st.video(path)
    else:
        st.warning("⚠️ Aucun fichier example.mp4 trouvé.")

# Video upload section
st.header("🎥 Détection sur vidéo")
video_file = st.file_uploader("Uploader une vidéo (mp4, mov, etc.)", type=["mp4", "mov"])
if video_file and st.button("Analyser la vidéo"):
    temp_path = os.path.join(SAVE_DIR, "temp_video.mp4")
    with open(temp_path, "wb") as f:
        f.write(video_file.read())
    result_path = predict_video(temp_path, conf=conf_threshold, show_labels=show_labels)
    if result_path is None:
        st.success(NO_DETECTION_MSG)
    else:
        st.video(result_path)

# Image upload section
st.header("🖼️ Détection sur image")
image_file = st.file_uploader("Uploader une image", type=["jpg", "jpeg", "png"])
if image_file and st.button("Analyser l'image"):
    image = Image.open(image_file)
    result_path = predict_image(image, conf=conf_threshold, show_labels=show_labels)
    if result_path is None:
        st.success(NO_DETECTION_MSG)
    else:
        st.image(result_path, caption="Image annotée", use_container_width=True)

# Footer / disclaimer (f-string kept only for the dynamic copyright year)
st.markdown(f"""
---
👨💻 **Badsi Djilali** — Ingénieur Deep Learning
🚀 Créateur de **Stroke_IA_Detection**
🧠 (Détection d'asymétrie faciale & AVC par IA)
⚠️ **Disclaimer :** Stroke-IA est une démo technique, pas un avis médical.
© {datetime.now().year} — Badsi Djilali.
""")