Spaces:
Sleeping
Sleeping
first commit
Browse files- .dockerignore +5 -0
- .gitignore +19 -0
- Dockerfile +38 -0
- README.md +5 -3
- app/config.py +17 -0
- app/main.py +144 -0
- app/model_registry.py +14 -0
- app/model_status.py +98 -0
- app/utils.py +46 -0
- app/voting.py +168 -0
- app/voting_save_para.py +116 -0
- requirements.txt +89 -0
.dockerignore
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
venv/
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
.DS_Store
|
| 5 |
+
.env
|
.gitignore
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Environnements virtuels
|
| 2 |
+
venv/
|
| 3 |
+
.venv/
|
| 4 |
+
env/
|
| 5 |
+
.env/
|
| 6 |
+
|
| 7 |
+
# Fichiers Python compilés
|
| 8 |
+
app/__pycache__/
|
| 9 |
+
*.pyc
|
| 10 |
+
|
| 11 |
+
# Fichiers système
|
| 12 |
+
.DS_Store
|
| 13 |
+
|
| 14 |
+
# Configurations IDE
|
| 15 |
+
.vscode/
|
| 16 |
+
.idea/
|
| 17 |
+
|
| 18 |
+
# Fichiers de logs
|
| 19 |
+
*.log
|
Dockerfile
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
FROM python:3.10-slim

# Install extra system libraries needed at runtime
# (libGL / glib — presumably for opencv-python in requirements.txt; confirm)
RUN apt-get update && apt-get install -y \
    libgl1-mesa-glx \
    libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# Create the "user" account (required by Hugging Face Spaces)
RUN useradd -m -u 1000 user
ENV HOME=/home/user
ENV PATH=/home/user/.local/bin:$PATH

# Set the working directory
WORKDIR $HOME/app

# Copy the requirements file with the right ownership
COPY --chown=user:user requirements.txt .

# Switch to the unprivileged user
USER user

# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application
COPY --chown=user:user . .

# Port required by Hugging Face Spaces
EXPOSE 7860

# Environment variables for Hugging Face Spaces
ENV PORT=7860
ENV HOST=0.0.0.0

# Startup command compatible with HF Spaces
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860", "--log-level", "info"]
|
README.md
CHANGED
|
@@ -1,10 +1,12 @@
|
|
| 1 |
---
|
| 2 |
title: Reco Orchestrator Api
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
|
|
|
| 8 |
---
|
| 9 |
|
| 10 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
| 1 |
---
|
| 2 |
title: Reco Orchestrator Api
|
| 3 |
+
emoji: 👁
|
| 4 |
+
colorFrom: red
|
| 5 |
+
colorTo: pink
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
| 8 |
+
short_description: Api d'orchestrateur des micros services d'inférence
|
| 9 |
---
|
| 10 |
|
| 11 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
| 12 |
+
To run the API locally: `uvicorn app.main:app --host 0.0.0.0 --port 7860 --log-level debug`
|
app/config.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Static bootstrap configuration: one entry per model micro-service,
# with its /predict endpoint and its /health probe URL.
# NOTE(review): the trailing underscore in `model_configs_` looks
# unintentional, but the name may be imported elsewhere — confirm before renaming.
model_configs_ = [
    {
        "model_name": "efficientnetv2m",
        "url": "https://rkonan-reco-efficientnet-api.hf.space/predict",
        "health_url": "https://rkonan-reco-efficientnet-api.hf.space/health"
    },
    {
        "model_name": "resnet50v2",
        "url": "https://rkonan-reco-resnet-api.hf.space/predict",
        "health_url": "https://rkonan-reco-resnet-api.hf.space/health"
    }
]

# Ping interval in seconds
HEALTH_CHECK_INTERVAL = 60  # tune as needed
# A model is considered stale when no heartbeat arrived within this window.
HEARTBEAT_TIMEOUT_SECONDS = 120  # 2 minutes
# Sleep between background health-check sweeps.
# NOTE(review): duplicates HEALTH_CHECK_INTERVAL; both names are imported
# by app.main and app.model_status, so neither can be dropped safely.
HEALTHCHECK_INTERVAL_SECONDS = 60
app/main.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI, UploadFile, File, HTTPException, Request, Query,Body
|
| 2 |
+
from pydantic import BaseModel
|
| 3 |
+
from typing import Union
|
| 4 |
+
import base64
|
| 5 |
+
import logging
|
| 6 |
+
from app.voting import soft_voting
|
| 7 |
+
from app.model_status import monitor_models_async, get_model_status,get_alive_model_configs,check_and_update_model_status_once
|
| 8 |
+
from app.config import HEALTH_CHECK_INTERVAL,HEALTHCHECK_INTERVAL_SECONDS,HEARTBEAT_TIMEOUT_SECONDS
|
| 9 |
+
import asyncio
|
| 10 |
+
from app.model_registry import get_model_registry,store_model
|
| 11 |
+
import time
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Logging
|
| 15 |
+
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
app = FastAPI()
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# Schéma pour les payloads JSON base64
|
| 23 |
+
class ImagePayload(BaseModel):
|
| 24 |
+
image: str
|
| 25 |
+
|
| 26 |
+
@app.on_event("startup")
|
| 27 |
+
async def startup_event():
|
| 28 |
+
logger.info("🚀 Démarrage de l'orchestrateur")
|
| 29 |
+
|
| 30 |
+
asyncio.create_task(monitor_models_async(get_model_registry()))
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
|
@app.post("/predict")
async def predict(
    request: Request,
    file: UploadFile = File(None),
    payload: Union[ImagePayload, None] = None,
    mode: str = Query("single", enum=["single", "voting", "automatic"], description="Mode de prédiction"),
    show_heatmap: bool = Query(False, description="Afficher la heatmap"),
    # "resnet50v2" added: it is the name the resnet service registers under
    # (see app/config.py); "resnet50" is kept for backward compatibility.
    default_model: str = Query("efficientnetv2m", enum=["efficientnetv2m", "resnet50", "resnet50v2"], description="Modèle par défaut")
):
    """Run a prediction through one or several model micro-services.

    The image is accepted either as a multipart upload (``file``) or as a
    JSON body containing a base64-encoded ``image`` field. Client-side
    payload problems surface as 422; unexpected failures as 500.
    """
    logger.info("🔁 Requête reçue")
    logger.info(f"Mode : {mode}, Default model : {default_model}, Heatmap : {show_heatmap}")

    try:
        # Read the image from an uploaded file
        if file:
            image_bytes = await file.read()
            logger.info(f"📂 Image reçue via multipart : {file.filename}, {len(image_bytes)} octets")
        else:
            # Otherwise try to read a base64 JSON body.
            # A missing/malformed body is a client error (422), not a 500.
            try:
                body = await request.json()
            except Exception:
                raise HTTPException(status_code=422, detail="Corps JSON invalide ou absent.")
            if "image" not in body:
                raise HTTPException(status_code=422, detail="Champ 'image' manquant.")
            try:
                image_bytes = base64.b64decode(body["image"])
            except Exception:
                raise HTTPException(status_code=422, detail="Champ 'image' invalide (base64 attendu).")
            logger.info(f"📦 Image reçue via JSON base64 : {len(image_bytes)} octets")

        # Only the models currently reported alive are candidates.
        model_configs = get_alive_model_configs()

        # Delegate to the multi-model voting logic.
        prediction = await soft_voting(model_configs, image_bytes, mode, show_heatmap, default_model)
        return prediction
    except HTTPException:
        raise

    except Exception as e:
        logger.error("❌ Erreur lors du traitement de la requête", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
|
@app.get("/health")
async def health_check():
    """Liveness endpoint: overall status plus alive/dead model name lists."""
    report = {"status": "ok"}
    report.update(get_model_status())
    return report
+
|
class RegisterModelRequest(BaseModel):
    """Payload sent by a model micro-service to register itself."""
    model_name: str  # registry key, e.g. "efficientnetv2m"
    model_type: str  # type label reported by the service
    url: str         # the service's /predict endpoint
+
|
| 90 |
+
|
| 91 |
+
@app.post("/register_model")
|
| 92 |
+
async def register_model(payload: RegisterModelRequest):
|
| 93 |
+
model_name = payload.model_name
|
| 94 |
+
model_type=payload.model_type
|
| 95 |
+
url = payload.url
|
| 96 |
+
|
| 97 |
+
# Si le modèle est déjà enregistré : mettre à jour son URL et le status
|
| 98 |
+
model_registry= get_model_registry()
|
| 99 |
+
if model_name in model_registry:
|
| 100 |
+
logger.info(f"🔄 Mise à jour de l'enregistrement pour {model_name}")
|
| 101 |
+
else:
|
| 102 |
+
logger.info(f"🆕 Nouveau modèle enregistré : {model_name}")
|
| 103 |
+
|
| 104 |
+
store_model(model_name, model_type,
|
| 105 |
+
url,
|
| 106 |
+
time.time(),
|
| 107 |
+
"online")
|
| 108 |
+
|
| 109 |
+
await check_and_update_model_status_once(model_name)
|
| 110 |
+
logger.info(f"✅ {model_name} enregistré avec succès")
|
| 111 |
+
return {"message": f"{model_name} enregistré avec succès"}
|
| 112 |
+
|
# NOTE: BaseModel is already imported at the top of this module; the
# redundant mid-file `from pydantic import BaseModel` was removed.

class HeartbeatRequest(BaseModel):
    """Payload of a heartbeat ping from a model micro-service."""
    model_name: str  # must match a name previously registered
+
|
| 118 |
+
|
| 119 |
+
|
@app.post("/heartbeat")
async def heartbeat(payload: HeartbeatRequest):
    """Record a heartbeat: refresh last_seen and force the status online."""
    name = payload.model_name
    registry = get_model_registry()
    if name not in registry:
        raise HTTPException(status_code=404, detail="Modèle inconnu")
    entry = registry[name]
    entry["last_seen"] = time.time()
    entry["status"] = "online"
    logger.info(f"💓 Heartbeat reçu de {name}")
    return {"message": f"Heartbeat reçu de {name}"}
+
|
| 131 |
+
|
| 132 |
+
|
@app.get("/available_models")
async def get_available_models():
    """List the registered models currently reported as online."""
    online = []
    for name, info in get_model_registry().items():
        if info["status"] != "online":
            continue
        online.append({
            "name": name,
            "model_type": info["model_type"],
            "url": info["url"],
            "status": info["status"],
        })
    return {"models": online}
+
|
| 144 |
+
|
app/model_registry.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from typing import Dict

# Process-wide, in-memory registry: model name -> metadata dict.
model_registry: Dict[str, Dict] = {}


def get_model_registry():
    """Return the shared in-memory model registry."""
    return model_registry


def store_model(name: str, model_type: str, url: str, last_seen, status: str):
    """Insert or overwrite one model's entry in the registry."""
    entry = {
        "model_name": name,
        "model_type": model_type,
        "url": url,
        "last_seen": last_seen,
        "status": status,
    }
    model_registry[name] = entry
|
app/model_status.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import asyncio
import logging
import httpx
import time
from app.config import HEALTH_CHECK_INTERVAL,HEALTHCHECK_INTERVAL_SECONDS,HEARTBEAT_TIMEOUT_SECONDS

logger = logging.getLogger(__name__)

# Global storage: snapshots of model entries split by last known health.
alive_models = []
dead_models = []

async def ping_api(session, config):
    """Return True when the model's health endpoint answers HTTP 200.

    NOTE(review): this expects an aiohttp-style session (`resp.status`,
    async-with on the request object) although the module only imports
    httpx — presumably the caller supplies an aiohttp.ClientSession; no
    caller is visible in this file, so confirm before reuse.
    """
    try:
        async with session.get(config["health_url"], timeout=2) as resp:
            return resp.status == 200
    except Exception:
        return False
+
|
| 20 |
+
def get_model_status():
|
| 21 |
+
return {
|
| 22 |
+
"alive_models": [m["model_name"] for m in alive_models],
|
| 23 |
+
"dead_models": [m["model_name"] for m in dead_models]
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
def get_alive_model_configs():
|
| 27 |
+
return alive_models
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
async def monitor_models_async(model_registry):
|
| 31 |
+
global alive_models, dead_models
|
| 32 |
+
while True:
|
| 33 |
+
now=time.time()
|
| 34 |
+
temp_alive = []
|
| 35 |
+
temp_dead = []
|
| 36 |
+
for name, info in model_registry.items():
|
| 37 |
+
time_diff = now - info["last_seen"]
|
| 38 |
+
if time_diff > HEARTBEAT_TIMEOUT_SECONDS:
|
| 39 |
+
try:
|
| 40 |
+
health_url = info["url"].replace("/predict", "/health")
|
| 41 |
+
async with httpx.AsyncClient(timeout=3) as client:
|
| 42 |
+
resp = await client.get(health_url)
|
| 43 |
+
if resp.status_code == 200:
|
| 44 |
+
model_registry[name]["status"] = "online"
|
| 45 |
+
else:
|
| 46 |
+
model_registry[name]["status"] = "offline"
|
| 47 |
+
except:
|
| 48 |
+
model_registry[name]["status"] = "offline"
|
| 49 |
+
if model_registry[name]["status"] == "online":
|
| 50 |
+
temp_alive.append(model_registry[name])
|
| 51 |
+
else:
|
| 52 |
+
temp_dead.append(model_registry[name])
|
| 53 |
+
|
| 54 |
+
alive_models =temp_alive
|
| 55 |
+
dead_models = temp_dead
|
| 56 |
+
alive=[m["model_name"] for m in alive_models]
|
| 57 |
+
dead=[m["model_name"] for m in dead_models]
|
| 58 |
+
|
| 59 |
+
logger.info(f"✅ Modèles actifs : {alive}")
|
| 60 |
+
logger.warning(f"❌ Modèles inactifs : {dead}")
|
| 61 |
+
|
| 62 |
+
await asyncio.sleep(HEALTHCHECK_INTERVAL_SECONDS)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
import httpx
|
| 66 |
+
from app.model_registry import get_model_registry
|
| 67 |
+
|
| 68 |
+
async def check_and_update_model_status_once(model_name: str):
|
| 69 |
+
global alive_models, dead_models
|
| 70 |
+
registry = get_model_registry()
|
| 71 |
+
info = registry.get(model_name)
|
| 72 |
+
if not info:
|
| 73 |
+
return
|
| 74 |
+
|
| 75 |
+
try:
|
| 76 |
+
health_url = info["url"].replace("/predict", "/health")
|
| 77 |
+
async with httpx.AsyncClient(timeout=3) as client:
|
| 78 |
+
resp = await client.get(health_url)
|
| 79 |
+
status = "online" if resp.status_code == 200 else "offline"
|
| 80 |
+
except:
|
| 81 |
+
status = "offline"
|
| 82 |
+
|
| 83 |
+
info["status"] = status
|
| 84 |
+
# 🔁 Mettre à jour les listes globales
|
| 85 |
+
temp_alive = []
|
| 86 |
+
temp_dead = []
|
| 87 |
+
for name, m_info in registry.items():
|
| 88 |
+
if m_info.get("status") == "online":
|
| 89 |
+
temp_alive.append(m_info)
|
| 90 |
+
else:
|
| 91 |
+
temp_dead.append(m_info)
|
| 92 |
+
|
| 93 |
+
alive_models = temp_alive
|
| 94 |
+
dead_models = temp_dead
|
| 95 |
+
|
| 96 |
+
logger.info(f"🔁 Mise à jour unique pour `{model_name}` → {status}")
|
| 97 |
+
logger.info(f"✅ Modèles actifs : {[m['model_name'] for m in alive_models]}")
|
| 98 |
+
logger.warning(f"❌ Modèles inactifs : {[m['model_name'] for m in dead_models]}")
|
app/utils.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# utils.py (or inline in main.py)

import aiohttp
import asyncio
import numpy as np
from scipy.spatial.distance import jensenshannon

# Disagreement threshold compared against the JSD score in app.voting.
# NOTE(review): "threashold" is a typo, but the name is imported by
# app/voting.py and app/voting_save_para.py, so renaming would break callers.
shannon_threashold=0.15
+
|
| 10 |
+
|
| 11 |
+
def compute_js_divergence(all_probs):
|
| 12 |
+
"""
|
| 13 |
+
Calcule la divergence de Jensen-Shannon sur une liste de distributions de probabilités.
|
| 14 |
+
|
| 15 |
+
Args:
|
| 16 |
+
all_probs (list of np.array): Liste des prédictions de chaque modèle (softmax).
|
| 17 |
+
|
| 18 |
+
Returns:
|
| 19 |
+
float: La divergence de Jensen-Shannon entre les modèles.
|
| 20 |
+
"""
|
| 21 |
+
if len(all_probs) < 2:
|
| 22 |
+
return 0.0 # Pas de désaccord possible avec un seul modèle
|
| 23 |
+
|
| 24 |
+
# Convertir la liste en tableau numpy (shape: [nb_modèles, nb_classes])
|
| 25 |
+
probs_array = np.array(all_probs)
|
| 26 |
+
|
| 27 |
+
# Calculer la moyenne des distributions (distribution "moyenne")
|
| 28 |
+
mean_probs = np.mean(probs_array, axis=0)
|
| 29 |
+
|
| 30 |
+
# Calculer la JSD entre chaque modèle et la moyenne
|
| 31 |
+
jsd_values = []
|
| 32 |
+
for probs in probs_array:
|
| 33 |
+
jsd = jensenshannon(probs, mean_probs, base=2) # base=2 : divergence bornée entre 0 et 1
|
| 34 |
+
jsd_values.append(jsd)
|
| 35 |
+
|
| 36 |
+
# Retourner la moyenne des divergences
|
| 37 |
+
return np.mean(jsd_values)
|
| 38 |
+
|
| 39 |
+
# Si js_divergence > 0.1 → Désaccord modéré
|
| 40 |
+
|
| 41 |
+
def compute_entropy_safe(probas):
|
| 42 |
+
probas = np.array(probas)
|
| 43 |
+
# On garde uniquement les probabilités strictement positives
|
| 44 |
+
mask = probas > 0
|
| 45 |
+
entropy = -(np.sum(probas[mask] * np.log(probas[mask])))
|
| 46 |
+
return entropy
|
app/voting.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import aiohttp
import numpy as np
import logging
from app.utils import compute_js_divergence,shannon_threashold,compute_entropy_safe
from app.model_status import monitor_models_async,check_and_update_model_status_once
from app.model_registry import get_model_registry
from fastapi import HTTPException

# NOTE(review): calling basicConfig at import time configures the root
# logger for the whole process and can override the host app's logging
# setup; kept as-is because removing it would change module side effects.
logging.basicConfig(
    level=logging.DEBUG, # or logging.DEBUG
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
)
logger = logging.getLogger(__name__)
+
|
| 15 |
+
|
| 16 |
+
async def call_model_api(config, image_bytes, show_heatmap):
|
| 17 |
+
async with aiohttp.ClientSession() as session:
|
| 18 |
+
try:
|
| 19 |
+
files = {
|
| 20 |
+
'file': image_bytes
|
| 21 |
+
}
|
| 22 |
+
params = {
|
| 23 |
+
'show_heatmap': str(show_heatmap).lower()
|
| 24 |
+
}
|
| 25 |
+
async with session.post(config["url"], data=files, params=params) as resp:
|
| 26 |
+
if resp.status != 200:
|
| 27 |
+
logger.warning(f"⚠️ Échec de l'appel à {config['model_name']} : {resp.status}")
|
| 28 |
+
return None
|
| 29 |
+
return await resp.json()
|
| 30 |
+
except Exception as e:
|
| 31 |
+
logger.error(f"❌ Erreur lors de l'appel à l'API {config['model_name']}: {e}")
|
| 32 |
+
await check_and_update_model_status_once(config["model_name"])
|
| 33 |
+
return None
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
async def soft_voting(model_configs, image_bytes: bytes, mode, show_heatmap, default_model):
|
| 37 |
+
logger.info("🔁 Début de la prédiction multi-modèles")
|
| 38 |
+
|
| 39 |
+
all_probs = []
|
| 40 |
+
models = []
|
| 41 |
+
models_predictions = []
|
| 42 |
+
models_confidences = []
|
| 43 |
+
models_entropies = []
|
| 44 |
+
models_uncertainties = []
|
| 45 |
+
models_heatmaps = []
|
| 46 |
+
|
| 47 |
+
default_config = next((cfg for cfg in model_configs if cfg["model_name"].lower() == default_model.lower()), None)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
if not default_config:
|
| 51 |
+
logger.warning(f"❌ Modèle par défaut '{default_model}' non trouvé ou indisponible.")
|
| 52 |
+
if mode == "single":
|
| 53 |
+
raise HTTPException(
|
| 54 |
+
status_code=400,
|
| 55 |
+
detail=f"Modèle par défaut '{default_model}' indisponible. Veuillez en choisir un autre ou réessayer plus tard.")
|
| 56 |
+
|
| 57 |
+
else:
|
| 58 |
+
logger.info(f"🔁 Fallback : passage automatique en mode 'voting'")
|
| 59 |
+
|
| 60 |
+
mode = "voting"
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# 🔰 Étape 1 : prédiction avec le modèle par défaut
|
| 64 |
+
logger.info(f"🚀 Prédiction avec le modèle par défaut : {default_model}")
|
| 65 |
+
prediction = await call_model_api(default_config, image_bytes, show_heatmap)
|
| 66 |
+
if prediction is None:
|
| 67 |
+
raise Exception("Aucune réponse du modèle par défaut.")
|
| 68 |
+
|
| 69 |
+
all_probs.append(np.array(prediction["preds"]))
|
| 70 |
+
models_predictions.append(prediction["predicted_class"])
|
| 71 |
+
models_confidences.append(prediction["confidence"])
|
| 72 |
+
models_entropies.append(prediction["entropy"])
|
| 73 |
+
models_uncertainties.append(prediction["is_uncertain_model"])
|
| 74 |
+
logger.debug(f"🧠 Incertitude du modèle ajoutée : {prediction['is_uncertain_model']}")
|
| 75 |
+
|
| 76 |
+
models.append(default_config["model_name"])
|
| 77 |
+
|
| 78 |
+
if show_heatmap:
|
| 79 |
+
heatmap = prediction.get("heatmap")
|
| 80 |
+
if heatmap:
|
| 81 |
+
models_heatmaps.append(heatmap)
|
| 82 |
+
|
| 83 |
+
mean_probs = np.mean(all_probs, axis=0)
|
| 84 |
+
final_class = int(np.argmax(mean_probs))
|
| 85 |
+
final_confidence = float(mean_probs[final_class])
|
| 86 |
+
entropy = float(compute_entropy_safe(mean_probs))
|
| 87 |
+
jsd_score = float(compute_js_divergence(all_probs))
|
| 88 |
+
|
| 89 |
+
# 🛑 Mode 'single' → retour immédiat
|
| 90 |
+
if mode == "single":
|
| 91 |
+
is_global_uncertain = models_uncertainties[0]
|
| 92 |
+
logger.info("🛑 Mode 'single' activé")
|
| 93 |
+
return {
|
| 94 |
+
"predicted_class": final_class,
|
| 95 |
+
"confidence": final_confidence,
|
| 96 |
+
"entropy": entropy,
|
| 97 |
+
"jsd_score": jsd_score,
|
| 98 |
+
"models": models,
|
| 99 |
+
"is_global_uncertain": is_global_uncertain,
|
| 100 |
+
"models_predictions": models_predictions,
|
| 101 |
+
"models_confidences": models_confidences,
|
| 102 |
+
"models_entropies": models_entropies,
|
| 103 |
+
"models_uncertainties": models_uncertainties,
|
| 104 |
+
"models_heatmaps": models_heatmaps
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
# 🧠 Mode 'automatic' → si confiance suffisante on s'arrête
|
| 108 |
+
if mode == "automatic" and prediction["confidence"] >= 0.90:
|
| 109 |
+
is_global_uncertain = models_uncertainties[0]
|
| 110 |
+
logger.info("✅ Confiance suffisante, pas de vote")
|
| 111 |
+
return {
|
| 112 |
+
"predicted_class": final_class,
|
| 113 |
+
"confidence": final_confidence,
|
| 114 |
+
"entropy": entropy,
|
| 115 |
+
"jsd_score": jsd_score,
|
| 116 |
+
"models": models,
|
| 117 |
+
"is_global_uncertain": is_global_uncertain,
|
| 118 |
+
"models_predictions": models_predictions,
|
| 119 |
+
"models_confidences": models_confidences,
|
| 120 |
+
"models_entropies": models_entropies,
|
| 121 |
+
"models_uncertainties": models_uncertainties,
|
| 122 |
+
"models_heatmaps": models_heatmaps
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
# 🔁 Sinon → on complète avec les autres modèles
|
| 126 |
+
logger.info(f"🔍 Mode '{mode}' : on appelle les autres modèles")
|
| 127 |
+
|
| 128 |
+
for config in model_configs:
|
| 129 |
+
if config["model_name"].lower() == default_model.lower():
|
| 130 |
+
continue
|
| 131 |
+
|
| 132 |
+
prediction = await call_model_api(config, image_bytes, show_heatmap)
|
| 133 |
+
if not prediction:
|
| 134 |
+
continue
|
| 135 |
+
|
| 136 |
+
all_probs.append(np.array(prediction["preds"]))
|
| 137 |
+
models_predictions.append(prediction["predicted_class"])
|
| 138 |
+
models_confidences.append(prediction["confidence"])
|
| 139 |
+
models_entropies.append(prediction["entropy"])
|
| 140 |
+
models_uncertainties.append(prediction["is_uncertain_model"])
|
| 141 |
+
models.append(config["model_name"])
|
| 142 |
+
|
| 143 |
+
if show_heatmap and prediction.get("heatmap"):
|
| 144 |
+
models_heatmaps.append(prediction["heatmap"])
|
| 145 |
+
|
| 146 |
+
mean_probs = np.mean(all_probs, axis=0)
|
| 147 |
+
final_class = int(np.argmax(mean_probs))
|
| 148 |
+
final_confidence = float(mean_probs[final_class])
|
| 149 |
+
entropy = float(compute_entropy_safe(mean_probs))
|
| 150 |
+
jsd_score = float(compute_js_divergence(all_probs))
|
| 151 |
+
is_global_uncertain = any(models_uncertainties) and jsd_score > shannon_threashold
|
| 152 |
+
|
| 153 |
+
logger.info("✅ Vote terminé")
|
| 154 |
+
return {
|
| 155 |
+
"predicted_class": final_class,
|
| 156 |
+
"confidence": final_confidence,
|
| 157 |
+
"entropy": entropy,
|
| 158 |
+
"jsd_score": jsd_score,
|
| 159 |
+
"models": models,
|
| 160 |
+
"is_global_uncertain": is_global_uncertain,
|
| 161 |
+
"models_predictions": models_predictions,
|
| 162 |
+
"models_confidences": models_confidences,
|
| 163 |
+
"models_entropies": models_entropies,
|
| 164 |
+
"models_uncertainties": models_uncertainties,
|
| 165 |
+
"models_heatmaps": models_heatmaps
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
|
app/voting_save_para.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import aiohttp
import numpy as np
import logging
from app.utils import compute_js_divergence,shannon_threashold,compute_entropy_safe
from app.model_status import monitor_models_async,check_and_update_model_status_once
from app.model_registry import get_model_registry
from fastapi import HTTPException
import asyncio
# NOTE(review): numpy and HTTPException are imported twice in this module —
# harmless but worth cleaning up.
import numpy as np
from fastapi import HTTPException

# NOTE(review): basicConfig at import time configures the root logger for
# the whole process; kept as-is to preserve module side effects.
logging.basicConfig(
    level=logging.DEBUG, # or logging.DEBUG
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
)
logger = logging.getLogger(__name__)
+
|
| 18 |
+
|
| 19 |
+
async def call_model_api(config, image_bytes, show_heatmap):
|
| 20 |
+
async with aiohttp.ClientSession() as session:
|
| 21 |
+
try:
|
| 22 |
+
files = {
|
| 23 |
+
'file': image_bytes
|
| 24 |
+
}
|
| 25 |
+
params = {
|
| 26 |
+
'show_heatmap': str(show_heatmap).lower()
|
| 27 |
+
}
|
| 28 |
+
async with session.post(config["url"], data=files, params=params) as resp:
|
| 29 |
+
if resp.status != 200:
|
| 30 |
+
logger.warning(f"⚠️ Échec de l'appel à {config['model_name']} : {resp.status}")
|
| 31 |
+
return None
|
| 32 |
+
return await resp.json()
|
| 33 |
+
except Exception as e:
|
| 34 |
+
logger.error(f"❌ Erreur lors de l'appel à l'API {config['model_name']}: {e}")
|
| 35 |
+
await check_and_update_model_status_once(config["model_name"])
|
| 36 |
+
return None
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
async def soft_voting(model_configs, image_bytes: bytes, mode, show_heatmap, default_model):
|
| 41 |
+
logger.info("🔁 Début de la prédiction hybride multi-modèles")
|
| 42 |
+
|
| 43 |
+
default_config = next((cfg for cfg in model_configs if cfg["model_name"].lower() == default_model.lower()), None)
|
| 44 |
+
if not default_config:
|
| 45 |
+
raise HTTPException(400, f"Modèle par défaut '{default_model}' indisponible.")
|
| 46 |
+
|
| 47 |
+
logger.info(f"🚀 Prédiction avec le modèle par défaut : {default_model}")
|
| 48 |
+
default_pred = await call_model_api(default_config, image_bytes, show_heatmap)
|
| 49 |
+
if not default_pred:
|
| 50 |
+
raise Exception("Aucune réponse du modèle par défaut.")
|
| 51 |
+
|
| 52 |
+
if mode == "single" or (mode == "automatic" and default_pred["confidence"] >= 0.90):
|
| 53 |
+
logger.info("✅ Mode 'single' ou confiance suffisante en 'automatic'")
|
| 54 |
+
return {
|
| 55 |
+
"predicted_class": default_pred["predicted_class"],
|
| 56 |
+
"confidence": default_pred["confidence"],
|
| 57 |
+
"entropy": default_pred["entropy"],
|
| 58 |
+
"jsd_score": 0.0,
|
| 59 |
+
"models": [default_config["model_name"]],
|
| 60 |
+
"is_global_uncertain": default_pred["is_uncertain_model"],
|
| 61 |
+
"models_predictions": [default_pred["predicted_class"]],
|
| 62 |
+
"models_confidences": [default_pred["confidence"]],
|
| 63 |
+
"models_entropies": [default_pred["entropy"]],
|
| 64 |
+
"models_uncertainties": [default_pred["is_uncertain_model"]],
|
| 65 |
+
"models_heatmaps": [default_pred.get("heatmap")] if show_heatmap else []
|
| 66 |
+
}
|
| 67 |
+
if mode!="voting":
|
| 68 |
+
logger.info("📡 Confiance insuffisante → prédiction en parallèle des autres modèles")
|
| 69 |
+
else:
|
| 70 |
+
logger.info("📡 prédiction vote en parallèle")
|
| 71 |
+
|
| 72 |
+
async def call_model(config):
|
| 73 |
+
pred = await call_model_api(config, image_bytes, show_heatmap)
|
| 74 |
+
return (config["model_name"], pred) if pred else None
|
| 75 |
+
|
| 76 |
+
other_configs = [cfg for cfg in model_configs if cfg["model_name"].lower() != default_model.lower()]
|
| 77 |
+
results = await asyncio.gather(*(call_model(cfg) for cfg in other_configs))
|
| 78 |
+
results = [(default_config["model_name"], default_pred)] + [r for r in results if r]
|
| 79 |
+
|
| 80 |
+
# Fusion des prédictions
|
| 81 |
+
all_probs, models, models_predictions, models_confidences = [], [], [], []
|
| 82 |
+
models_entropies, models_uncertainties, models_heatmaps = [], [], []
|
| 83 |
+
|
| 84 |
+
for model_name, pred in results:
|
| 85 |
+
all_probs.append(np.array(pred["preds"]))
|
| 86 |
+
models.append(model_name)
|
| 87 |
+
models_predictions.append(pred["predicted_class"])
|
| 88 |
+
models_confidences.append(pred["confidence"])
|
| 89 |
+
models_entropies.append(pred["entropy"])
|
| 90 |
+
models_uncertainties.append(pred["is_uncertain_model"])
|
| 91 |
+
if show_heatmap and pred.get("heatmap"):
|
| 92 |
+
models_heatmaps.append(pred["heatmap"])
|
| 93 |
+
|
| 94 |
+
mean_probs = np.mean(all_probs, axis=0)
|
| 95 |
+
final_class = int(np.argmax(mean_probs))
|
| 96 |
+
final_confidence = float(mean_probs[final_class])
|
| 97 |
+
entropy = float(compute_entropy_safe(mean_probs))
|
| 98 |
+
jsd_score = float(compute_js_divergence(all_probs))
|
| 99 |
+
is_global_uncertain = any(models_uncertainties) and jsd_score > shannon_threashold
|
| 100 |
+
|
| 101 |
+
logger.info("✅ Vote hybride terminé")
|
| 102 |
+
return {
|
| 103 |
+
"predicted_class": final_class,
|
| 104 |
+
"confidence": final_confidence,
|
| 105 |
+
"entropy": entropy,
|
| 106 |
+
"jsd_score": jsd_score,
|
| 107 |
+
"models": models,
|
| 108 |
+
"is_global_uncertain": is_global_uncertain,
|
| 109 |
+
"models_predictions": models_predictions,
|
| 110 |
+
"models_confidences": models_confidences,
|
| 111 |
+
"models_entropies": models_entropies,
|
| 112 |
+
"models_uncertainties": models_uncertainties,
|
| 113 |
+
"models_heatmaps": models_heatmaps
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
|
requirements.txt
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
absl-py==1.4.0
|
| 2 |
+
aiohappyeyeballs==2.6.1
|
| 3 |
+
aiohttp==3.12.13
|
| 4 |
+
aiosignal==1.3.2
|
| 5 |
+
annotated-types==0.7.0
|
| 6 |
+
anyio==4.9.0
|
| 7 |
+
asttokens==3.0.0
|
| 8 |
+
astunparse==1.6.3
|
| 9 |
+
attrs==25.3.0
|
| 10 |
+
certifi==2025.6.15
|
| 11 |
+
charset-normalizer==3.4.2
|
| 12 |
+
click==8.2.1
|
| 13 |
+
comm==0.2.2
|
| 14 |
+
contourpy==1.3.2
|
| 15 |
+
cycler==0.12.1
|
| 16 |
+
debugpy==1.8.14
|
| 17 |
+
decorator==5.2.1
|
| 18 |
+
Deprecated==1.2.18
|
| 19 |
+
dm-tree==0.1.9
|
| 20 |
+
executing==2.2.0
|
| 21 |
+
fastapi==0.115.12
|
| 22 |
+
flatbuffers==25.2.10
|
| 23 |
+
fonttools==4.58.4
|
| 24 |
+
frozenlist==1.7.0
|
| 25 |
+
gast==0.6.0
|
| 26 |
+
google-pasta==0.2.0
|
| 27 |
+
grpcio==1.73.0
|
| 28 |
+
h11==0.16.0
|
| 29 |
+
h5py==3.14.0
|
| 30 |
+
idna==3.10
|
| 31 |
+
imageio==2.37.0
|
| 32 |
+
jedi==0.19.2
|
| 33 |
+
jupyter_client==8.6.3
|
| 34 |
+
jupyter_core==5.8.1
|
| 35 |
+
keras==3.10.0
|
| 36 |
+
kiwisolver==1.4.8
|
| 37 |
+
libclang==18.1.1
|
| 38 |
+
Markdown==3.8
|
| 39 |
+
markdown-it-py==3.0.0
|
| 40 |
+
MarkupSafe==3.0.2
|
| 41 |
+
matplotlib==3.10.3
|
| 42 |
+
matplotlib-inline==0.1.7
|
| 43 |
+
mdurl==0.1.2
|
| 44 |
+
ml_dtypes==0.5.1
|
| 45 |
+
multidict==6.4.4
|
| 46 |
+
namex==0.1.0
|
| 47 |
+
nest-asyncio==1.6.0
|
| 48 |
+
numpy==1.26.4
|
| 49 |
+
opencv-python==4.11.0.86
|
| 50 |
+
opt_einsum==3.4.0
|
| 51 |
+
optree==0.16.0
|
| 52 |
+
packaging==25.0
|
| 53 |
+
parso==0.8.4
|
| 54 |
+
pexpect==4.9.0
|
| 55 |
+
pillow==11.2.1
|
| 56 |
+
platformdirs==4.3.8
|
| 57 |
+
prompt_toolkit==3.0.51
|
| 58 |
+
propcache==0.3.2
|
| 59 |
+
protobuf==5.29.5
|
| 60 |
+
psutil==7.0.0
|
| 61 |
+
ptyprocess==0.7.0
|
| 62 |
+
pure_eval==0.2.3
|
| 63 |
+
pydantic==2.11.7
|
| 64 |
+
pydantic_core==2.33.2
|
| 65 |
+
Pygments==2.19.1
|
| 66 |
+
pyparsing==3.2.3
|
| 67 |
+
python-dateutil==2.9.0.post0
|
| 68 |
+
python-multipart==0.0.20
|
| 69 |
+
pyzmq==27.0.0
|
| 70 |
+
requests==2.32.4
|
| 71 |
+
rich==14.0.0
|
| 72 |
+
scipy==1.11.4
|
| 73 |
+
setuptools==80.9.0
|
| 74 |
+
six==1.17.0
|
| 75 |
+
sniffio==1.3.1
|
| 76 |
+
stack-data==0.6.3
|
| 77 |
+
starlette==0.46.2
|
| 78 |
+
termcolor==3.1.0
|
| 79 |
+
tornado==6.5.1
|
| 80 |
+
traitlets==5.14.3
|
| 81 |
+
typing-inspection==0.4.1
|
| 82 |
+
typing_extensions==4.14.0
|
| 83 |
+
urllib3==2.4.0
|
| 84 |
+
uvicorn==0.34.3
|
| 85 |
+
wcwidth==0.2.13
|
| 86 |
+
Werkzeug==3.1.3
|
| 87 |
+
wheel==0.45.1
|
| 88 |
+
wrapt==1.17.2
|
| 89 |
+
yarl==1.20.1
|