lojol469-cmd committed · commit 93aade3 · 1 parent: f3a56a5
Deployment of the Kibali API with Docker ("Déploiement de l'API Kibali avec Docker")

Files changed:
- Dockerfile (+17, -26)
- kibali-ui/index.html (+10, -4)
- kibali-ui/public/kibali_logo.svg (+48, -0)
- kibali-ui/public/manifest.json (+15, -0)
- kibali-ui/public/vite.svg (+0, -1)
- main.py (+70, -110)
Dockerfile
CHANGED
@@ -1,18 +1,13 @@
-#
-FROM
-
-COPY kibali-ui/package*.json ./
-RUN npm install
-COPY kibali-ui/ ./
-RUN npm run build
-
-# --- STAGE 2 : Backend (Base NVIDIA Blackwell Compatible) ---
-# On utilise une base 12.6 qui supporte les drivers de la série 50
-FROM nvidia/cuda:12.6.1-runtime-ubuntu22.04
+# Utilisation d'une base CUDA optimisée pour les performances
+FROM nvidia/cuda:12.4.1-runtime-ubuntu22.04
+
 WORKDIR /app
 
 ENV DEBIAN_FRONTEND=noninteractive
+ENV PYTHONUNBUFFERED=1
+ENV MODEL_PATH=/app/model_cache
 
+# Installation des dépendances système
 RUN apt-get update && apt-get install -y \
     python3-pip \
     python3-dev \
@@ -20,27 +15,23 @@ RUN apt-get update && apt-get install -y \
     git \
     && rm -rf /var/lib/apt/lists/*
 
-#
-# C'est ici qu'on débloque le support sm_120
+# Installation de PyTorch et des bibliothèques de calcul
 RUN pip3 install --no-cache-dir --upgrade pip
-RUN pip3 install --
+RUN pip3 install --no-cache-dir torch torchvision torchaudio
 
-#
+# Copie et installation des dépendances Python
 COPY requirements.txt .
 RUN pip3 install --no-cache-dir -r requirements.txt
+RUN pip3 install --upgrade transformers accelerate bitsandbytes sentence-transformers faiss-cpu
 
-#
-RUN pip3 install --upgrade transformers accelerate bitsandbytes
-
-COPY --from=build-frontend /app/frontend/dist ./static
+# Copie de tout le projet (incluant tools/, static/, etc.)
 COPY . .
 
-
-
-ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
-ENV PYTHONUNBUFFERED=1
-ENV MODEL_PATH=/app/model_cache
+# Création des dossiers nécessaires avec les bons droits
+RUN mkdir -p /app/model_cache /app/static /app/data && chmod -R 777 /app
 
-[removed line not captured in the extract]
+# Port imposé par Hugging Face Spaces
+EXPOSE 7860
 
-[removed line not captured in the extract]
+# Lancement de l'API sur le port 7860
+CMD ["python3", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
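A quick way to sanity-check the rebuilt image is to hit the FastAPI schema endpoint once the container is up. A minimal sketch, assuming the image was tagged kibali-api and started with `docker run --gpus all -p 7860:7860 kibali-api` (the tag and run flags are illustrative, not from the commit):

# smoke_test.py (hypothetical helper, not part of the commit)
import json
import urllib.request

# FastAPI exposes its OpenAPI schema at /openapi.json by default.
with urllib.request.urlopen("http://localhost:7860/openapi.json", timeout=10) as resp:
    schema = json.load(resp)
print("Routes served:", sorted(schema["paths"]))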
kibali-ui/index.html
CHANGED
@@ -1,13 +1,19 @@
 <!doctype html>
-<html lang="
+<html lang="fr">
 <head>
     <meta charset="UTF-8" />
-[removed line not captured in the extract]
+
+    <link rel="icon" type="image/svg+xml" href="/kibali_logo.svg?v=2" />
+
+    <link rel="apple-touch-icon" href="/kibali_logo.svg?v=2" />
+
+    <link rel="manifest" href="/manifest.json?v=2" />
+
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <title>
+    <meta name="theme-color" content="#7c3aed" /> <title>Kibali AI</title>
 </head>
 <body>
     <div id="root"></div>
     <script type="module" src="/src/main.jsx"></script>
 </body>
 </html>
kibali-ui/public/kibali_logo.svg
ADDED
kibali-ui/public/manifest.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "short_name": "Kibali",
+  "name": "Kibali AI Assistant",
+  "icons": [
+    {
+      "src": "/kibali_logo.svg",
+      "type": "image/svg+xml",
+      "sizes": "512x512"
+    }
+  ],
+  "start_url": ".",
+  "display": "standalone",
+  "theme_color": "#000000",
+  "background_color": "#ffffff"
+}
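The ?v=2 query strings on the icon and manifest links above are cache-busters. A quick check that a deployed Space actually serves the new assets (sketch; the base URL is the placeholder from the startup log in main.py below, replace it with the real Space URL):

# asset_check.py (hypothetical helper, not part of the commit)
import urllib.request

BASE = "https://your-username-your-space.hf.space"  # placeholder, as in the startup log
for path in ("/kibali_logo.svg?v=2", "/manifest.json?v=2"):
    with urllib.request.urlopen(BASE + path, timeout=10) as resp:
        print(path, resp.status, resp.headers.get("Content-Type"))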
kibali-ui/public/vite.svg
DELETED
main.py
CHANGED
@@ -54,20 +54,25 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-# --- CHARGEMENT DES MODÈLES ---
-[removed lines not captured in the extract]
+# --- CHARGEMENT DES MODÈLES (téléchargement depuis Hugging Face Hub) ---
+HF_MODEL_ID = "BelikanM/kibali-final-merged"
+CACHE_DIR = "/data/cache"  # Dossier persistant sur HF Spaces
+
+os.makedirs(CACHE_DIR, exist_ok=True)
 
 logger.info("Chargement du modèle d'embedding...")
-[removed lines not captured in the extract]
+embed_model = SentenceTransformer(
+    'paraphrase-multilingual-MiniLM-L12-v2',
+    cache_folder=CACHE_DIR
+)
 
-logger.info("Chargement du tokenizer et du modèle LLM...")
-# Suppression de local_files_only=True pour permettre la compatibilité initiale avec nouvelles architectures GPU
-tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+logger.info(f"Chargement du tokenizer et du modèle LLM depuis Hugging Face : {HF_MODEL_ID}")
+tokenizer = AutoTokenizer.from_pretrained(HF_MODEL_ID, cache_dir=CACHE_DIR)
 if tokenizer.pad_token is None:
     tokenizer.pad_token = tokenizer.eos_token
 
+# Configuration 4-bit pour réduire la consommation VRAM
 bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
     bnb_4bit_use_double_quant=True,
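One detail worth flagging: the embedding model chosen here, paraphrase-multilingual-MiniLM-L12-v2, produces 384-dimensional vectors, which is exactly what the `dimension = 384` FAISS setup further down assumes. A minimal check, as a sketch run outside the app:

# dim_check.py (sketch; requires sentence-transformers)
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")
emb = model.encode(["Bonjour Libreville"], normalize_embeddings=True)
assert emb.shape == (1, 384)  # must match faiss.IndexFlatL2(dimension) below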
@@ -75,25 +80,30 @@ bnb_config = BitsAndBytesConfig(
     bnb_4bit_compute_dtype=torch.float16
 )
 
-[removed model-loading block not captured in the extract]
+try:
+    model = AutoModelForCausalLM.from_pretrained(
+        HF_MODEL_ID,
+        quantization_config=bnb_config,
+        device_map="auto",
+        torch_dtype=torch.float16,
+        trust_remote_code=True,
+        low_cpu_mem_usage=True,
+        cache_dir=CACHE_DIR
+    )
+    logger.info(f"Modèle chargé avec succès sur {model.device}")
+except Exception as e:
+    logger.error(f"Erreur lors du chargement du modèle : {e}")
+    raise e
 
 # --- BASES VECTORIELLES GLOBALES ---
 dimension = 384
 doc_index = faiss.IndexFlatL2(dimension)
 doc_chunks: List[str] = []
 doc_metadata: List[dict] = []
 
 memory_index = faiss.IndexFlatL2(dimension)
 memory_texts: List[str] = []
 memory_metadata: List[dict] = []
 
 # --- GESTION DU CONTEXTE CONVERSATIONNEL ---
 class ConversationContext:
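With the model loaded in 4-bit, a one-off generation looks like the following. This is a sketch reusing the `model` and `tokenizer` objects from the block above; max_new_tokens is an arbitrary illustrative value:

# Sketch: single non-streaming generation with the 4-bit model above.
prompt = "Bonjour, présente-toi en une phrase."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))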
@@ -103,15 +113,13 @@ class ConversationContext:
         self.subject_start_time = None
         self.message_count = 0
         self.subject_keywords = []
 
     def update_subject(self, message: str, embedding: np.ndarray):
-        """Détecte et met à jour le sujet actuel de la conversation"""
         keywords = self._extract_keywords(message)
 
-        # Détection de changement de sujet
         if self.subject_embedding is not None:
             similarity = np.dot(embedding.flatten(), self.subject_embedding.flatten())
             if similarity < 0.6:
                 logger.info(f"Changement de sujet détecté (similarité: {similarity:.2f})")
                 self._archive_current_subject()
                 self.current_subject = message
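Because both vectors come out of `encode(..., normalize_embeddings=True)`, the `np.dot` above is exactly a cosine similarity, so the 0.6 cutoff is a cosine threshold. Isolated as a sketch:

# Sketch of the subject-change test used above (vectors assumed L2-normalized).
import numpy as np

def subject_changed(prev_emb: np.ndarray, new_emb: np.ndarray, threshold: float = 0.6) -> bool:
    cosine = float(np.dot(prev_emb.flatten(), new_emb.flatten()))
    return cosine < threshold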
@@ -122,26 +130,23 @@ class ConversationContext:
             else:
                 self.message_count += 1
                 self.subject_keywords.extend(keywords)
                 self.subject_keywords = list(set(self.subject_keywords))[:10]
         else:
             self.current_subject = message
             self.subject_embedding = embedding
             self.subject_start_time = datetime.now()
             self.message_count = 1
             self.subject_keywords = keywords
 
     def _extract_keywords(self, text: str) -> List[str]:
         stopwords = {'le', 'la', 'les', 'un', 'une', 'des', 'de', 'du', 'et', 'ou',
                      'est', 'sont', 'à', 'au', 'en', 'pour', 'dans', 'sur', 'avec'}
         words = text.lower().split()
         keywords = [w for w in words if len(w) > 3 and w not in stopwords]
         return keywords[:5]
 
     def _archive_current_subject(self):
-        """Archive le sujet actuel avant de passer au suivant"""
         if self.current_subject and memory_index.ntotal > 0:
-            # Créer un résumé du sujet archivé
             summary = {
                 "subject": self.current_subject[:200],
                 "keywords": self.subject_keywords,
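The keyword extractor is a plain stopword-and-length filter; note that the naive `split()` keeps punctuation glued to words unless it is space-separated. For example (sketch):

# Sketch reproducing _extract_keywords on a sample sentence.
stopwords = {'le', 'la', 'les', 'un', 'une', 'des', 'de', 'du', 'et', 'ou',
             'est', 'sont', 'à', 'au', 'en', 'pour', 'dans', 'sur', 'avec'}
text = "Quelle est la population actuelle de Libreville au Gabon ?"
words = text.lower().split()
print([w for w in words if len(w) > 3 and w not in stopwords][:5])
# -> ['quelle', 'population', 'actuelle', 'libreville', 'gabon']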
@@ -202,16 +207,12 @@ def chunk_text(text: str, chunk_size: int = 400, overlap: int = 50) -> List[str]
     return chunks
 
 def add_to_memory_realtime(user_msg: str, ai_response: str, subject_keywords: List[str]):
-    """Ajoute une entrée mémoire en temps réel avec métadonnées enrichies"""
     timestamp = datetime.now().isoformat()
 
-    # Créer une entrée mémoire enrichie
     memory_entry = f"""[{timestamp}]
 Sujet: {', '.join(subject_keywords)}
 Utilisateur: {user_msg}
 Kibali: {ai_response}"""
 
-    # Métadonnées
     metadata = {
         "timestamp": timestamp,
         "subject_keywords": subject_keywords,
@@ -219,61 +220,45 @@ Kibali: {ai_response}"""
         "ai_length": len(ai_response),
         "hash": hashlib.md5(memory_entry.encode()).hexdigest()
     }
 
-    # Éviter les doublons
     if metadata["hash"] not in [m.get("hash") for m in memory_metadata]:
         memory_texts.append(memory_entry)
         memory_metadata.append(metadata)
 
-        # Ajout vectoriel
         mem_emb = embed_model.encode([memory_entry], normalize_embeddings=True).astype('float32')
         memory_index.add(mem_emb)
 
         logger.info(f"Mémoire ajoutée en temps réel: {subject_keywords} (total: {len(memory_texts)})")
         return True
     return False
 
 def retrieve_adaptive_memory(query: str, k: int = 5) -> tuple:
-    """Récupère la mémoire de façon adaptative selon le contexte"""
     if memory_index.ntotal == 0:
         return [], []
 
     query_emb = embed_model.encode([query], normalize_embeddings=True).astype('float32')
 
-    # Recherche de base
-    k_search = min(k * 2, memory_index.ntotal)  # Chercher plus pour filtrer ensuite
+    k_search = min(k * 2, memory_index.ntotal)
     D, I = memory_index.search(query_emb, k=k_search)
 
-    # Filtrage intelligent avec scoring
     results = []
     for dist, idx in zip(D[0], I[0]):
         if 0 <= idx < len(memory_texts):
             metadata = memory_metadata[idx] if idx < len(memory_metadata) else {}
 
-            # Score de pertinence
             recency_score = 1.0 / (1 + (datetime.now() - datetime.fromisoformat(metadata.get("timestamp", datetime.now().isoformat()))).seconds / 3600)
             similarity_score = 1.0 / (1 + dist)
 
-            # Bonus si les mots-clés du sujet actuel correspondent
             keyword_bonus = 0
             if conversation_ctx.subject_keywords:
                 text_lower = memory_texts[idx].lower()
                 keyword_bonus = sum(1 for kw in conversation_ctx.subject_keywords if kw in text_lower) * 0.1
 
             total_score = similarity_score * 0.6 + recency_score * 0.3 + keyword_bonus
 
             results.append({
                 "text": memory_texts[idx],
                 "score": total_score,
                 "metadata": metadata
             })
 
-    # Trier par score et prendre les top k
     results = sorted(results, key=lambda x: x["score"], reverse=True)[:k]
 
     texts = [r["text"] for r in results]
     scores = [r["score"] for r in results]
 
     return texts, scores
 
 # --- ROUTES ---
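The retrieval score blends three signals: inverse L2 distance (weight 0.6), recency with an hourly decay (weight 0.3), and a 0.1 bonus per matching subject keyword. One caveat visible in the code: `timedelta.seconds` wraps at 24 hours (`.total_seconds()` would count full age), so entries older than a day can look artificially recent. The formula, isolated as a sketch:

# Sketch of the composite score computed in retrieve_adaptive_memory.
def memory_score(l2_dist: float, age_seconds: float, keyword_hits: int) -> float:
    similarity = 1.0 / (1.0 + l2_dist)            # in (0, 1], higher = closer
    recency = 1.0 / (1.0 + age_seconds / 3600.0)  # halves after one hour
    return similarity * 0.6 + recency * 0.3 + keyword_hits * 0.1

print(memory_score(0.4, 600.0, 2))  # a recent, similar entry with two keyword hits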
@@ -295,18 +280,17 @@ async def chat(request: ChatRequest):
     user_message = request.messages[-1].content.strip()
     if not user_message:
         raise HTTPException(status_code=400, detail="Message vide")
 
     geo = {
         "latitude": request.latitude,
         "longitude": request.longitude,
         "city": request.city or "Libreville"
     }
 
-    # Mise à jour du contexte conversationnel en temps réel
     user_emb = embed_model.encode([user_message], normalize_embeddings=True).astype('float32')
     conversation_ctx.update_subject(user_message, user_emb)
 
-    #
+    # RAG Documents PDF
     rag_context = ""
     rag_sources = []
     if doc_index.ntotal > 0 and len(doc_chunks) > 0:
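For reference, a minimal client for this route. The payload field names (messages, latitude, longitude, city, thinking_mode) are read off the handler above; the role/content shape of a message is an assumption about the Pydantic model, which sits outside this diff:

# chat_client.py (hypothetical client, not part of the commit)
import json
import urllib.request

payload = {
    "messages": [{"role": "user", "content": "Quel temps fait-il à Libreville ?"}],
    "latitude": 0.39, "longitude": 9.45, "city": "Libreville",
    "thinking_mode": False,
}
req = urllib.request.Request(
    "http://localhost:7860/chat",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req, timeout=180) as resp:
    print(json.load(resp)["response"])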
@@ -319,72 +303,62 @@
                 rag_sources.append(doc_metadata[idx].get("source", "PDF"))
         if relevant_chunks:
             rag_context = "\n\n".join([f"Document : {chunk}" for chunk in relevant_chunks])
 
-    #
+    # Mémoire adaptative
     memory_context = ""
     memory_texts_filtered, memory_scores = retrieve_adaptive_memory(user_message, k=5)
     if memory_texts_filtered:
         memory_context = "\n\n".join([f"Mémoire (score: {score:.2f}): {text}"
                                       for text, score in zip(memory_texts_filtered, memory_scores)])
 
-    # 3. Réflexion stratégique
+    # Réflexion stratégique
     if request.thinking_mode:
         execute_reflection_plan(
             user_message,
             geo_info=geo,
             messages=request.messages,
             current_subject=conversation_ctx.current_subject,
             subject_keywords=conversation_ctx.subject_keywords
         )
 
-    #
+    # Recherche Web
     search_query = user_message
     if conversation_ctx.subject_keywords:
         search_query = f"{user_message} {' '.join(conversation_ctx.subject_keywords[:3])} Gabon"
 
     search_results = web_search(search_query)
     web_context = "\n".join([f"- {r['content'][:500]}" for r in search_results.get("results", [])[:6]])
     web_images = search_results.get("images", [])[:4]
 
-    #
+    # Prompt final
     system_prompt = f"""Tu es Kibali, un assistant IA chaleureux, précis et expert du Gabon, basé à {geo['city']}.
 Réponds toujours en français, de façon naturelle, concise et factuelle.
-
 CONTEXTE CONVERSATIONNEL ACTUEL:
 - Sujet en cours: {', '.join(conversation_ctx.subject_keywords) if conversation_ctx.subject_keywords else 'Nouveau sujet'}
 - Nombre de messages sur ce sujet: {conversation_ctx.message_count}
-
 PRIORITÉ DES SOURCES:
 1. Documents uploadés (PDF Vault) - Source la plus fiable
 2. Mémoire conversationnelle récente et pertinente
 3. Informations Web actualisées
-
 Si une information vient d'un document uploadé, mentionne-le brièvement.
 Adapte-toi aux changements brusques de sujet en restant cohérent."""
 
     full_prompt = f"""### INSTRUCTIONS STRICTES :
 {system_prompt}
-
 ### CONTEXTE DOCUMENTS (PDF Vault) :
 {rag_context if rag_context else "Aucun document pertinent trouvé."}
-
 ### HISTORIQUE PERTINENT (Mémoire adaptative) :
 {memory_context if memory_context else "Pas d'historique pertinent."}
-
 ### INFORMATIONS WEB RÉCENTES :
 {web_context if web_context else "Pas d'informations web disponibles."}
-
 ### QUESTION :
 {user_message}
-
 ### RÉPONSE (en français uniquement) :
 """
 
     inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=8192).to(model.device)
     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=120.0)
 
     def generate_stream():
         try:
             model.generate(
@@ -400,25 +374,23 @@ Adapte-toi aux changements brusques de sujet en restant cohérent."""
             )
         except Exception as e:
             logger.error(f"Erreur génération : {e}")
 
     thread = Thread(target=generate_stream)
     thread.start()
 
     response_text = ""
     for new_text in streamer:
         if new_text is not None:
             response_text += new_text
     response_text = response_text.strip()
 
-    # Ajout en temps réel à la mémoire
     if response_text:
         add_to_memory_realtime(
             user_message,
             response_text,
             conversation_ctx.subject_keywords
         )
 
-    # Informations contextuelles
     context_info = {
         "subject_keywords": conversation_ctx.subject_keywords,
         "message_count": conversation_ctx.message_count,
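The generate-in-a-background-thread pattern above is the standard way to drive TextIteratorStreamer. Note that although a streamer is used, the route still accumulates the full text and returns a single ChatResponse, so the streamer mostly serves as a token pump with a 120 s timeout. The pattern in isolation, as a sketch reusing `model`, `tokenizer` and `inputs` from above:

# Sketch of the threaded streaming pattern used by the /chat route.
from threading import Thread

from transformers import TextIteratorStreamer

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=120.0)
thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 256})
thread.start()
text = "".join(chunk for chunk in streamer)  # iterating blocks until generation ends
thread.join()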
@@ -426,31 +398,25 @@
         "rag_sources": list(set(rag_sources)),
         "web_results": len(search_results.get("results", []))
     }
 
     return ChatResponse(response=response_text, images=web_images, context_info=context_info)
 
 @app.post("/upload")
 async def upload(files: List[UploadFile] = File(...)):
     total_added = 0
     processed_files = 0
-
     for file in files:
         if not file.filename.lower().endswith(".pdf"):
             continue
-
         try:
             content = await file.read()
             text = extract_text_from_pdf(content)
-
             if not text:
                 logger.warning(f"Aucun texte extrait de {file.filename}")
                 continue
-
             chunks = chunk_text(text)
             if not chunks:
                 continue
-
-            # Métadonnées pour chaque chunk
             timestamp = datetime.now().isoformat()
             for chunk in chunks:
                 doc_metadata.append({
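chunk_text itself is outside this diff; only its signature shows (chunk_size=400, overlap=50). A plausible sliding-window implementation matching that signature, purely as an assumption for illustration:

# Sketch only: the real chunk_text body is not part of this diff.
from typing import List

def chunk_text(text: str, chunk_size: int = 400, overlap: int = 50) -> List[str]:
    chunks: List[str] = []
    step = chunk_size - overlap
    for start in range(0, max(len(text), 1), step):
        piece = text[start:start + chunk_size]
        if piece.strip():
            chunks.append(piece)
        if start + chunk_size >= len(text):
            break
    return chunks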
@@ -458,19 +424,14 @@
                     "timestamp": timestamp,
                     "length": len(chunk)
                 })
-
             embeddings = embed_model.encode(chunks, normalize_embeddings=True).astype('float32')
             doc_index.add(embeddings)
             doc_chunks.extend(chunks)
-
             total_added += len(chunks)
             processed_files += 1
-
             logger.info(f"Upload réussi : {file.filename} → {len(chunks)} chunks ajoutés")
-
         except Exception as e:
             logger.error(f"Erreur lors du traitement de {file.filename} : {e}")
-
     return {
         "status": "success",
         "files_processed": processed_files,
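And a matching client sketch for the PDF endpoint, assuming the requests package and a local file name; the multipart field must be named files to bind to the List[UploadFile] parameter:

# upload_client.py (hypothetical client, not part of the commit)
import requests

with open("rapport.pdf", "rb") as f:  # any local PDF
    r = requests.post(
        "http://localhost:7860/upload",
        files=[("files", ("rapport.pdf", f, "application/pdf"))],
    )
print(r.json())  # e.g. {"status": "success", "files_processed": 1, ...}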
@@ -484,7 +445,6 @@ async def upload_pdfs(files: List[UploadFile] = File(...)):
 
 @app.post("/clear-memory")
 async def clear_memory():
-    """Efface la mémoire conversationnelle"""
     global memory_index, memory_texts, memory_metadata
     memory_index = faiss.IndexFlatL2(dimension)
     memory_texts = []
@@ -495,6 +455,6 @@ async def clear_memory():
 # --- DEMARRAGE ---
 @app.on_event("startup")
 async def startup_event():
-    logger.info("🚀 Kibali AI API démarrée avec succès !")
-    logger.info(f"Accès :
+    logger.info("🚀 Kibali AI API démarrée avec succès sur Hugging Face Spaces !")
+    logger.info(f"Accès : https://your-username-your-space.hf.space | Docs : /docs")
     logger.info(f"Mémoire adaptative et réflexion contextuelle activées ✓")