Eddyhzd committed · Commit 81cf8d0 · 1 Parent(s): 2299cb4

FIX: mcp + llm
app.py CHANGED
@@ -1,30 +1,98 @@
 import gradio as gr
 from openai import OpenAI
 import os
+import asyncio
+import re
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.stdio import stdio_client
 
 cle_api = os.environ.get("CLE_API_MISTRAL")
 
 # Initialize the Mistral client (OpenAI-compatible API)
 client = OpenAI(api_key=cle_api, base_url="https://api.mistral.ai/v1")
 
-
+def extract_csv_uri_and_question(text: str):
+    """Return (uri, question). If there is no URI, uri is None."""
+    m = re.search(r"(csv://\S+)", text)
+    if not m:
+        return None, text.strip()
+    uri = m.group(1).rstrip(",.;")  # strip common trailing punctuation
+    question = text.replace(m.group(1), "").strip()
+    if question == "":
+        question = "Donne un aperçu et un résumé des données."
+    return uri, question
+
+async def fetch_csv_preview(uri: str) -> str:
+    # Parameters for launching the provider as a stdio subprocess
+    params = StdioServerParameters(command="python", args=["csv_provider.py"])
+    # Open the stdio transport, then an MCP session on top of it
+    async with stdio_client(params) as (read, write):
+        async with ClientSession(read, write) as session:
+            await session.initialize()
+            # contents is a list of text/blob items; a CSV preview is text
+            resp = await session.read_resource(uri)
+            return resp.contents[0].text
+
+def trim_context(text: str, max_chars: int = 4000) -> str:
+    if not text:
+        return ""
+    if len(text) <= max_chars:
+        return text
+    # keep the head, the useful part of a CSV preview
+    return text[:max_chars] + "\n... (truncated)"
+
+def call_llm(messages, model="mistral-large", max_tokens=512, temperature=0.2):
+    """Synchronous call to the LLM via the 1.x OpenAI-compatible client."""
+    resp = client.chat.completions.create(
+        model=model,
+        messages=messages,
+        max_tokens=max_tokens,
+        temperature=temperature,
+    )
+    return resp.choices[0].message.content.strip()
+
+# --- CHATBOT LOGIC ---
 def chatbot(message, history):
-
-
-    for user_msg, bot_msg in history:
-        messages.append({"role": "user", "content": user_msg})
-        messages.append({"role": "assistant", "content": bot_msg})
-
-    messages.append({"role": "user", "content": message})
+    history = history or []
+    history.append(("Vous: " + message, ""))
 
-
-
-
-
+    uri, question = extract_csv_uri_and_question(message)
+
+    # Prepare the system prompt
+    system_prompt = (
+        "Tu es un assistant utile. Si une ressource csv://... est fournie, utilise SON CONTENU "
+        "pour répondre précisément aux questions. Ne devine pas les valeurs absentes. "
+        "Rends la réponse concise et fournis des exemples si pertinent."
     )
 
-
-
+    # If the user asked to use a CSV -> fetch it via MCP
+    csv_text = None
+    if uri:
+        try:
+            csv_text = asyncio.run(fetch_csv_preview(uri))
+        except Exception as e:
+            reply = f"Erreur de récupération MCP pour {uri} : {e}"
+            history[-1] = (history[-1][0], "Bot: " + reply)
+            return history, history
+
+    # Build the message list for the LLM
+    messages = [{"role": "system", "content": system_prompt}]
+    if csv_text:
+        context = trim_context(csv_text, max_chars=4000)
+        messages.append({"role": "system", "content": f"--- CONTEXTE CSV ({uri}) ---\n{context}\n--- FIN CONTEXTE ---"})
+        user_content = f"Question (en se basant sur le CSV):\n{question}"
+    else:
+        user_content = question
+
+    messages.append({"role": "user", "content": user_content})
+
+    # Call the LLM
+    try:
+        llm_reply = call_llm(messages)
+    except Exception as e:
+        llm_reply = f"Erreur LLM : {e}"
+
+    history[-1] = (history[-1][0], "Bot: " + llm_reply)
     return history, history
 
 with gr.Blocks() as demo:
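
Note: the commit launches csv_provider.py as an MCP stdio server, but that file is not part of this diff. For orientation, a minimal sketch of what such a provider could look like, written with the FastMCP helper from the same mcp SDK; the csv://{name} URI template and the data/ directory layout are assumptions, not code from this repo.

# csv_provider.py — hypothetical sketch, not the repo's actual provider
from pathlib import Path

from mcp.server.fastmcp import FastMCP

mcp = FastMCP("csv-provider")

@mcp.resource("csv://{name}")
def read_csv(name: str) -> str:
    """Serve data/<name> as a csv:// resource (assumed file layout)."""
    return (Path("data") / name).read_text(encoding="utf-8")

if __name__ == "__main__":
    # stdio is FastMCP's default transport, matching the
    # StdioServerParameters(command="python", args=["csv_provider.py"]) above
    mcp.run()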
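The URI-extraction and trimming helpers are pure functions, so a quick smoke test can exercise them without the MCP server or a network call. Caveats: importing app.py constructs the OpenAI client at import time, so CLE_API_MISTRAL must be set, and if app.py calls demo.launch() at import time you would need to guard that call or copy the helpers out first. The sample csv:// URI below is made up.

# smoke_test.py — hypothetical quick check of the pure helpers in app.py
from app import extract_csv_uri_and_question, trim_context

# a URI plus a question: the URI is stripped out of the question text
uri, q = extract_csv_uri_and_question("csv://ventes.csv Quelle est la moyenne ?")
assert uri == "csv://ventes.csv"
assert q == "Quelle est la moyenne ?"

# no URI: the whole message becomes the question, uri stays None
uri, q = extract_csv_uri_and_question("Bonjour !")
assert uri is None and q == "Bonjour !"

# over-long context is cut at max_chars and marked as truncated
assert trim_context("x" * 5000, max_chars=100).endswith("... (truncated)")
print("ok")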