import gradio as gr
from openai import OpenAI
import os
import asyncio
import re
from pydantic import AnyUrl
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

cle_api = os.environ.get("CLE_API_MISTRAL")

# Initialize the Mistral client (OpenAI-compatible API)
client = OpenAI(api_key=cle_api, base_url="https://api.mistral.ai/v1")

def extract_csv_uri_and_question(text: str):
    """Return (uri, question). If no URI is found, uri is None."""
    m = re.search(r"(csv://\S+)", text)
    if not m:
        return None, text.strip()
    uri = m.group(1).rstrip(",.;")  # strip common trailing punctuation
    question = text.replace(m.group(1), "").strip()
    if question == "":
        question = "Give an overview and a summary of the data."
    return uri, question
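
# Example (hypothetical resource name, for illustration only):
#   extract_csv_uri_and_question("csv://sales_2024 what is the average price?")
#   -> ("csv://sales_2024", "what is the average price?")
#   extract_csv_uri_and_question("csv://sales_2024")
#   -> ("csv://sales_2024", "Give an overview and a summary of the data.")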

async def fetch_csv_preview(uri: str) -> str:
    # Parameters for launching the provider as a subprocess
    params = StdioServerParameters(
        command="python",
        args=["csv_provider.py"],
    )
    # Open a stdio transport to the subprocess, then an MCP session on top of it
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            resp = await session.read_resource(AnyUrl(uri))
            # read_resource returns a list of contents; a CSV resource
            # is expected to come back as a single text part
            return resp.contents[0].text

def trim_context(text: str, max_chars: int = 4000) -> str:
    if not text:
        return ""
    if len(text) <= max_chars:
        return text
    # keep the head, which is the useful part of a CSV preview
    return text[:max_chars] + "\n... (truncated)"

def call_llm(messages, model="mistral-small-latest", max_tokens=512, temperature=0.2):
    """Synchronous call to the LLM (OpenAI-compatible)."""
    resp = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
    )
    # With the openai v1 SDK, message is an object, not a dict
    return resp.choices[0].message.content.strip()
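
# Example payload shape (OpenAI-compatible chat format; contents illustrative):
#   call_llm([
#       {"role": "system", "content": "You are a helpful assistant."},
#       {"role": "user", "content": "What columns does the CSV contain?"},
#   ])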

# --- CHATBOT LOGIC ---
def chatbot(message, history):
    history = history or []
    history.append(("You: " + message, ""))

    uri, question = extract_csv_uri_and_question(message)

    # Prepare the system prompt
    system_prompt = (
        "You are a helpful assistant. If a csv://... resource is provided, use ITS CONTENT "
        "to answer the questions precisely. Do not guess missing values. "
        "Keep the answer concise and give examples where relevant."
    )
    # If the user asked to use a CSV -> fetch it via MCP
    csv_text = None
    if uri:
        try:
            csv_text = asyncio.run(fetch_csv_preview(uri))
        except Exception as e:
            reply = f"MCP fetch error for {uri}: {e}"
            history[-1] = (history[-1][0], "Bot: " + reply)
            return history, ""
    # Build the messages for the LLM
    messages = [{"role": "system", "content": system_prompt}]
    if csv_text:
        context = trim_context(csv_text, max_chars=4000)
        messages.append({"role": "system", "content": f"--- CSV CONTEXT ({uri}) ---\n{context}\n--- END CONTEXT ---"})
        user_content = f"Question (based on the CSV):\n{question}"
    else:
        user_content = question
    messages.append({"role": "user", "content": user_content})
    # Call the LLM
    try:
        llm_reply = call_llm(messages)
    except Exception as e:
        llm_reply = f"LLM error: {e}"
    history[-1] = (history[-1][0], "Bot: " + llm_reply)
    # Return the updated history and an empty string to clear the textbox
    return history, ""

with gr.Blocks() as demo:
    chatbot_ui = gr.Chatbot(label="ChatBot")
    msg = gr.Textbox(placeholder="Type a message...")
    # On submit: update the chat history and clear the input box
    msg.submit(chatbot, [msg, chatbot_ui], [chatbot_ui, msg])

demo.launch()
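
# ----------------------------------------------------------------------
# For reference: fetch_csv_preview() above spawns "csv_provider.py" as an
# MCP stdio server, but that file is not part of this listing. The sketch
# below is a minimal guess at what it might contain, assuming the FastMCP
# helper from the official `mcp` SDK and CSV files stored on disk as
# "<name>.csv"; the resource template and file layout are assumptions,
# not the Space's actual code. If used, save it as a separate
# csv_provider.py next to this app.
#
# from mcp.server.fastmcp import FastMCP
# import pandas as pd
#
# mcp = FastMCP("csv-provider")
#
# @mcp.resource("csv://{name}")
# def read_csv_preview(name: str) -> str:
#     """Serve the first rows of <name>.csv as a text preview."""
#     df = pd.read_csv(f"{name}.csv")
#     return df.head(20).to_csv(index=False)
#
# if __name__ == "__main__":
#     mcp.run()  # FastMCP defaults to the stdio transport
# ----------------------------------------------------------------------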