# src/modules/jasmine_agent.py
"""
MODULE JASMINE AGENT - V FINAL (AIP LOGIC KERNEL + GROQ)
========================================================
Responsabilité : Traduire le langage naturel en actions ontologiques précises.
"""
import google.generativeai as genai
from groq import Groq
import json
import streamlit as st
import os
import re
import sys
# Gestion des imports
try:
from src.core.schema_extractor import SchemaExtractor
from src.agent.query_planner import QueryPlanner
except ImportError:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
from src.core.schema_extractor import SchemaExtractor
from src.agent.query_planner import QueryPlanner
class JasmineAgent:
    """AIP Logic kernel for VORTEX.

    Translates natural-language questions into precise ontology actions
    (SPARQL tool calls over the knowledge graph), using a cascade of LLM
    backends: Gemini models via google-generativeai and Llama models via Groq.
    """

    def __init__(self, rdf_store, ontology_rules):
        """Resolve API keys, configure the LLM clients and keep graph handles.

        Args:
            rdf_store: RDF store handed to SchemaExtractor / QueryPlanner in ask().
            ontology_rules: ontology rule set, kept on the instance.
        """
        self.google_key = self._resolve_key("GOOGLE_API_KEY")
        if self.google_key:
            genai.configure(api_key=self.google_key)
        self.groq_key = self._resolve_key("GROQ_API_KEY")
        self.groq_client = Groq(api_key=self.groq_key) if self.groq_key else None
        self.rdf_store = rdf_store
        self.ontology_rules = ontology_rules
        # Tried in order by ask(); backends whose key is missing are skipped.
        self.MODEL_CASCADE = [
            "gemini-2.0-flash-exp",
            "llama-3.3-70b-versatile",
            "gemini-2.0-flash-lite",
            "llama-3.1-8b-instant"
        ]

    @staticmethod
    def _resolve_key(name):
        """Return API key *name* from the environment, then Streamlit secrets.

        BUGFIX: accessing ``st.secrets`` raises (FileNotFoundError /
        StreamlitSecretNotFoundError) when no secrets.toml file exists, so the
        original ``os.environ.get(...) or st.secrets.get(...)`` crashed on
        deployments that rely on environment variables only.
        """
        value = os.environ.get(name)
        if value:
            return value
        try:
            return st.secrets.get(name)
        except Exception:
            # No secrets file (or unreadable): the key is simply not configured.
            return None

    def _get_prefixes(self):
        """Return the mandatory SPARQL PREFIX block prepended to every query."""
        return """
PREFIX vortex: <http://vortex.ai/ontology#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
"""

    def build_base_system_prompt(self):
        """Build the base system prompt (ontology map, navigation and
        calculation rules, strict JSON output contract)."""
        return f"""
ROLE: Tu es le Cerveau Numérique (AIP Logic) de VORTEX.
MISSION: Répondre aux questions en interrogeant le Knowledge Graph via SPARQL.
--- TON TERRITOIRE (ONTOLOGIE) ---
1. Prêts : vortex:Pret (liés via 'a_emprunteur' à Client, 'est_garanti_par' à Garant).
2. Clients : vortex:Client (propriété 'nom' ou 'rdfs:label' pour le nom complet).
3. Montants :
- 'vortex:montant' : Donnée brute (peut contenir des doublons).
- 'vortex:montant_net' : GOLDEN RECORD (Valeur unique et fiable pour les calculs).
--- RÈGLES DE NAVIGATION (CRITIQUES) ---
Pour chercher un dossier par NOM (ex: "Koulnan" ou "Aichatou") :
NE JAMAIS faire : ?pret vortex:a_emprunteur 'Koulnan'. (Car a_emprunteur pointe vers un ID technique).
TU DOIS FAIRE UNE JOINTURE :
?pret vortex:a_emprunteur ?client_id .
?client_id rdfs:label ?nom_client .
FILTER(CONTAINS(LCASE(STR(?nom_client)), 'koulnan'))
--- RÈGLES DE CALCUL ---
Pour une SOMME, un MONTANT TOTAL ou un SEUIL (> 100000) :
1. Utilise TOUJOURS 'vortex:montant_net' (le Golden Record).
2. Utilise TOUJOURS l'outil 'execute_sparql'.
3. NE JAMAIS utiliser 'search_semantic' pour des filtres numériques.
--- RÈGLES TECHNIQUES ---
- Namespace UNIQUE : <http://vortex.ai/ontology#> (prefix: vortex).
- Préfixes OBLIGATOIRES :
{self._get_prefixes()}
CONTRAINTES DE SORTIE (JSON STRICT) :
{{
"thought_trace": "1. ANALYSE: ... 2. EXTRACTION: ... 3. NAVIGATION: ... 4. OUTIL: ...",
"tool": "execute_sparql" | "search_semantic" | "none",
"args": {{ "query": "..." }}
}}
"""

    def _format_messages_for_groq(self, system_prompt, chat_history, user_message):
        """Build an OpenAI-style message list for the Groq chat API.

        Keeps the last 4 history turns, dropping tool-trace messages (marked
        with the wrench emoji) and empty ones.
        """
        msgs = [{"role": "system", "content": system_prompt}]
        for m in chat_history[-4:]:
            # Groq only knows 'assistant'/'user'; map Gemini's 'model' role.
            # BUGFIX: .get() instead of m["role"] so a malformed history entry
            # cannot raise KeyError (it falls back to 'user').
            role = "assistant" if m.get("role") in ["model", "assistant"] else "user"
            content = str(m.get("content", ""))
            if "🛠️" not in content and content.strip():
                msgs.append({"role": role, "content": content})
        msgs.append({"role": "user", "content": user_message})
        return msgs

    def _format_messages_for_gemini(self, system_prompt, chat_history, user_message):
        """Build a Gemini 'parts'-style message list.

        The system prompt is sent as a first user turn (Gemini has no
        dedicated system role in this message format).
        """
        msgs = [{"role": "user", "parts": [system_prompt]}]
        for m in chat_history[-4:]:
            # BUGFIX: .get() instead of m["role"] to tolerate malformed entries.
            role = "user" if m.get("role") == "user" else "model"
            content = str(m.get("content", ""))
            if "🛠️" not in content and content.strip():
                msgs.append({"role": role, "parts": [content]})
        msgs.append({"role": "user", "parts": [user_message]})
        return msgs

    def ask(self, user_message, chat_history):
        """Answer *user_message* against the knowledge graph.

        Returns a (text, action, thought_trace) triple:
        - text: plain answer (or error message) when no JSON action was parsed,
          empty string otherwise;
        - action: the parsed JSON tool-call dict, or None;
        - thought_trace: the model's chain-of-thought, or the planner log.
        """
        extractor = SchemaExtractor(self.rdf_store)
        real_schema = extractor.get_real_schema()
        schema_prompt = extractor.generate_prompt_schema()
        planner = QueryPlanner(self.rdf_store, real_schema)
        plan = planner.analyze_and_plan(user_message)
        planning_log = planner.get_planning_logs(plan)
        full_system_prompt = f"""
{self.build_base_system_prompt()}
--- 🗺️ TERRITOIRE (SCHÉMA RDF RÉEL) ---
{schema_prompt}
--- 🎯 PLAN TACTIQUE ---
STRATÉGIE: {plan['strategy']}
CONTEXTE: {plan['reason']}
"""
        last_error = None
        for model_name in self.MODEL_CASCADE:
            try:
                if "gemini" in model_name:
                    if not self.google_key:
                        continue
                    msgs = self._format_messages_for_gemini(full_system_prompt, chat_history, user_message)
                    model = genai.GenerativeModel(model_name)
                    # Force JSON mode for Gemini.
                    res = model.generate_content(msgs, generation_config={"response_mime_type": "application/json"})
                    response_text = res.text
                else:
                    if not self.groq_client:
                        continue
                    msgs = self._format_messages_for_groq(full_system_prompt, chat_history, user_message)
                    completion = self.groq_client.chat.completions.create(
                        model=model_name, messages=msgs, temperature=0.0, response_format={"type": "json_object"}
                    )
                    response_text = completion.choices[0].message.content
                clean_text = response_text.strip()
                # Strip markdown fences the model may wrap around the JSON.
                # BUGFIX: anchored at the string edges so legitimate backticks
                # inside the payload are no longer removed.
                clean_text = re.sub(r'^```(?:json)?\s*|\s*```$', '', clean_text)
                try:
                    action = json.loads(clean_text)
                except json.JSONDecodeError:
                    # Not valid JSON: surface the raw text as a plain answer.
                    return clean_text, None, planning_log
                # Safety net: SPARQL queries must carry the mandatory prefixes.
                if action.get("tool") == "execute_sparql":
                    # BUGFIX: tolerate a missing 'args' key (original raised
                    # KeyError and silently discarded a valid response).
                    args = action.setdefault("args", {})
                    raw_query = args.get("query", "")
                    if "PREFIX vortex:" not in raw_query:
                        args["query"] = self._get_prefixes() + "\n" + raw_query
                # Chain of thought: prefer the model's trace, else the planner log.
                thought_trace = action.get("thought_trace", planning_log)
                return "", action, thought_trace
            except Exception as e:
                # Backend failure: remember it and fall through the cascade.
                last_error = str(e)
                continue
        if last_error is None:
            # BUGFIX: no backend was even tried (no API key configured);
            # the original printed 'ERREUR SYSTEME : None'.
            last_error = "aucun modèle LLM disponible (clé API manquante)"
        return f"⚠️ ERREUR SYSTEME : {last_error}", None, planning_log