Commit 9760e1f · 1 parent: 438d4f9
Sidebar updated
Files changed:
- __pycache__/graph_agentA.cpython-310.pyc +0 -0
- __pycache__/graph_agentB.cpython-310.pyc +0 -0
- __pycache__/pinecone_utilsA.cpython-310.pyc +0 -0
- app.py +49 -26
- graph_agentA.py +6 -1
- graph_agentB.py +3 -1
- pinecone_utilsA.py +11 -4
__pycache__/graph_agentA.cpython-310.pyc CHANGED
Binary files a/__pycache__/graph_agentA.cpython-310.pyc and b/__pycache__/graph_agentA.cpython-310.pyc differ
__pycache__/graph_agentB.cpython-310.pyc CHANGED
Binary files a/__pycache__/graph_agentB.cpython-310.pyc and b/__pycache__/graph_agentB.cpython-310.pyc differ
__pycache__/pinecone_utilsA.cpython-310.pyc CHANGED
Binary files a/__pycache__/pinecone_utilsA.cpython-310.pyc and b/__pycache__/pinecone_utilsA.cpython-310.pyc differ
app.py CHANGED

@@ -39,6 +39,9 @@ def process_query(query, architecture: Literal["A", "B", "C"]):
     display_chat_history()
 
     config = {"metadata": {"architecture": architecture}, "tags": ["arch_" + architecture]}
+    # Fetch only the dynamic parameters
+    k = st.session_state.get("k", 30)  # Number of documents
+    similarity_threshold = st.session_state.get("similarity_threshold", 0.7)  # Cosine similarity
 
     if architecture == "A":
         agent = agent_A
@@ -47,16 +50,16 @@ def process_query(query, architecture: Literal["A", "B", "C"]):
             "messages": [],
             "relevant_docs": [],
             "response": "",
+            "k": k,
+            "alpha": alpha,
         }
 
     elif architecture in ["B", "C"]:
         agent = agent_B if architecture == "B" else agent_C
 
-        # Retrieval
-        k = st.session_state.get("k", 30)  # Number of documents
+        # Fetch the alpha parameter only for B and C
         alpha = st.session_state.get("alpha", 0.5)  # Hybrid weighting
-
-
+
         initial_state = {
             "query": query,
             "messages": [],
@@ -147,21 +150,35 @@ def display_sidebar():
     # Token metrics containers
     # st.sidebar.markdown("### Tokens")
 
-    st.markdown("### Paramètres de la recherche")
+    st.markdown("### Paramètres de la recherche RAG")
 
     # Select the number of documents (k)
-
-
-
-
-
-
-
+    st.markdown("""
+    **Nombre de documents à récupérer (k)**
+    <small>🛈 Détermine combien de documents seront récupérés lors de la recherche.</small>
+    """, unsafe_allow_html=True)
+
+    st.number_input(" ", min_value=1, max_value=100, value=30, step=1, key="k")
+
+    # Select the cosine similarity score
+    st.markdown("""
+    **Score de similarité cosinus**
+    <small>🛈 Ce paramètre définit le seuil minimal de similarité entre deux vecteurs. Plus il est élevé, plus seuls les éléments très similaires seront considérés comme correspondants.</small>
+    """, unsafe_allow_html=True)
+    st.slider(" ", 0.0, 1.0, value=0.7, step=0.05, key="similarity_threshold")
+
+    # Show alpha only for B and C
+    if st.session_state.get("architecture") in ["B", "C"]:
+        st.markdown("""
+        **Équilibre entre recherche sémantique et syntaxique**
+        <small>🛈 Pour `alpha = 0.0`, la recherche est purement syntaxique. Pour `alpha = 1.0`, elle est purement sémantique.</small>
+        """, unsafe_allow_html=True)
+
+        st.slider(" ", 0.0, 1.0, value=0.5, step=0.05, key="alpha")  # Save the alpha value
+    else:
+        # Reset alpha when the architecture is A
+        st.session_state['alpha'] = None
 
-    # Save the values in the session
-    st.session_state['k'] = k
-    st.session_state['alpha'] = alpha
-    st.session_state['similarité'] = similarity_threshold
 
     st.sidebar.markdown("### API Mistral AI")
 
@@ -204,7 +221,6 @@ def display_sidebar():
     st.sidebar.markdown("2025 : Open source en Licence MIT")
     st.sidebar.markdown("info@bziiit.com")
 
-
 def display_chat_history():
     """Affiche l'historique de chat."""
     for message in st.session_state.chat_history:
@@ -256,21 +272,22 @@ def main():
         on_change=st.session_state.chat_history.clear
     )
 
+    # Save the architecture in st.session_state
+    if architecture == "Intermédiaire":
+        st.session_state["architecture"] = "B"
+    elif architecture == "Avancée":
+        st.session_state["architecture"] = "C"
+    else:
+        st.session_state["architecture"] = "A"
+
    display_sidebar()
 
-    if
+    if st.session_state.chat_history:
        display_chat_history()
 
    query = st.chat_input("Posez votre question ici:")
    if query:
-        if architecture == "Intermédiaire":
-            architecture = "B"
-        elif architecture == "Avancée":
-            architecture = "C"
-        else:
-            architecture = "A"
-
-        process_query(query, architecture)
+        process_query(query, st.session_state["architecture"])
 
 
 def calculate_tokens_cost():
@@ -290,6 +307,12 @@ def initialize_conversation():
         'output_tokens': 0,
         'total_tokens': 0
     }
+    if "k" not in st.session_state:
+        st.session_state['k'] = 30  # Default value for k
+    if "similarity_threshold" not in st.session_state:
+        st.session_state['similarity_threshold'] = 0.7  # Default value for similarity_threshold
+    if "alpha" not in st.session_state:
+        st.session_state['alpha'] = 0.5
     calculate_tokens_cost()
 
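For readers skimming the diff: the wiring above relies on Streamlit's widget keys. A widget created with `key="k"` writes its current value into `st.session_state["k"]` on every rerun, which is why `process_query` can read the sidebar settings back with `st.session_state.get(...)`. A minimal standalone sketch of that round trip (demo code, not part of this repo):

```python
# demo.py - run with `streamlit run demo.py` (illustrative, not repo code)
import streamlit as st

# Widgets with a `key` persist their value in st.session_state.
st.number_input("k", min_value=1, max_value=100, value=30, step=1, key="k")
st.slider("similarity threshold", 0.0, 1.0, value=0.7, step=0.05, key="similarity_threshold")

# Any later code in the same rerun (or another module) reads the keys back.
k = st.session_state.get("k", 30)
similarity_threshold = st.session_state.get("similarity_threshold", 0.7)
st.write(f"k={k}, similarity_threshold={similarity_threshold}")
```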
graph_agentA.py CHANGED

@@ -4,6 +4,7 @@ from langgraph.graph import StateGraph, END
 from langgraph.graph.message import add_messages
 from config import llm, client, langsmith_project
 from pinecone_utilsA import *
+import streamlit as st
 
 # Graph state definition
 class GraphState(TypedDict):
@@ -11,6 +12,8 @@ class GraphState(TypedDict):
     query: str
     relevant_docs: list
     response: str
+    k: int
+    similarity_threshold: float
 
 def generate_response(state: GraphState) -> dict:
     """Generate a response using the LLM."""
@@ -33,9 +36,11 @@ def post_process_response(state: GraphState) -> dict:
     response = state["response"].strip() if isinstance(state["response"], str) else state["response"]
     return {"response": response}
 
+k = st.session_state.get("k", 30)  # Default 30
+similarity_threshold = st.session_state.get('similarity_threshold', 0.7)  # Default 0.7
 # Build the graph
 graph_builder = StateGraph(GraphState)
-graph_builder.add_node("retrieve", lambda state: {"relevant_docs": retrieve_documents(state["query"])})
+graph_builder.add_node("retrieve", lambda state: {"relevant_docs": retrieve_documents(state["query"], k=k, similarity_threshold=similarity_threshold)})
 graph_builder.add_node("generate", generate_response)
 graph_builder.add_node("post_process", post_process_response)
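One thing to watch in this file: `k` and `similarity_threshold` are read from `st.session_state` at module level, so the retrieve lambda closes over whatever values were present when `graph_agentA` was first imported. A variant sketch (my assumption of an alternative, not what the commit does) that resolves the values on every graph run instead:

```python
# Hypothetical variant of the "retrieve" node: look up the sidebar values at call
# time rather than once at import time.
graph_builder.add_node(
    "retrieve",
    lambda state: {
        "relevant_docs": retrieve_documents(
            state["query"],
            k=st.session_state.get("k", 30),
            similarity_threshold=st.session_state.get("similarity_threshold", 0.7),
        )
    },
)
```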
graph_agentB.py CHANGED

@@ -22,7 +22,9 @@ def retrieve_combined(state: GraphState) -> dict:
     alpha = st.session_state.get("alpha", 0.5)  # Default 0.5
     similarity_threshold = st.session_state.get('similarity_threshold', 0.7)  # Default 0.7
 
-
+    print(f"k: {k}")
+    print(f"similarity_threshold: {similarity_threshold}")
+    print(f"alpha: {alpha}")
     relevant_docs = hybrid_search(state["query"], alpha=alpha, k=k, similarity_threshold=similarity_threshold)
     return {"relevant_docs": relevant_docs}
 
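The sidebar copy added in app.py describes `alpha` as a dial between syntactic (`alpha = 0.0`) and semantic (`alpha = 1.0`) retrieval. `hybrid_search` itself is not shown in this diff, but a common way to implement that weighting is a convex combination that scales the dense vector by `alpha` and the sparse vector by `1 - alpha`; a sketch under that assumption, not the repo's code:

```python
# Sketch of the usual hybrid weighting convention (assumed; hybrid_search's body
# is not part of this diff).
def weight_by_alpha(dense: list[float], sparse: dict, alpha: float):
    if not 0.0 <= alpha <= 1.0:
        raise ValueError("alpha must be between 0 and 1")
    scaled_dense = [v * alpha for v in dense]                     # semantic part
    scaled_sparse = {
        "indices": sparse["indices"],
        "values": [v * (1.0 - alpha) for v in sparse["values"]],  # syntactic part
    }
    return scaled_dense, scaled_sparse
```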
pinecone_utilsA.py CHANGED

@@ -71,16 +71,23 @@ def index_pdf(texts):
             indexA.upsert([(f"vec_{i}_{j}", vector.tolist(), metadata)])
             print(f"Indexation réussie pour le morceau '{small_chunk[:2000]}...'")
 
-def retrieve_documents(query):
+def retrieve_documents(query, k, similarity_threshold):
     """Récupère les documents pertinents en fonction de la requête."""
     model = SentenceTransformer('intfloat/multilingual-e5-large')
     query_vector = model.encode([query]).tolist()[0]
-    results = indexA.query(
+    results = indexA.query(
+        vector=query_vector,
+        top_k=k,
+        include_metadata=True
+    )
+
     relevant_docs = []
     for match in results.get("matches", []):
         if "metadata" in match and "compressed_text" in match["metadata"]:
-            compressed_text = match["metadata"]["compressed_text"]
-            relevant_docs.append(decompress_text(compressed_text))
+            score = match.get("score", 0)  # Similarity score
+            if score >= similarity_threshold:  # Filter by threshold
+                compressed_text = match["metadata"]["compressed_text"]
+                relevant_docs.append(decompress_text(compressed_text))
         else:
             print(f"Skipping match due to missing metadata or compressed_text: {match}")
     return relevant_docs
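With the new signature, the caller controls two separate things: `top_k=k` bounds how many matches Pinecone returns, and `similarity_threshold` then drops any returned match whose cosine `score` falls below the cutoff, so fewer than `k` documents may come back. A usage sketch with hypothetical values:

```python
# Hypothetical call (query text and values invented for illustration).
docs = retrieve_documents(
    "Quels sont les engagements RSE ?",   # example query
    k=30,                                 # top_k sent to Pinecone
    similarity_threshold=0.7,             # minimum cosine score to keep a match
)
print(f"{len(docs)} documents kept above the threshold")
```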