# RAG_architectures/graph_agentA.py
from typing import TypedDict, Annotated, Sequence

from langchain_core.messages import BaseMessage
from langgraph.graph import StateGraph, END
from langgraph.graph.message import add_messages

from config import llm
from pinecone_utilsA import retrieve_documents
# Graph state definition
class GraphState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], add_messages]
    query: str
    relevant_docs: list
    response: str
    k: int                       # number of documents to retrieve
    similarity_threshold: float  # minimum similarity score to keep a match
def generate_response(state: GraphState) -> dict:
    """Generate a response from the retrieved context using the LLM."""
    # Separate documents with blank lines so the model sees distinct passages.
    context = "\n\n".join(state["relevant_docs"])
    prompt = f"""
You are a text-analysis expert. Your task is to answer the user's question using the information provided.
If the information is insufficient, explain why and propose a hypothesis if possible.

Relevant information: {context}

Question: {state["query"]}

Answer:
"""
    response = llm.invoke(prompt)
    return {"response": response.content}
def retrieve(state: GraphState) -> dict:
    """Semantic retrieval: query the Pinecone index for relevant documents."""
    relevant_docs = retrieve_documents(
        state["query"],
        # Fallback values (assumed, not project defaults) so retrieve_documents
        # never receives None when the caller omits these keys.
        k=state.get("k", 5),
        similarity_threshold=state.get("similarity_threshold", 0.7),
    )
    return {"relevant_docs": relevant_docs}
def post_process_response(state: GraphState) -> dict:
    """Post-process the response: strip surrounding whitespace if it is a string."""
    response = state["response"].strip() if isinstance(state["response"], str) else state["response"]
    return {"response": response}
# Build the graph: a linear pipeline retrieve -> generate -> post_process
graph_builder = StateGraph(GraphState)
graph_builder.add_node("retrieve", retrieve)
graph_builder.add_node("generate", generate_response)
graph_builder.add_node("post_process", post_process_response)
graph_builder.set_entry_point("retrieve")
graph_builder.add_edge("retrieve", "generate")
graph_builder.add_edge("generate", "post_process")
graph_builder.add_edge("post_process", END)
agent = graph_builder.compile()
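
# Example invocation: a minimal sketch, not part of the original module.
# It assumes retrieve_documents returns a list of strings and that config.llm
# is a LangChain chat model; the query text and the k / similarity_threshold
# values below are illustrative, not project defaults.
if __name__ == "__main__":
    initial_state = {
        "messages": [],
        "query": "What are the main themes of the indexed corpus?",
        "relevant_docs": [],
        "response": "",
        "k": 5,
        "similarity_threshold": 0.7,
    }
    result = agent.invoke(initial_state)
    print(result["response"])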