Spaces:
No application file
No application file
File size: 1,817 Bytes
b325aad |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
from src.agenticRAG.models.state import AgentState
from src.agenticRAG.components.llm_factory import LLMFactory
from src.agenticRAG.components.vectorstore import VectorStoreManager
from src.agenticRAG.prompt.prompts import Prompts
class RAGNode:
    """Retrieval-augmented generation node: answer a query from the knowledge base.

    On construction it obtains an LLM from the factory, creates a
    VectorStoreManager, and loads the vectorstore so retrieval is ready
    before the first call.
    """

    def __init__(self):
        self.llm = LLMFactory.get_llm()
        self.vectorstore_manager = VectorStoreManager()
        self.prompt = Prompts.RAG_RESPONSE
        # Eagerly load the vectorstore so process_rag can retrieve immediately.
        self.vectorstore_manager.load_vectorstore()

    def process_rag(self, state: AgentState) -> AgentState:
        """Retrieve documents for the upgraded query and generate a grounded answer.

        Mutates and returns `state`: sets `retrieved_docs`, `final_response`,
        and `metadata["rag_success"]`. On any failure the error text is kept
        in `metadata["rag_error"]` and a fallback message is returned instead
        of raising, so the graph keeps running.
        """
        try:
            retrieved = self.vectorstore_manager.search_documents(
                state.upgraded_query, k=3
            )
            state.retrieved_docs = retrieved

            # Join hits into a single context block; fall back to an explicit
            # "nothing found" marker so the prompt never receives empty context.
            if retrieved:
                context = "\n".join(retrieved)
            else:
                context = "No relevant documents found."

            rag_chain = self.prompt | self.llm
            result = rag_chain.invoke(
                {"query": state.upgraded_query, "context": context}
            )
            state.final_response = result.content
            state.metadata["rag_success"] = True
        except Exception as err:
            # Graceful degradation: record the failure and answer with a
            # canned apology rather than crashing the graph run.
            state.final_response = "Sorry, I couldn't retrieve information from the knowledge base."
            state.metadata["rag_success"] = False
            state.metadata["rag_error"] = str(err)
        return state
# Node function for LangGraph.
# A single shared RAGNode is built lazily on first use: constructing one per
# call (as a naive implementation would) re-creates the LLM client and
# re-loads the vectorstore on every graph invocation.
_RAG_PROCESSOR = None


def rag_node(state: AgentState) -> AgentState:
    """Node function for RAG processing.

    Delegates to a process-wide RAGNode instance, creating it on the first
    call only, then returns the state updated by `RAGNode.process_rag`.
    """
    global _RAG_PROCESSOR
    if _RAG_PROCESSOR is None:
        _RAG_PROCESSOR = RAGNode()
    return _RAG_PROCESSOR.process_rag(state)