"""
Main execution script for the LangGraph RAG Q&A Agent.
Initializes all components and provides an interface for querying.
"""
import os
import sys
from pathlib import Path

# Make sibling modules importable when this file is run as a script.
sys.path.insert(0, str(Path(__file__).parent))

from rag_pipeline import RAGPipeline
from llm_utils import create_llm_handler
from reflection import create_reflection_evaluator
from agent_workflow import create_rag_agent
def initialize_system(
    provider: str = "openai",
    use_llm_reflection: bool = False,
    rebuild_index: bool = False
):
    """Initialize all system components and return a ready-to-use agent.

    Args:
        provider: LLM provider name ("openai" or "huggingface").
        use_llm_reflection: If True, the reflection evaluator uses the LLM;
            otherwise a heuristic evaluator is used.
        rebuild_index: If True, force a rebuild of the vector index.

    Returns:
        The fully wired RAG agent produced by ``create_rag_agent``.
    """
    # Resolve project-relative paths; assumes this file lives in <root>/src/.
    # (Path is already imported at module level — no local re-import needed.)
    current_file = Path(__file__).resolve()
    project_root = current_file.parent.parent
    data_dir = project_root / "data"
    chroma_dir = project_root / "chroma_db"

    print("\n" + "="*70)
    print(" "*15 + "π INITIALIZING RAG Q&A AGENT π")
    print("="*70 + "\n")

    # [1/4] RAG pipeline: document loading, chunking, vector store.
    print("[1/4] Initializing RAG Pipeline...")
    rag_pipeline = RAGPipeline(
        data_directory=str(data_dir),
        collection_name="rag_knowledge_base",
        persist_directory=str(chroma_dir),
        chunk_size=500,
        chunk_overlap=50
    )

    # [2/4] Build (or load) the persisted vector index.
    print(f"\n[2/4] {'Rebuilding' if rebuild_index else 'Loading'} Vector Index...")
    rag_pipeline.build_index(force_rebuild=rebuild_index)

    # [3/4] LLM handler used for answer generation.
    print(f"\n[3/4] Initializing LLM Handler (provider={provider})...")
    llm_handler = create_llm_handler(
        provider=provider,
        temperature=0.7,
        max_tokens=500
    )

    # [4/4] Reflection evaluator; the LLM handle is only passed through
    # when LLM-based reflection was requested (heuristic mode needs none).
    print("\n[4/4] Initializing Reflection Evaluator...")
    reflection_evaluator = create_reflection_evaluator(
        llm_handler=llm_handler if use_llm_reflection else None,
        use_llm_reflection=use_llm_reflection
    )

    # Wire everything into the LangGraph agent.
    print("\n[β] Creating RAG Agent...")
    agent = create_rag_agent(
        rag_pipeline=rag_pipeline,
        llm_handler=llm_handler,
        reflection_evaluator=reflection_evaluator,
        max_iterations=2
    )

    print("\n" + "="*70)
    print(" "*20 + "β SYSTEM READY β ")
    print("="*70 + "\n")
    return agent
def run_sample_queries(agent):
    """
    Run sample queries to demonstrate the agent.

    Args:
        agent: Initialized RAG agent
    """
    import time  # hoisted: was re-imported inside the loop on every iteration

    sample_queries = [
        "What is machine learning and what are its types?",
        "Explain what Python is used for",
        "What is the difference between IaaS, PaaS, and SaaS?",
        "Tell me about database normalization"
    ]

    print("\n" + "="*70)
    print(" "*18 + "π RUNNING SAMPLE QUERIES π")
    print("="*70 + "\n")

    for i, query in enumerate(sample_queries, 1):
        print(f"\n{'='*70}")
        print(f"SAMPLE QUERY {i}/{len(sample_queries)}")
        print(f"{'='*70}\n")

        # Return value was previously bound but never used; drop it.
        agent.query(query)

        # Brief pause between queries (skipped after the last one).
        if i < len(sample_queries):
            print("\nβ³ Waiting 2 seconds before next query...\n")
            time.sleep(2)
def interactive_mode(agent):
    """
    Run agent in interactive mode.

    Reads questions from stdin in a loop until the user types an exit
    command or interrupts with Ctrl-C.

    Args:
        agent: Initialized RAG agent
    """
    print("\n" + "=" * 70)
    print(" " * 18 + "π¬ INTERACTIVE MODE π¬")
    print("=" * 70)
    print("\nType your questions below. Type 'exit' or 'quit' to stop.\n")

    while True:
        try:
            question = input("You: ").strip()

            # Ignore blank lines; recognize the exit commands.
            if not question:
                continue
            if question.lower() in ('exit', 'quit', 'q'):
                print("\nπ Goodbye!\n")
                break

            # Hand the question to the agent (it prints its own output).
            agent.query(question)
        except KeyboardInterrupt:
            # Ctrl-C ends the session cleanly.
            print("\n\nπ Goodbye!\n")
            break
        except Exception as err:
            # Any per-query failure is reported; the loop keeps running.
            print(f"\nβ Error: {err}\n")
def main():
    """Parse CLI arguments, initialize the system, and dispatch execution."""
    import argparse

    parser = argparse.ArgumentParser(description="LangGraph RAG Q&A Agent")
    parser.add_argument(
        "--provider", type=str, default="openai",
        choices=["openai", "huggingface"],
        help="LLM provider to use")
    parser.add_argument(
        "--llm-reflection", action="store_true",
        help="Use LLM-based reflection (default: heuristic)")
    parser.add_argument(
        "--rebuild-index", action="store_true",
        help="Force rebuild the vector index")
    parser.add_argument(
        "--mode", type=str, default="sample",
        choices=["sample", "interactive"],
        help="Execution mode: run sample queries or interactive")
    parser.add_argument(
        "--query", type=str,
        help="Single query to process (bypasses mode selection)")
    args = parser.parse_args()

    try:
        agent = initialize_system(
            provider=args.provider,
            use_llm_reflection=args.llm_reflection,
            rebuild_index=args.rebuild_index
        )

        # A one-off --query short-circuits both execution modes.
        if args.query:
            agent.query(args.query)
            return

        if args.mode == "sample":
            run_sample_queries(agent)
        elif args.mode == "interactive":
            interactive_mode(agent)
    except Exception as e:
        # Top-level boundary: report, show the traceback, exit non-zero.
        print(f"\nβ Fatal Error: {e}\n")
        import traceback
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()