# Commit f393265 (Harsh-1132): Fix: Remove stcli.main() to fix Runtime error
"""
Hugging Face Space entry point for LangGraph RAG Q&A Agent
"""
import os
import sys
from pathlib import Path
# Add src to path
current_dir = Path(__file__).parent
sys.path.insert(0, str(current_dir / "src"))
# Model downloads are cached on the persistent /data volume when the Space
# provides one; otherwise fall back to a folder next to this script.
cache_dir = (
    "/data/models_cache"
    if os.path.exists("/data")
    else str(current_dir / "models_cache")
)
os.makedirs(cache_dir, exist_ok=True)
# Point every model-downloading library at the shared cache directory.
# NOTE: TRANSFORMERS_CACHE is deprecated in recent transformers releases in
# favor of HF_HOME / HF_HUB_CACHE; it is kept here for older versions.
os.environ['TRANSFORMERS_CACHE'] = cache_dir
os.environ['HF_HOME'] = cache_dir
os.environ['HF_HUB_CACHE'] = cache_dir  # modern replacement for TRANSFORMERS_CACHE
os.environ['TORCH_HOME'] = cache_dir
os.environ['SENTENCE_TRANSFORMERS_HOME'] = cache_dir
# Default runtime configuration.  Real environment variables (e.g. Space
# secrets) take precedence: ``setdefault`` never overwrites an existing value.
_DEFAULT_CONFIG = {
    'LLM_PROVIDER': 'huggingface',
    'HUGGINGFACE_MODEL': 'google/flan-t5-large',
    'EMBEDDING_MODEL': 'sentence-transformers/all-MiniLM-L6-v2',
    'CHROMA_PERSIST_DIR': './chroma_db',
    'CHROMA_COLLECTION_NAME': 'rag_knowledge_base',
    'CHUNK_SIZE': '500',
    'CHUNK_OVERLAP': '50',
    'TOP_K_RETRIEVAL': '3',
    'USE_LLM_REFLECTION': 'false',
    'MAX_REFLECTION_ITERATIONS': '2',
}
for _key, _value in _DEFAULT_CONFIG.items():
    os.environ.setdefault(_key, _value)
print("=" * 70)
print("πŸš€ LangGraph RAG Q&A Agent - Hugging Face Space")
print("=" * 70)
print(f"πŸ“¦ Cache Directory: {cache_dir}")
print(f"πŸ€– LLM Model: {os.environ['HUGGINGFACE_MODEL']}")
print(f"πŸ”€ Embedding Model: {os.environ['EMBEDDING_MODEL']}")
print("=" * 70)
# Import the main Streamlit app (DO NOT call stcli.main()).
# HuggingFace Spaces will automatically run this file with Streamlit, so the
# import itself (which must happen AFTER the cache/env setup above) is what
# builds the page; re-invoking the Streamlit CLI here caused a runtime error.
# NOTE(review): ``src.ui_app`` resolves only if the project root is on
# sys.path — confirm against the path setup at the top of this file.
from src.ui_app import main

if __name__ == "__main__":
    # Direct execution (``python app.py``) still renders the UI once.
    main()