# NOTE(review): removed scrape artifact ("Spaces: / Sleeping / Sleeping") —
# residue from the Hugging Face Spaces page header, not part of the source file.
| """Streamlit app for Hugging Face Spaces deployment.""" | |
| from __future__ import annotations | |
| import asyncio | |
| import os | |
| from typing import Any | |
| import streamlit as st | |
| # Set page config first | |
| st.set_page_config( | |
| page_title="Ask-the-Web Agent", | |
| page_icon="π", | |
| layout="wide", | |
| initial_sidebar_state="expanded", | |
| ) | |
| # Add src to path for imports | |
| import sys | |
| sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) | |
| from src.utils.config import Settings | |
| from src.utils.logging import get_logger | |
| logger = get_logger(__name__) | |
def get_settings_from_env() -> Settings:
    """Build the application :class:`Settings`.

    HF Spaces exposes its secrets as environment variables, so the default
    ``Settings()`` constructor picks them up without extra plumbing.
    """
    settings = Settings()
    return settings
def init_session_state() -> None:
    """Seed every session-state key the app reads with its default value.

    Only missing keys are written, so values survive Streamlit reruns.
    """
    defaults = {
        "messages": [],      # chat transcript: list of {"role", "content", ...} dicts
        "agent": None,       # lazily constructed AskTheWebAgent
        "initialized": False,  # True once the agent loaded successfully
        "error": None,       # last initialization error message, if any
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value
def load_agent() -> None:
    """Instantiate the Ask-the-Web Agent once and cache it in session state.

    On success, sets ``initialized=True`` and clears any prior error.
    On failure, stores the error text in ``st.session_state.error`` so the UI
    can surface it, and logs the full traceback.
    """
    if st.session_state.agent is not None:
        return  # already loaded on a previous rerun
    try:
        # Imported lazily so the page can still render (and show the missing-key
        # warning) when the agent's dependencies fail to import.
        from src.agent.agent import AskTheWebAgent

        st.session_state.agent = AskTheWebAgent()
        st.session_state.initialized = True
        st.session_state.error = None
    except Exception as e:
        st.session_state.error = str(e)
        st.session_state.initialized = False
        # Fix: logger.exception records the traceback (logger.error with an
        # f-string did not), and lazy %s args avoid eager formatting.
        logger.exception("Failed to initialize agent: %s", e)
async def process_query_async(question: str, history: list) -> Any:
    """Forward *question* (with prior *history*) to the cached agent.

    Web search is always enabled and at most five sources are requested;
    returns whatever response object ``agent.query`` produces.
    """
    return await st.session_state.agent.query(
        question=question,
        history=history,
        enable_search=True,
        max_sources=5,
    )
def _render_sidebar(settings: Settings) -> None:
    """Sidebar: configuration summary, about blurb, clear-chat button, agent status."""
    with st.sidebar:
        st.title("βοΈ Settings")
        st.subheader("Configuration")
        st.text(f"LLM Provider: {settings.llm_provider}")
        st.text(f"Model: {settings.llm_model}")
        st.divider()
        st.subheader("About")
        st.markdown("""
**Ask-the-Web Agent** is an AI-powered assistant that:
- π Searches the web for information
- π§ Analyzes and synthesizes content
- π Provides cited answers
- π‘ Suggests follow-up questions
""")
        st.divider()
        if st.button("ποΈ Clear Chat"):
            st.session_state.messages = []
            st.rerun()
        # Status reflects the state written by load_agent() on an earlier rerun;
        # on the run that initializes, it still reads "Initializing...".
        if st.session_state.error:
            st.error(f"β οΈ {st.session_state.error}")
        elif st.session_state.initialized:
            st.success("β Agent ready")
        else:
            st.info("β³ Initializing...")


def _has_llm_credentials(settings: Settings) -> bool:
    """Return True when some usable LLM backend is configured.

    ollama and huggingface work without an API key; otherwise any of the
    OpenAI / Anthropic / HF tokens suffices.
    """
    return bool(
        settings.llm_provider == "ollama"
        or settings.llm_provider == "huggingface"  # HF works without token for some models
        or settings.openai_api_key
        or settings.anthropic_api_key
        or settings.hf_token
    )


def _render_sources(sources: list, *, expanded: bool) -> None:
    """Render source dicts (``title``/``url`` keys) as a numbered link list in an expander."""
    with st.expander("π Sources", expanded=expanded):
        for i, source in enumerate(sources, 1):
            title = source.get("title", "Source")
            url = source.get("url", "")
            st.markdown(f"{i}. [{title}]({url})" if url else f"{i}. {title}")


def _render_history() -> None:
    """Replay the stored transcript (Streamlit re-executes the script each interaction)."""
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
            if message["role"] == "assistant":
                if message.get("sources"):
                    _render_sources(message["sources"], expanded=False)
                if message.get("follow_ups"):
                    with st.expander("π‘ Related questions", expanded=False):
                        for q in message["follow_ups"]:
                            st.markdown(f"β’ {q}")


def _read_prompt() -> str | None:
    """Return the next question to answer: a queued follow-up click, else the chat box."""
    if "pending_question" in st.session_state:
        # A follow-up button was clicked on the previous run; consume it once.
        prompt = st.session_state.pending_question
        del st.session_state.pending_question
        return prompt
    return st.chat_input(
        "Ask me anything..." if st.session_state.initialized else "Waiting for initialization..."
    )


def _show_confidence(confidence: float) -> None:
    """Colour-coded confidence badge: >0.7 success, >0.4 warning, else info."""
    if confidence > 0.7:
        st.success(f"Confidence: {confidence:.0%}")
    elif confidence > 0.4:
        st.warning(f"Confidence: {confidence:.0%}")
    else:
        st.info(f"Confidence: {confidence:.0%}")


def _offer_follow_ups(questions: list) -> None:
    """Render up to three follow-up questions as buttons; a click queues one for the next run."""
    if not questions:
        return
    st.markdown("---")
    st.markdown("*Related questions:*")
    cols = st.columns(min(len(questions), 3))
    for i, q in enumerate(questions[:3]):
        with cols[i]:
            # hash(q) is stable within one interpreter session, which is all
            # Streamlit widget keys need.
            if st.button(q, key=f"followup_{hash(q)}_{i}"):
                st.session_state.pending_question = q
                st.rerun()


def _answer(prompt: str) -> None:
    """Record the user turn, run the agent, and render the assistant's reply."""
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        with st.spinner("Searching and analyzing..."):
            try:
                # History excludes the user turn appended just above.
                history = [
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages[:-1]
                ]
                # Streamlit scripts run synchronously; drive the async agent here.
                response = asyncio.run(process_query_async(prompt, history))
                st.markdown(response.answer)
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": response.answer,
                    "sources": response.sources,
                    "follow_ups": response.follow_up_questions,
                    "confidence": response.confidence,
                })
                _show_confidence(response.confidence)
                if response.sources:
                    _render_sources(response.sources, expanded=True)
                _offer_follow_ups(response.follow_up_questions)
            except Exception as e:
                st.error(f"Error: {e}")
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": f"Sorry, I encountered an error: {e}",
                })


def main() -> None:
    """Top-level Streamlit page; executed once per user interaction."""
    init_session_state()
    settings = get_settings_from_env()
    _render_sidebar(settings)
    st.title("π Ask-the-Web Agent")
    st.caption("AI-powered answers with real-time web search")
    has_llm_key = _has_llm_credentials(settings)
    if not has_llm_key:
        st.warning("""
β οΈ **No LLM API key configured!**
Please set one of the following in your Hugging Face Space secrets:
- `LLM_PROVIDER=huggingface` (works without token using Zephyr-7B)
- `OPENAI_API_KEY` for OpenAI
- `ANTHROPIC_API_KEY` for Anthropic
""")
    # Lazily initialize the agent on the first run that has credentials.
    if not st.session_state.initialized and has_llm_key:
        with st.spinner("Initializing agent..."):
            load_agent()
    _render_history()
    prompt = _read_prompt()
    # Questions typed before initialization completes are intentionally ignored.
    if prompt and st.session_state.initialized:
        _answer(prompt)


if __name__ == "__main__":
    main()