import streamlit as st
import uuid
import os
from dotenv import load_dotenv
from src.graph import app

# Load environment variables (works with .env file or HF Spaces secrets)
load_dotenv()

# --- Streamlit UI Configuration ---
# set_page_config must be the first Streamlit call in the script.
_PAGE_CONFIG = {
    "page_title": "Autonomous Research Agent",
    "page_icon": "πŸ€–",
    "layout": "wide",
    "initial_sidebar_state": "collapsed",
}
st.set_page_config(**_PAGE_CONFIG)

# Static intro copy shown above the chat widget.
_INTRO_MD = """
Welcome! This autonomous agent will research any topic for you using AI-powered web search and analysis.

**How it works:**
1. Enter your research topic below
2. The agent will search the web, evaluate sources, and summarize findings
3. Get a comprehensive research report in minutes

*Powered by LangGraph, Groq (Llama 3.3), and Tavily AI*
"""

st.title("πŸ€– Autonomous Research Agent")
st.markdown(_INTRO_MD)

# Check for API keys.
# Truthiness check (not `is None`) so empty-string values also count as
# unconfigured, matching the behavior users expect from secrets UIs.
_REQUIRED_ENV = ("GROQ_API_KEY", "TAVILY_API_KEY")
if not all(os.getenv(name) for name in _REQUIRED_ENV):
    st.error("⚠️ API keys not configured. Please add GROQ_API_KEY and TAVILY_API_KEY in Space Settings β†’ Repository Secrets.")
    st.stop()

# --- Session State Management ---
# Initialize each key independently. The original only created `messages`
# when `thread_id` was missing, so if the two keys ever desynced (e.g.
# `messages` deleted while `thread_id` survived a rerun) the history loop
# below would raise KeyError. Streamlit reruns this script top-to-bottom
# on every interaction, so these guards are cheap and idempotent.
if "thread_id" not in st.session_state:
    # Stable per-session id used as the LangGraph thread id.
    st.session_state.thread_id = str(uuid.uuid4())
if "messages" not in st.session_state:
    # Chat transcript: list of {"role": ..., "content": ...} dicts.
    st.session_state.messages = []

# Replay the stored conversation so the transcript survives reruns.
for entry in st.session_state.messages:
    role, text = entry["role"], entry["content"]
    with st.chat_message(role):
        st.markdown(text)

# --- Main Application Logic ---
# Progress messages for graph nodes that need no payload inspection.
_NODE_STATUS = {
    "search": "πŸ” Searching for relevant articles...",
    "summarize": "πŸ“ Summarizing relevant content...",
    "compile_report": "πŸ“Š Compiling the final report...",
}

if prompt := st.chat_input("What topic should I research for you?"):
    # Record the user's request and echo it into the chat.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Stream the agent's run inside an expandable status container.
    with st.chat_message("assistant"):
        with st.status("πŸ” Researching...", expanded=True) as status:
            final_report = ""

            # Per-session LangGraph config keyed by the session's thread id.
            run_config = {"configurable": {"thread_id": st.session_state.thread_id}}
            graph_input = {"topic": prompt, "summaries": []}

            # Each streamed event maps a node name to that node's output.
            for event in app.stream(graph_input, config=run_config):
                for node, payload in event.items():
                    if node in _NODE_STATUS:
                        status.write(_NODE_STATUS[node])
                    elif node == "scrape_and_summarize":
                        scraped = payload.get("scraped_content")
                        if scraped:
                            url = scraped.get('url', 'Unknown URL')
                            is_relevant = scraped.get('is_relevant', 'Unknown')
                            status.write(f"πŸ“„ Evaluating: {url} - Relevant: {is_relevant}")
                    # The compile step also carries the finished report.
                    if node == "compile_report" and payload.get("report"):
                        final_report = payload["report"]

            # Collapse the status box once the run finishes.
            status.update(label="βœ… Research complete!", state="complete", expanded=False)

        # Show the finished report below the (collapsed) status box.
        st.markdown(final_report)

    # Persist the assistant's reply in the transcript.
    st.session_state.messages.append({"role": "assistant", "content": final_report})