# research-agent / main.py
# Optimize for HF Spaces - clean Dockerfile and enhanced UI (commit ad29580)
import streamlit as st
import uuid
import os
from dotenv import load_dotenv
from src.graph import app
# Load environment variables (works with .env file or HF Spaces secrets)
load_dotenv()
# --- Streamlit UI Configuration ---
# set_page_config must be the first st.* call of the script run.
_PAGE_OPTIONS = {
    "page_title": "Autonomous Research Agent",
    "page_icon": "πŸ€–",
    "layout": "wide",
    "initial_sidebar_state": "collapsed",
}
st.set_page_config(**_PAGE_OPTIONS)
st.title("πŸ€– Autonomous Research Agent")

# Intro copy shown above the chat input (rendered as Markdown).
_INTRO_MARKDOWN = """
Welcome! This autonomous agent will research any topic for you using AI-powered web search and analysis.
**How it works:**
1. Enter your research topic below
2. The agent will search the web, evaluate sources, and summarize findings
3. Get a comprehensive research report in minutes
*Powered by LangGraph, Groq (Llama 3.3), and Tavily AI*
"""
st.markdown(_INTRO_MARKDOWN)
# Fail fast when a required API key is missing (works for a local .env or
# HF Spaces repository secrets — load_dotenv above covers both).
_REQUIRED_KEYS = ("GROQ_API_KEY", "TAVILY_API_KEY")
if not all(os.getenv(key) for key in _REQUIRED_KEYS):
    st.error("⚠️ API keys not configured. Please add GROQ_API_KEY and TAVILY_API_KEY in Space Settings β†’ Repository Secrets.")
    st.stop()
# --- Session State Management ---
# First run of a browser session: mint a conversation id for LangGraph
# checkpointing and start with an empty chat history.
if "thread_id" not in st.session_state:
    st.session_state["thread_id"] = str(uuid.uuid4())
    st.session_state["messages"] = []
# Replay the conversation so far — Streamlit re-runs the whole script on
# every interaction, so history must be re-rendered each time.
for past_message in st.session_state.messages:
    role, content = past_message["role"], past_message["content"]
    with st.chat_message(role):
        st.markdown(content)
# --- Main Application Logic ---
if prompt := st.chat_input("What topic should I research for you?"):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Prepare to display the agent's response.
    with st.chat_message("assistant"):
        # Use a status container to show the agent's progress.
        with st.status("πŸ” Researching...", expanded=True) as status:
            final_report = ""
            # Per-session thread id so LangGraph checkpoints this conversation.
            config = {"configurable": {"thread_id": st.session_state.thread_id}}
            initial_state = {"topic": prompt, "summaries": []}
            try:
                # Stream node-by-node events from the LangGraph agent; each
                # event maps a node name to that node's partial state update.
                for event in app.stream(initial_state, config=config):
                    for key, value in event.items():
                        if key == "search":
                            status.write("πŸ” Searching for relevant articles...")
                        elif key == "scrape_and_summarize":
                            scraped = value.get("scraped_content")
                            if scraped:
                                url = scraped.get('url', 'Unknown URL')
                                is_relevant = scraped.get('is_relevant', 'Unknown')
                                status.write(f"πŸ“„ Evaluating: {url} - Relevant: {is_relevant}")
                        elif key == "summarize":
                            status.write("πŸ“ Summarizing relevant content...")
                        elif key == "compile_report":
                            status.write("πŸ“Š Compiling the final report...")
                            if value.get("report"):
                                final_report = value["report"]
            except Exception as exc:
                # Bug fix: an exception while streaming used to leave the
                # status spinner stuck with no feedback. Surface it and stop.
                status.update(label="Research failed", state="error", expanded=True)
                st.error(f"The research agent hit an error: {exc}")
                st.stop()
            if not final_report:
                # Bug fix: previously an empty assistant message was rendered
                # and stored when the graph never emitted a "report" value.
                final_report = "Sorry, I couldn't produce a report for that topic. Please try again."
            # Update the status to "complete" when done.
            status.update(label="βœ… Research complete!", state="complete", expanded=False)
        # Display the final report.
        st.markdown(final_report)
    # Add the final report to the session state.
    st.session_state.messages.append({"role": "assistant", "content": final_report})