# Source provenance (Hugging Face upload): GhufranAI — "Upload 3 files", commit 3e31e00 (verified)
"""
Streamlit UI for Multi-Agent Research Assistant (Tavily Version)
=================================================================
Features:
- Clean, professional interface
- Real-time agent execution visualization
- Interactive tool selection
- Source citations with links
- Export reports
- Session history
Run: streamlit run app.py
"""
import streamlit as st
from datetime import datetime
import json
import time
# Import your multi-agent system
from multi_agent_assistant import (
MultiAgentSystem,
Config,
TAVILY_AVAILABLE
)
# ═══════════════════════════════════════════════════════════════════════════
# PAGE CONFIG
# ═══════════════════════════════════════════════════════════════════════════
# NOTE: st.set_page_config must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Multi-Agent Research Assistant",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded"  # sidebar open on first load
)
# Custom CSS
# Page-wide styles injected once per run. The four agent classes
# (.researcher/.analyst/.writer/.critic) must stay in sync with the
# class names chosen in display_agent_activity below.
st.markdown("""
<style>
.main-header {
font-size: 2.5rem;
font-weight: bold;
color: #1f77b4;
text-align: center;
margin-bottom: 1rem;
}
.sub-header {
font-size: 1.2rem;
color: #666;
text-align: center;
margin-bottom: 2rem;
}
.agent-box {
padding: 1rem;
border-radius: 0.5rem;
border-left: 4px solid;
margin: 1rem 0;
}
.researcher { border-color: #1f77b4; background-color: #e3f2fd; }
.analyst { border-color: #ff7f0e; background-color: #fff3e0; }
.writer { border-color: #2ca02c; background-color: #e8f5e9; }
.critic { border-color: #d62728; background-color: #ffebee; }
.source-card {
padding: 1rem;
border-radius: 0.5rem;
background-color: #f5f5f5;
margin: 0.5rem 0;
}
.metric-card {
padding: 1rem;
border-radius: 0.5rem;
background-color: #ffffff;
border: 1px solid #e0e0e0;
text-align: center;
}
</style>
""", unsafe_allow_html=True)
# ═══════════════════════════════════════════════════════════════════════════
# SESSION STATE INITIALIZATION
# ═══════════════════════════════════════════════════════════════════════════
# Seed every session-state slot the app reads so later code never has to
# existence-check. Runs on each rerun but only fills missing keys.
for _key, _default in (
    ("system", None),            # MultiAgentSystem instance once initialized
    ("history", []),             # list of past research results
    ("current_research", None),  # most recent final_state dict
    ("agent_logs", []),          # reserved for agent activity logs
):
    if _key not in st.session_state:
        st.session_state[_key] = _default
# ═══════════════════════════════════════════════════════════════════════════
# HELPER FUNCTIONS
# ═══════════════════════════════════════════════════════════════════════════
def initialize_system(hf_token: str, tavily_key: str):
    """Construct the MultiAgentSystem and store it in session state.

    Shows a spinner while the system boots. On success returns True;
    on any failure an error is rendered in the UI and False is returned.
    """
    try:
        with st.spinner("🚀 Initializing Multi-Agent System..."):
            st.session_state.system = MultiAgentSystem(
                hf_token=hf_token,
                tavily_key=tavily_key,
                max_iterations=2,  # cap on critic-driven refinement loops
            )
    except Exception as e:
        st.error(f"Initialization failed: {str(e)}")
        return False
    return True
def display_agent_activity(step: str, agent_name: str, content: str):
    """Render one agent's activity as a colored card.

    The card's CSS class comes from the agent name (falling back to the
    researcher style for unknown agents); classes are defined in the
    page-level <style> block. `step` is accepted for call-site symmetry
    but not rendered.
    """
    css_by_agent = {
        "Researcher": "researcher",
        "Analyst": "analyst",
        "Writer": "writer",
        "Critic": "critic",
    }
    box_class = css_by_agent.get(agent_name, "researcher")
    card_html = f"""
<div class="agent-box {box_class}">
<strong>🤖 {agent_name} Agent</strong><br/>
<small>{content}</small>
</div>
"""
    st.markdown(card_html, unsafe_allow_html=True)
def format_report(report_output, research_output, critique_output):
    """Render the final report, metadata cards, and web references.

    Args (duck-typed pipeline result objects — shapes inferred from usage
    here; confirm against multi_agent_assistant):
        report_output: exposes .title (str) and .content (markdown str).
        research_output: exposes .sources_used (iterable of str),
            .confidence (float, presumably in [0, 1] given the *100 below),
            and .web_sources (list of dicts with 'title' and 'url' keys).
        critique_output: exposes .score (float on a 0-10 scale).
    """
    st.markdown("---")
    st.markdown("## 📄 Research Report")
    # Title
    st.markdown(f"### {report_output.title}")
    # Content
    st.markdown(report_output.content)
    # Metadata section: three cards side by side.
    st.markdown("---")
    st.markdown("### 📊 Research Metadata")
    col1, col2, col3 = st.columns(3)
    with col1:
        st.markdown(f"""
<div class="metric-card">
<h4>Sources</h4>
<p>{', '.join(research_output.sources_used)}</p>
</div>
""", unsafe_allow_html=True)
    with col2:
        st.markdown(f"""
<div class="metric-card">
<h4>Confidence</h4>
<p>{research_output.confidence*100:.0f}%</p>
</div>
""", unsafe_allow_html=True)
    with col3:
        st.markdown(f"""
<div class="metric-card">
<h4>Quality Score</h4>
<p>{critique_output.score:.1f}/10</p>
</div>
""", unsafe_allow_html=True)
    # Web sources: only shown when the researcher actually hit the web.
    if research_output.web_sources:
        st.markdown("### 🌐 Web References")
        for i, source in enumerate(research_output.web_sources, 1):
            st.markdown(f"""
<div class="source-card">
<strong>{i}. {source['title']}</strong><br/>
<a href="{source['url']}" target="_blank">{source['url']}</a>
</div>
""", unsafe_allow_html=True)
def export_report(report_output, research_output):
    """Assemble the research result as a downloadable Markdown document.

    Produces the report body followed by a metadata footer (sources,
    confidence, generation timestamp) and, when web sources exist, a
    numbered list of markdown links. Returns the document as one string.
    """
    parts = [
        f"# {report_output.title}",
        f"{report_output.content}",
        "---",
        "## Metadata",
        f"- **Sources:** {', '.join(research_output.sources_used)}",
        f"- **Confidence:** {research_output.confidence*100:.0f}%",
        f"- **Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
        "",  # trailing newline after the metadata footer
    ]
    if research_output.web_sources:
        parts.append("## Web References")
        parts.append("")
        for i, source in enumerate(research_output.web_sources, 1):
            parts.append(f"{i}. [{source['title']}]({source['url']})")
        parts.append("")  # keep the document newline-terminated
    return "\n".join(parts)
# ═══════════════════════════════════════════════════════════════════════════
# SIDEBAR
# ═══════════════════════════════════════════════════════════════════════════
with st.sidebar:
    st.markdown("# ⚙️ Configuration")
    # API Keys — prefilled from Config when env vars are already set.
    st.markdown("## 🔑 API Keys")
    hf_token = st.text_input(
        "Hugging Face Token",
        type="password",
        value=Config.HF_TOKEN if Config.HF_TOKEN else "",
        help="Get from: https://huggingface.co/settings/tokens"
    )
    tavily_key = st.text_input(
        "Tavily API Key",
        type="password",
        value=Config.TAVILY_API_KEY if Config.TAVILY_API_KEY else "",
        help="Get FREE key from: https://tavily.com/"
    )
    if st.button("🚀 Initialize System", type="primary", use_container_width=True):
        if not hf_token or not tavily_key:
            st.error("Both tokens required!")
        else:
            if initialize_system(hf_token, tavily_key):
                st.success("✅ System Ready!")
    st.markdown("---")
    # System Status
    st.markdown("## 📊 System Status")
    if st.session_state.system:
        st.success("🟢 Online")
        st.info(f"📚 Queries: {len(st.session_state.history)}")
    else:
        st.error("🔴 Offline")
    # Warn regardless of system status when the tavily package is missing.
    if not TAVILY_AVAILABLE:
        st.warning("⚠️ Tavily not installed")
    st.markdown("---")
    # Example queries: clicking a button stashes the query in session
    # state; the main input widget picks it up on the next rerun.
    st.markdown("## 💡 Example Queries")
    examples = {
        "Math": "what is 125*8+47",
        "Knowledge": "explain deep learning",
        "Current Events": "latest AI news December 2025",
        "Web Search": "current Bitcoin price"
    }
    for category, query in examples.items():
        if st.button(f"{category}", use_container_width=True):
            st.session_state.example_query = query
    st.markdown("---")
    # Clear history (keeps the initialized system itself).
    if st.button("🗑️ Clear History", use_container_width=True):
        st.session_state.history = []
        st.session_state.current_research = None
        st.rerun()
    st.markdown("---")
    # About
    with st.expander("ℹ️ About"):
        st.markdown("""
**Multi-Agent Research Assistant**
An Agentic AI system with:
- 🔍 Tavily web search
- 🧮 Calculator tool
- 📚 Knowledge base
- 🤖 4 specialized agents
- ♻️ Iterative refinement
**Tools:**
- LangGraph (orchestration)
- Tavily (AI-optimized search)
- Llama 3.1 8B (reasoning)
**Version:** 2.0
""")
# ═══════════════════════════════════════════════════════════════════════════
# MAIN CONTENT
# ═══════════════════════════════════════════════════════════════════════════
# Header
st.markdown('<div class="main-header">🤖 Multi-Agent Research Assistant</div>', unsafe_allow_html=True)
st.markdown('<div class="sub-header">Powered by Tavily AI-Optimized Search & Agentic AI With LangGraph</div>', unsafe_allow_html=True)
# Onboarding gate: until the system is initialized, show setup steps and
# halt the script (st.stop) so none of the research UI below renders.
if not st.session_state.system:
    st.warning("⚠️ Please initialize the system using the sidebar")
    col1, col2, col3 = st.columns(3)
    with col1:
        st.markdown("""
### 🔑 Step 1: Get API Keys
**Hugging Face (FREE)**
- [Get token](https://huggingface.co/settings/tokens)
- No credit card needed
**Tavily (FREE)**
- [Get key](https://tavily.com/)
- 1,000 searches/month free
""")
    with col2:
        st.markdown("""
### ⚙️ Step 2: Initialize
1. Enter tokens in sidebar
2. Click "Initialize System"
3. Wait ~10 seconds
4. Start researching!
""")
    with col3:
        st.markdown("""
### 💡 Step 3: Ask Questions
Try:
- Math calculations
- General knowledge
- Current events
- Web research
""")
    st.stop()
# Main Interface
st.markdown("## 🔍 Research Query")
# Query input: wide text box plus a narrow button column.
query_col, button_col = st.columns([4, 1])
with query_col:
    # One-shot prefill: a sidebar example button stores example_query;
    # we consume it here as the widget's initial value, then delete it so
    # the next rerun falls back to the plain (empty) input.
    default_query = st.session_state.get('example_query', '')
    if default_query:
        query = st.text_input(
            "What would you like to research?",
            value=default_query,
            placeholder="e.g., latest AI developments, what is 25*4, explain machine learning"
        )
        # Clear example query after use
        del st.session_state.example_query
    else:
        query = st.text_input(
            "What would you like to research?",
            placeholder="e.g., latest AI developments, what is 25*4, explain machine learning"
        )
with button_col:
    st.markdown("<br/>", unsafe_allow_html=True)  # vertical spacer to align with the text input
    research_button = st.button("🚀 Research", type="primary", use_container_width=True)
# Execute research when the button is pressed with a non-empty query;
# otherwise re-render the last result (if any) so it survives reruns.
if research_button and query:
    st.markdown("---")
    st.markdown("## 🤖 Agent Activity")
    # Placeholders let us clear the progress UI once the run finishes.
    progress_placeholder = st.empty()
    agent_placeholder = st.empty()
    try:
        # Show progress
        with progress_placeholder:
            progress_bar = st.progress(0)
            status_text = st.empty()
        # Execute research with progress updates.
        # NOTE(review): system.research(query) runs the ENTIRE pipeline in
        # one blocking call; the per-agent status updates after it are
        # cosmetic (hence the sleeps), not real-time agent progress.
        with st.spinner("🔍 Research in progress..."):
            # Agent 1: Researcher
            status_text.text("🔍 Researcher Agent: Gathering information...")
            progress_bar.progress(25)
            final_state = st.session_state.system.research(query)
            # Agent 2: Analyst
            status_text.text("📊 Analyst Agent: Analyzing findings...")
            progress_bar.progress(50)
            time.sleep(0.5)
            # Agent 3: Writer
            status_text.text("✍️ Writer Agent: Creating report...")
            progress_bar.progress(75)
            time.sleep(0.5)
            # Agent 4: Critic
            status_text.text("🎯 Critic Agent: Quality check...")
            progress_bar.progress(100)
            time.sleep(0.5)
        # Clear progress
        progress_placeholder.empty()
        if final_state and final_state.get("report_output"):
            # Display agent summary
            with agent_placeholder:
                st.success("✅ Research Complete!")
                col1, col2, col3, col4 = st.columns(4)
                with col1:
                    st.markdown("**🔍 Researcher**")
                    st.caption("Information gathered")
                with col2:
                    st.markdown("**📊 Analyst**")
                    st.caption("Insights extracted")
                with col3:
                    st.markdown("**✍️ Writer**")
                    st.caption("Report created")
                with col4:
                    st.markdown("**🎯 Critic**")
                    st.caption(f"Score: {final_state['critique_output'].score:.1f}/10")
            # Store in session so the result persists across reruns.
            st.session_state.current_research = final_state
            # Add to history
            st.session_state.history.append({
                "timestamp": datetime.now(),
                "query": query,
                "result": final_state
            })
            # Display report
            format_report(
                final_state["report_output"],
                final_state["research_output"],
                final_state["critique_output"]
            )
            # Export options
            st.markdown("---")
            st.markdown("### 📥 Export")
            col1, col2, col3 = st.columns([1, 1, 2])
            with col1:
                report_text = export_report(
                    final_state["report_output"],
                    final_state["research_output"]
                )
                st.download_button(
                    label="📄 Download Markdown",
                    data=report_text,
                    file_name=f"research_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md",
                    mime="text/markdown",
                    use_container_width=True
                )
            with col2:
                # NOTE(review): .dict() is the pydantic v1 API — assumes the
                # pipeline outputs are pydantic v1 models (v2 renamed it to
                # model_dump()); confirm against multi_agent_assistant.
                report_json = json.dumps({
                    "query": query,
                    "report": final_state["report_output"].dict(),
                    "research": final_state["research_output"].dict(),
                    "critique": final_state["critique_output"].dict(),
                    "timestamp": datetime.now().isoformat()
                }, indent=2)
                st.download_button(
                    label="📊 Download JSON",
                    data=report_json,
                    file_name=f"research_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
                    mime="application/json",
                    use_container_width=True
                )
        else:
            st.error("❌ Research failed. Please try again.")
    except Exception as e:
        st.error(f"❌ Error during research: {str(e)}")
        st.exception(e)
# Display current research if exists (e.g. after an unrelated rerun).
elif st.session_state.current_research:
    st.markdown("---")
    st.info("💡 Previous research result shown below. Ask a new question above!")
    final_state = st.session_state.current_research
    format_report(
        final_state["report_output"],
        final_state["research_output"],
        final_state["critique_output"]
    )
# ═══════════════════════════════════════════════════════════════════════════
# HISTORY TAB
# ═══════════════════════════════════════════════════════════════════════════
if st.session_state.history:
    st.markdown("---")
    st.markdown("## 📚 Research History")
    # Newest first; only the most recent entry starts expanded (i == 0).
    for i, item in enumerate(reversed(st.session_state.history)):
        with st.expander(
            # NOTE(review): "..." is appended even when the query is shorter
            # than 60 chars — cosmetic only.
            f"🔍 {item['query'][:60]}... - {item['timestamp'].strftime('%H:%M:%S')}",
            expanded=(i == 0)
        ):
            if item['result'] and item['result'].get('report_output'):
                col1, col2 = st.columns([3, 1])
                with col1:
                    st.markdown(f"**Question:** {item['query']}")
                    st.markdown(f"**Answer:** {item['result']['research_output'].answer[:200]}...")
                with col2:
                    st.metric("Quality", f"{item['result']['critique_output'].score:.1f}/10")
                    st.metric("Confidence", f"{item['result']['research_output'].confidence*100:.0f}%")
                # Re-show a past report; key is the position in the reversed
                # list, which is unique within a single render pass.
                if st.button(f"📄 View Full Report #{len(st.session_state.history)-i}", key=f"view_{i}"):
                    st.session_state.current_research = item['result']
                    st.rerun()
# ═══════════════════════════════════════════════════════════════════════════
# FOOTER
# ═══════════════════════════════════════════════════════════════════════════
# Three static info columns plus a centered credit line.
st.markdown("---")
footer_col1, footer_col2, footer_col3 = st.columns(3)
with footer_col1:
    st.markdown("""
**🤖 Agentic AI System**
- Autonomous tool selection
- Multi-agent collaboration
- Iterative refinement
""")
with footer_col2:
    st.markdown("""
**🛠️ Technologies**
- LangGraph
- Tavily Search
- Llama 3.1 8B
""")
with footer_col3:
    st.markdown("""
**📊 Capabilities**
- Web search
- Calculations
- Knowledge base
- Real-time info
""")
st.markdown("""
<div style='text-align: center; color: gray; padding: 2rem;'>
<small>Multi-Agent Research Assistant | Powered by Tavily & LangGraph</small>
</div>
""", unsafe_allow_html=True)