# Fix: Replace width='stretch' with use_container_width=True for
# Streamlit 1.35.0 compatibility (commit d94ee10, author Harsh-1132).
"""
Premium Streamlit UI for LangGraph RAG Q&A Agent
Enhanced with Blue & Black theme and dynamic dashboard
"""
import streamlit as st
import sys
from pathlib import Path
import json
from datetime import datetime, timezone
import plotly.graph_objects as go
import plotly.express as px
from typing import Dict, Any
# Add src to path
sys.path.insert(0, str(Path(__file__).parent))
from rag_pipeline import RAGPipeline
from llm_utils import create_llm_handler
from reflection import create_reflection_evaluator
from agent_workflow import create_rag_agent
from evaluation import create_evaluator
# Premium CSS Styling - Blue & Black Theme.
# Injected once in main() via st.markdown(..., unsafe_allow_html=True) to
# override Streamlit's default widget styling (sidebar, tabs, buttons,
# inputs, expanders, progress bars, scrollbars) with a dark blue/purple
# look, and to define the custom classes used by the HTML fragments below:
# .premium-header, .metric-card, .answer-box, .status-badge (+ variants),
# .premium-footer and the .glow animation.
PREMIUM_CSS = """
<style>
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;600;700&display=swap');
* {
font-family: 'Inter', sans-serif;
}
/* Main app styling */
.stApp {
background: linear-gradient(135deg, #0a0e27 0%, #1a1d3e 100%);
}
/* Premium header */
.premium-header {
background: linear-gradient(135deg, #1e3c72 0%, #2a5298 50%, #7e22ce 100%);
padding: 2.5rem;
border-radius: 20px;
text-align: center;
box-shadow: 0 20px 60px rgba(30, 60, 114, 0.4);
margin-bottom: 2rem;
border: 1px solid rgba(255, 255, 255, 0.1);
}
.premium-header h1 {
color: #ffffff;
font-size: 3rem;
font-weight: 700;
margin: 0;
text-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
letter-spacing: -0.5px;
}
.premium-header p {
color: #e0e7ff;
font-size: 1.2rem;
margin-top: 0.5rem;
font-weight: 300;
}
/* Sidebar styling */
[data-testid="stSidebar"] {
background: linear-gradient(180deg, #0f172a 0%, #1e293b 100%);
border-right: 1px solid rgba(59, 130, 246, 0.2);
}
[data-testid="stSidebar"] h1,
[data-testid="stSidebar"] h2,
[data-testid="stSidebar"] h3,
[data-testid="stSidebar"] label,
[data-testid="stSidebar"] p {
color: #e0e7ff !important;
}
/* Premium metric cards */
.metric-card {
background: linear-gradient(135deg, #1e293b 0%, #334155 100%);
padding: 1.5rem;
border-radius: 15px;
border: 1px solid rgba(59, 130, 246, 0.3);
box-shadow: 0 10px 30px rgba(0, 0, 0, 0.3);
transition: all 0.3s ease;
margin: 0.5rem 0;
}
.metric-card:hover {
transform: translateY(-5px);
box-shadow: 0 15px 40px rgba(59, 130, 246, 0.4);
border-color: rgba(59, 130, 246, 0.6);
}
.metric-value {
font-size: 2.5rem;
font-weight: 700;
background: linear-gradient(135deg, #3b82f6 0%, #8b5cf6 100%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
margin: 0;
}
.metric-label {
color: #94a3b8;
font-size: 0.9rem;
text-transform: uppercase;
letter-spacing: 1px;
margin-top: 0.5rem;
}
/* Answer box */
.answer-box {
background: linear-gradient(135deg, #1e293b 0%, #2d3748 100%);
padding: 2rem;
border-radius: 20px;
border-left: 5px solid #3b82f6;
box-shadow: 0 10px 40px rgba(59, 130, 246, 0.2);
color: #e0e7ff;
font-size: 1.1rem;
line-height: 1.8;
margin: 1.5rem 0;
}
/* Status badges */
.status-badge {
display: inline-block;
padding: 0.5rem 1.5rem;
border-radius: 25px;
font-weight: 600;
font-size: 0.9rem;
text-transform: uppercase;
letter-spacing: 1px;
}
.status-relevant {
background: linear-gradient(135deg, #10b981 0%, #059669 100%);
color: white;
box-shadow: 0 5px 15px rgba(16, 185, 129, 0.4);
}
.status-partial {
background: linear-gradient(135deg, #f59e0b 0%, #d97706 100%);
color: white;
box-shadow: 0 5px 15px rgba(245, 158, 11, 0.4);
}
.status-irrelevant {
background: linear-gradient(135deg, #ef4444 0%, #dc2626 100%);
color: white;
box-shadow: 0 5px 15px rgba(239, 68, 68, 0.4);
}
/* Input styling */
.stTextInput input {
background: rgba(30, 41, 59, 0.6) !important;
border: 2px solid rgba(59, 130, 246, 0.3) !important;
border-radius: 12px !important;
color: #e0e7ff !important;
font-size: 1.1rem !important;
padding: 0.8rem 1.2rem !important;
transition: all 0.3s ease !important;
}
.stTextInput input:focus {
border-color: #3b82f6 !important;
box-shadow: 0 0 20px rgba(59, 130, 246, 0.3) !important;
}
/* Buttons */
.stButton button {
background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%) !important;
color: white !important;
border: none !important;
border-radius: 12px !important;
padding: 0.7rem 2rem !important;
font-weight: 600 !important;
font-size: 1rem !important;
transition: all 0.3s ease !important;
box-shadow: 0 5px 15px rgba(59, 130, 246, 0.4) !important;
}
.stButton button:hover {
transform: translateY(-2px) !important;
box-shadow: 0 8px 25px rgba(59, 130, 246, 0.6) !important;
}
/* Tabs */
.stTabs [data-baseweb="tab-list"] {
gap: 1rem;
background: rgba(30, 41, 59, 0.4);
padding: 0.5rem;
border-radius: 12px;
}
.stTabs [data-baseweb="tab"] {
background: transparent;
color: #94a3b8;
border-radius: 8px;
padding: 0.7rem 1.5rem;
font-weight: 600;
transition: all 0.3s ease;
}
.stTabs [data-baseweb="tab"]:hover {
background: rgba(59, 130, 246, 0.2);
color: #3b82f6;
}
.stTabs [aria-selected="true"] {
background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%) !important;
color: white !important;
}
/* Expanders */
.streamlit-expanderHeader {
background: linear-gradient(135deg, #1e293b 0%, #334155 100%);
border: 1px solid rgba(59, 130, 246, 0.3);
border-radius: 12px;
color: #e0e7ff !important;
font-weight: 600;
padding: 1rem 1.5rem;
}
.streamlit-expanderHeader:hover {
border-color: rgba(59, 130, 246, 0.6);
background: linear-gradient(135deg, #334155 0%, #475569 100%);
}
/* Progress bars */
.stProgress > div > div > div {
background: linear-gradient(90deg, #3b82f6 0%, #8b5cf6 100%);
border-radius: 10px;
}
/* Info boxes */
.stAlert {
background: rgba(30, 41, 59, 0.6) !important;
border: 1px solid rgba(59, 130, 246, 0.3) !important;
border-radius: 12px !important;
color: #e0e7ff !important;
}
/* Scrollbar */
::-webkit-scrollbar {
width: 10px;
height: 10px;
}
::-webkit-scrollbar-track {
background: #0f172a;
}
::-webkit-scrollbar-thumb {
background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%);
border-radius: 5px;
}
::-webkit-scrollbar-thumb:hover {
background: linear-gradient(135deg, #2563eb 0%, #1d4ed8 100%);
}
/* Footer */
.premium-footer {
text-align: center;
padding: 2rem;
margin-top: 3rem;
border-top: 1px solid rgba(59, 130, 246, 0.2);
color: #94a3b8;
}
/* Glow effect */
.glow {
animation: glow 2s ease-in-out infinite alternate;
}
@keyframes glow {
from {
box-shadow: 0 0 20px rgba(59, 130, 246, 0.4);
}
to {
box-shadow: 0 0 40px rgba(59, 130, 246, 0.8);
}
}
</style>
"""
@st.cache_resource
def initialize_agent(provider="huggingface", use_llm_reflection=False):
    """Initialize and cache the complete RAG agent.

    Builds (or reuses) the vector index over the project's ``data``
    directory, creates the LLM handler and reflection evaluator, and wires
    them into a LangGraph agent. ``@st.cache_resource`` makes this run once
    per distinct (provider, use_llm_reflection) combination.

    Args:
        provider: LLM backend identifier (e.g. "huggingface", "openai").
        use_llm_reflection: When True, hand the LLM to the reflection
            evaluator so it can critique answers; otherwise the evaluator
            gets no LLM (heuristic mode).

    Returns:
        The configured RAG agent.

    Raises:
        FileNotFoundError: If the project's ``data`` directory is missing.
    """
    # Resolve paths relative to the project root (parent of this file's
    # directory) so the app works regardless of the current working dir.
    # NOTE: the redundant local `from pathlib import Path` and unused
    # `import os` from the original were removed; Path is imported at module
    # level.
    current_file = Path(__file__).resolve()
    project_root = current_file.parent.parent
    data_dir = project_root / "data"
    chroma_dir = project_root / "chroma_db"

    # Fail fast with a clear message if the knowledge base is absent.
    if not data_dir.exists():
        raise FileNotFoundError(f"Data directory not found: {data_dir}")

    # Initialize RAG pipeline with correct paths; reuse an existing
    # ChromaDB index when one is already persisted.
    rag_pipeline = RAGPipeline(
        data_directory=str(data_dir),
        collection_name="rag_knowledge_base",
        persist_directory=str(chroma_dir)
    )
    rag_pipeline.build_index(force_rebuild=False)

    llm_handler = create_llm_handler(
        provider=provider,
        model_name="google/flan-t5-large",  # Force Flan-T5
        temperature=0.7,
        max_tokens=500
    )

    # Only pass the LLM through when LLM-based reflection is requested.
    reflection_evaluator = create_reflection_evaluator(
        llm_handler=llm_handler if use_llm_reflection else None,
        use_llm_reflection=use_llm_reflection
    )

    agent = create_rag_agent(
        rag_pipeline=rag_pipeline,
        llm_handler=llm_handler,
        reflection_evaluator=reflection_evaluator,
        max_iterations=2
    )
    return agent
def initialize_evaluator():
    """Build and return a fresh RAG evaluator instance."""
    evaluator = create_evaluator()
    return evaluator
def create_gauge_chart(value: float, title: str, max_value: float = 1.0) -> go.Figure:
    """Create a premium gauge chart for a score in [0, 1], shown as a percent.

    NOTE(review): ``max_value`` is currently unused — the gauge axis is
    fixed to a 0-100% scale.
    """
    # Colour bands: red below 40%, amber between 40% and 70%, green above.
    bands = [
        {'range': [0, 40], 'color': 'rgba(239, 68, 68, 0.3)'},
        {'range': [40, 70], 'color': 'rgba(245, 158, 11, 0.3)'},
        {'range': [70, 100], 'color': 'rgba(16, 185, 129, 0.3)'}
    ]
    gauge_cfg = {
        'axis': {'range': [None, 100], 'tickwidth': 1, 'tickcolor': "#94a3b8"},
        'bar': {'color': "#3b82f6"},
        'bgcolor': "rgba(30, 41, 59, 0.5)",
        'borderwidth': 2,
        'bordercolor': "rgba(59, 130, 246, 0.3)",
        'steps': bands,
        'threshold': {
            'line': {'color': "#8b5cf6", 'width': 4},
            'thickness': 0.75,
            'value': 80
        }
    }
    indicator = go.Indicator(
        mode="gauge+number+delta",
        value=value * 100,  # incoming score is a fraction; display as %
        domain={'x': [0, 1], 'y': [0, 1]},
        title={'text': title, 'font': {'size': 16, 'color': '#e0e7ff'}},
        number={'suffix': "%", 'font': {'size': 40, 'color': '#3b82f6'}},
        gauge=gauge_cfg
    )
    fig = go.Figure(indicator)
    # Transparent backgrounds so the app's dark gradient shows through.
    fig.update_layout(
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)',
        font={'color': "#e0e7ff", 'family': "Inter"},
        height=250,
        margin=dict(l=20, r=20, t=50, b=20)
    )
    return fig
def create_bar_chart(data: Dict[str, float], title: str) -> go.Figure:
    """Create a premium bar chart from a label -> score mapping."""
    labels = list(data.keys())
    values = list(data.values())
    # Bars are coloured by their own value on a red→amber→green scale.
    bars = go.Bar(
        x=labels,
        y=values,
        marker=dict(
            color=values,
            colorscale=[[0, '#ef4444'], [0.5, '#f59e0b'], [1, '#10b981']],
            line=dict(color='rgba(59, 130, 246, 0.5)', width=2)
        ),
        text=[f'{v:.3f}' for v in values],
        textposition='outside',
        textfont=dict(color='#e0e7ff', size=14)
    )
    fig = go.Figure(data=[bars])
    # y-axis pinned to [0, 1] since all plotted metrics are fractions.
    fig.update_layout(
        title=dict(text=title, font=dict(size=18, color='#e0e7ff')),
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(30, 41, 59, 0.3)',
        font={'color': "#e0e7ff", 'family': "Inter"},
        xaxis=dict(gridcolor='rgba(59, 130, 246, 0.1)'),
        yaxis=dict(gridcolor='rgba(59, 130, 246, 0.1)', range=[0, 1]),
        height=300,
        margin=dict(l=40, r=40, t=60, b=40)
    )
    return fig
def create_radar_chart(scores: Dict[str, float]) -> go.Figure:
    """Create a premium radar chart for a score breakdown."""
    # Humanise metric keys, e.g. "context_relevance" -> "Context Relevance".
    axis_labels = [key.replace('_', ' ').title() for key in scores.keys()]
    trace = go.Scatterpolar(
        r=list(scores.values()),
        theta=axis_labels,
        fill='toself',
        fillcolor='rgba(59, 130, 246, 0.3)',
        line=dict(color='#3b82f6', width=3)
    )
    fig = go.Figure(data=trace)
    # Radial axis fixed to [0, 1] — breakdown scores are fractions.
    fig.update_layout(
        polar=dict(
            bgcolor='rgba(30, 41, 59, 0.3)',
            radialaxis=dict(
                visible=True,
                range=[0, 1],
                gridcolor='rgba(59, 130, 246, 0.2)',
                tickfont=dict(color='#94a3b8')
            ),
            angularaxis=dict(
                gridcolor='rgba(59, 130, 246, 0.2)',
                tickfont=dict(color='#e0e7ff', size=11)
            )
        ),
        paper_bgcolor='rgba(0,0,0,0)',
        font={'color': "#e0e7ff", 'family': "Inter"},
        height=400,
        margin=dict(l=80, r=80, t=40, b=40)
    )
    return fig
def display_premium_metrics(evaluation_result: Dict, reflection_result: Dict):
    """Display the premium metrics dashboard (four tabs of charts and stats).

    Args:
        evaluation_result: Output of the evaluator; "metrics" may contain
            "rouge" and "bertscore" sub-dicts only when a reference answer
            was supplied.
        reflection_result: The agent's reflection dict ("score", "relevance",
            "recommendation", "reasoning", optionally "score_breakdown").
    """
    st.markdown("---")
    st.markdown("## 📊 **Dynamic Performance Dashboard**")
    metrics = evaluation_result.get("metrics", {})
    # Tab system
    tab1, tab2, tab3, tab4 = st.tabs([
        "🎯 **Overview**",
        "📈 **Quality Scores**",
        "🔍 **Reflection Analysis**",
        "📋 **Detailed Report**"
    ])
    with tab1:
        st.markdown("### Real-Time Performance Metrics")
        # Top row - Gauge charts
        col1, col2, col3 = st.columns(3)
        with col1:
            context_rel = metrics.get('context_relevance', 0)
            fig = create_gauge_chart(context_rel, "Context Relevance")
            st.plotly_chart(fig, use_container_width=True)
        with col2:
            reflection_score = reflection_result.get('score', 0)
            fig = create_gauge_chart(reflection_score, "Overall Quality")
            st.plotly_chart(fig, use_container_width=True)
        with col3:
            # Calculate average of the reference-based scores that exist;
            # fall back to the reflection score when neither is available.
            avg_score = 0
            count = 0
            if "rouge" in metrics:
                avg_score += sum(metrics["rouge"].values()) / len(metrics["rouge"])
                count += 1
            if "bertscore" in metrics:
                avg_score += metrics["bertscore"].get("f1", 0)
                count += 1
            if count > 0:
                avg_score /= count
            fig = create_gauge_chart(avg_score if count > 0 else reflection_score, "Combined Score")
            st.plotly_chart(fig, use_container_width=True)
        # Bottom row - Key stats rendered as custom HTML metric cards
        st.markdown("### Key Statistics")
        col1, col2, col3, col4 = st.columns(4)
        with col1:
            st.markdown(f"""
<div class="metric-card">
<div class="metric-value">{metrics.get('answer_length', 0)}</div>
<div class="metric-label">Characters</div>
</div>
""", unsafe_allow_html=True)
        with col2:
            st.markdown(f"""
<div class="metric-card">
<div class="metric-value">{metrics.get('word_count', 0)}</div>
<div class="metric-label">Words</div>
</div>
""", unsafe_allow_html=True)
        with col3:
            st.markdown(f"""
<div class="metric-card">
<div class="metric-value">{evaluation_result.get('num_contexts', 0)}</div>
<div class="metric-label">Contexts Used</div>
</div>
""", unsafe_allow_html=True)
        with col4:
            # Iteration count is stashed in session state by main() after
            # each agent query.
            iterations = st.session_state.get('iterations', 0)
            st.markdown(f"""
<div class="metric-card">
<div class="metric-value">{iterations}</div>
<div class="metric-label">Iterations</div>
</div>
""", unsafe_allow_html=True)
    with tab2:
        st.markdown("### Quality Assessment Scores")
        col1, col2 = st.columns(2)
        with col1:
            # ROUGE is only present when a reference answer was provided.
            if "rouge" in metrics:
                st.markdown("#### 📝 ROUGE Scores")
                rouge_data = {
                    'ROUGE-1': metrics["rouge"].get('rouge1', 0),
                    'ROUGE-2': metrics["rouge"].get('rouge2', 0),
                    'ROUGE-L': metrics["rouge"].get('rougeL', 0)
                }
                fig = create_bar_chart(rouge_data, "ROUGE Score Analysis")
                st.plotly_chart(fig, use_container_width=True)
            else:
                st.info("💡 Add a reference answer to see ROUGE scores")
        with col2:
            # Likewise, BERTScore requires a reference answer.
            if "bertscore" in metrics:
                st.markdown("#### 🧠 BERTScore Metrics")
                bert_data = {
                    'Precision': metrics["bertscore"].get('precision', 0),
                    'Recall': metrics["bertscore"].get('recall', 0),
                    'F1 Score': metrics["bertscore"].get('f1', 0)
                }
                fig = create_bar_chart(bert_data, "BERTScore Analysis")
                st.plotly_chart(fig, use_container_width=True)
            else:
                st.info("💡 Add a reference answer to see BERTScore")
    with tab3:
        st.markdown("### Reflection Analysis Dashboard")
        col1, col2 = st.columns([1, 2])
        with col1:
            # Map the relevance verdict onto a coloured status badge.
            relevance = reflection_result.get('relevance', 'Unknown')
            if relevance == "Relevant":
                badge_class = "status-relevant"
                icon = "✅"
            elif relevance == "Partially Relevant":
                badge_class = "status-partial"
                icon = "⚠️"
            else:
                badge_class = "status-irrelevant"
                icon = "❌"
            st.markdown(f"""
<div style="text-align: center; padding: 2rem;">
<div class="status-badge {badge_class}">
{icon} {relevance}
</div>
<h2 style="color: #3b82f6; margin-top: 1.5rem;">{reflection_result.get('score', 0):.1%}</h2>
<p style="color: #94a3b8;">Quality Score</p>
<div style="margin-top: 1.5rem;">
<strong style="color: #e0e7ff;">Recommendation:</strong>
<p style="color: #3b82f6; font-size: 1.1rem; margin-top: 0.5rem;">
{reflection_result.get('recommendation', 'N/A')}
</p>
</div>
</div>
""", unsafe_allow_html=True)
        with col2:
            st.markdown("#### 💭 Reasoning")
            st.markdown(f"""
<div class="answer-box">
{reflection_result.get('reasoning', 'No reasoning provided')}
</div>
""", unsafe_allow_html=True)
        # Radar chart for score breakdown (only heuristic reflection
        # produces a per-dimension breakdown).
        if reflection_result.get('method') == 'heuristic':
            breakdown = reflection_result.get('score_breakdown', {})
            if breakdown:
                st.markdown("#### 📊 Score Breakdown")
                fig = create_radar_chart(breakdown)
                st.plotly_chart(fig, use_container_width=True)
    with tab4:
        st.markdown("### Detailed Evaluation Report")
        # Timestamp - FIXED (timezone-aware UTC, not naive local time)
        timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
        st.markdown(f"**Generated:** `{timestamp}`")
        col1, col2 = st.columns(2)
        with col1:
            st.markdown("#### Evaluation Metrics")
            st.json(metrics)
        with col2:
            st.markdown("#### Reflection Analysis")
            st.json(reflection_result)
        # Download button for the combined report as pretty-printed JSON.
        combined_data = {
            "timestamp": timestamp,
            "query": evaluation_result.get("query", ""),
            "generated_answer": evaluation_result.get("generated_answer", ""),
            "evaluation_metrics": metrics,
            "reflection_analysis": reflection_result
        }
        json_str = json.dumps(combined_data, indent=2)
        st.download_button(
            label="📥 **Download Complete Report (JSON)**",
            data=json_str,
            file_name=f"rag_evaluation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
            mime="application/json",
            use_container_width=True
        )
def main():
    """Main Premium Streamlit app: page setup, sidebar controls, query loop."""
    st.set_page_config(
        page_title="LangGraph RAG Q&A Agent - Premium",
        page_icon="🤖",
        layout="wide",
        initial_sidebar_state="expanded"
    )
    # Apply premium CSS
    st.markdown(PREMIUM_CSS, unsafe_allow_html=True)
    # Premium Header
    st.markdown("""
<div class="premium-header glow">
<h1>🤖 LangGraph RAG Q&A Agent</h1>
<p>Next-Gen AI Assistant with Real-Time Analytics & Dynamic Dashboards</p>
</div>
""", unsafe_allow_html=True)
    # Sidebar: provider/reflection/evaluation controls, optional reference
    # answer, and one-click sample queries.
    with st.sidebar:
        st.markdown("## ⚙️ **Control Panel**")
        provider = st.selectbox(
            "🔌 LLM Provider",
            ["huggingface", "openai"],
            help="Select your preferred LLM provider"
        )
        use_llm_reflection = st.checkbox(
            "🧠 LLM Reflection Mode",
            value=False,
            help="Enable AI-powered reflection (more accurate, slower)"
        )
        enable_evaluation = st.checkbox(
            "📊 Advanced Analytics",
            value=True,
            help="Enable comprehensive evaluation metrics"
        )
        st.markdown("---")
        st.markdown("### 📝 **Reference Answer**")
        reference_answer = st.text_area(
            "Optional: For comparison metrics",
            placeholder="Provide a reference answer to calculate advanced metrics...",
            height=100,
            label_visibility="collapsed"
        )
        st.markdown("---")
        st.markdown("### 💡 **Quick Queries**")
        sample_queries = [
            ("🤖", "What is machine learning?"),
            ("🐍", "Explain Python programming"),
            ("☁️", "What is cloud computing?"),
            ("💾", "Tell me about databases"),
            ("🧠", "What is deep learning?"),
            ("📊", "Explain supervised learning"),
            ("🗄️", "What are NoSQL databases?"),
            ("💬", "What is NLP?")
        ]
        # Clicking a sample stores it in session state so the main text
        # input picks it up on the rerun.
        for icon, query in sample_queries:
            if st.button(f"{icon} {query}", key=f"sample_{query}", use_container_width=True):
                st.session_state["query"] = query
        st.markdown("---")
        st.markdown("""
<div style="padding: 1rem; background: rgba(59, 130, 246, 0.1); border-radius: 10px; border: 1px solid rgba(59, 130, 246, 0.3);">
<h4 style="color: #3b82f6; margin: 0;">📊 Metrics Available</h4>
<ul style="color: #94a3b8; font-size: 0.9rem; margin-top: 0.5rem;">
<li>Context Relevance</li>
<li>Quality Scores</li>
<li>ROUGE Analysis*</li>
<li>BERTScore*</li>
<li>Reflection Insights</li>
</ul>
<p style="color: #64748b; font-size: 0.8rem; margin-top: 0.5rem;">*Requires reference answer</p>
</div>
""", unsafe_allow_html=True)
    # Initialize components (cached across reruns by @st.cache_resource);
    # any failure stops the app with an error in the sidebar.
    try:
        with st.spinner("🚀 Initializing AI Agent..."):
            agent = initialize_agent(provider, use_llm_reflection)
            if enable_evaluation:
                evaluator = initialize_evaluator()
        st.sidebar.success("✅ **System Online**")
    except Exception as e:
        st.sidebar.error(f"❌ **Error:** {str(e)[:50]}...")
        st.stop()
    # Main query interface; pre-filled from session state when a sample
    # query button was clicked.
    st.markdown("### 💬 **Ask Your Question**")
    query = st.text_input(
        "Query input",
        value=st.session_state.get("query", ""),
        placeholder="Type your question about AI, Python, ML, Cloud, or Databases...",
        label_visibility="collapsed"
    )
    # NOTE(review): col3-col5 are unused; the 5-way split narrows the two
    # buttons for layout purposes.
    col1, col2, col3, col4, col5 = st.columns([2, 2, 2, 2, 2])
    with col1:
        submit_button = st.button("🚀 **Ask Question**", type="primary", use_container_width=True)
    with col2:
        clear_button = st.button("🗑️ **Clear**", use_container_width=True)
    if clear_button:
        st.session_state.clear()
        st.rerun()
    # Process query
    if submit_button and query:
        with st.spinner("🤔 Processing your question..."):
            try:
                result = agent.query(query)
                # Store iterations for the dashboard's "Iterations" card
                st.session_state['iterations'] = result.get('iteration', 0)
                # Display answer - CRITICAL SECTION
                st.markdown("---")
                st.markdown("## 💬 **AI Response**")
                # Get answer from multiple possible keys
                answer = result.get('final_response', '') or result.get('answer', '')
                if answer and answer.strip():
                    st.markdown(f"""
<div class="answer-box glow">
{answer}
</div>
""", unsafe_allow_html=True)
                else:
                    st.warning("⚠️ Answer was generated but appears empty. Check terminal output.")
                    st.code(str(result), language="json")  # Debug output
                # Show iteration info
                if result.get("iteration", 0) > 0:
                    st.info(f"🔄 Answer refined {result['iteration']} time(s) using reflection feedback")
                # Evaluation (only when Advanced Analytics is enabled;
                # evaluator is guaranteed to exist in that case)
                if enable_evaluation:
                    with st.spinner("📊 Calculating analytics..."):
                        retrieved_contexts = None
                        if result.get("retrieved_chunks"):
                            retrieved_contexts = [chunk["content"] for chunk in result["retrieved_chunks"]]
                        evaluation_result = evaluator.evaluate_response(
                            query=query,
                            generated_answer=answer,
                            reference_answer=reference_answer if reference_answer.strip() else None,
                            retrieved_contexts=retrieved_contexts
                        )
                        display_premium_metrics(evaluation_result, result.get("reflection", {}))
                # Processing details: plan and retrieved context chunks
                st.markdown("---")
                st.markdown("## 🔍 **Processing Pipeline**")
                col1, col2 = st.columns(2)
                with col1:
                    with st.expander("📋 **Planning Phase**", expanded=False):
                        st.markdown(f"""
<div style="background: rgba(30, 41, 59, 0.5); padding: 1rem; border-radius: 10px; color: #e0e7ff;">
{result.get("plan", "No plan available")}
</div>
""", unsafe_allow_html=True)
                with col2:
                    chunks = result.get('retrieved_chunks', [])
                    with st.expander(f"🔍 **Retrieved Context** ({len(chunks)} chunks)", expanded=False):
                        if chunks:
                            # Preview each chunk, truncated to 300 chars.
                            for i, chunk in enumerate(chunks, 1):
                                st.markdown(f"""
<div style="background: rgba(30, 41, 59, 0.5); padding: 1rem; border-radius: 10px; margin-bottom: 1rem; border-left: 3px solid #3b82f6;">
<strong style="color: #3b82f6;">Chunk {i}</strong> -
<span style="color: #94a3b8;">{chunk['metadata']['source']}</span>
<br>
<small style="color: #64748b;">Similarity: {chunk['similarity_score']:.3f}</small>
<p style="color: #e0e7ff; margin-top: 0.5rem; font-size: 0.9rem;">
{chunk["content"][:300]}{'...' if len(chunk["content"]) > 300 else ''}
</p>
</div>
""", unsafe_allow_html=True)
                        else:
                            st.info("No context retrieval needed for this query")
            except Exception as e:
                st.error(f"❌ **Error:** {str(e)}")
                with st.expander("🔍 **Error Details**"):
                    import traceback
                    st.code(traceback.format_exc())
    # Premium Footer
    st.markdown("""
<div class="premium-footer">
<p style="font-size: 1.1rem; font-weight: 600; color: #3b82f6;">
Built with LangGraph • LangChain • ChromaDB • Streamlit
</p>
<p style="font-size: 0.9rem; margin-top: 0.5rem;">
Knowledge Domains: AI • Python • Machine Learning • Cloud Computing • Databases
</p>
<p style="font-size: 0.8rem; color: #64748b; margin-top: 1rem;">
© 2025 LangGraph RAG Agent | Premium Edition
</p>
</div>
""", unsafe_allow_html=True)
if __name__ == "__main__":
main()