# Source: Hugging Face Space file uploaded by SaritMeshesha
# Commit: "Upload 2 files" (186205a, verified) — raw/history/blame artifact
# Original file size: 5.31 kB
import json
import os
import traceback
import uuid
from datetime import datetime
from typing import Dict
import pandas as pd
import streamlit as st
from datasets import load_dataset
from dotenv import load_dotenv
# The agent module may be missing in some deployments; record the failure
# instead of crashing so the debug UI can surface it later.
try:
    from langgraph_agent import DataAnalystAgent

    AGENT_AVAILABLE = True
except ImportError as exc:
    AGENT_AVAILABLE = False
    IMPORT_ERROR = str(exc)

# Pull secrets (API keys, etc.) from a local .env file when present.
load_dotenv()

# Streamlit requires set_page_config to run before any other st.* call.
st.set_page_config(
    page_title="πŸ€– LangGraph Data Analyst Agent (Debug)",
    layout="wide",
    page_icon="πŸ€–",
    initial_sidebar_state="expanded",
)
def check_environment():
    """Render debug information about the runtime environment.

    Reports the Python version, whether the app runs on Hugging Face
    Spaces, API-key availability (without leaking the keys), the agent
    import status, and importability of each required package.
    """
    st.markdown("## πŸ” Environment Debug Info")

    import sys

    st.write(f"**Python Version:** {sys.version}")

    # SPACE_ID is injected by the Hugging Face Spaces runtime only.
    on_spaces = os.environ.get("SPACE_ID") is not None
    st.write(f"**Running on Hugging Face Spaces:** {on_spaces}")
    if on_spaces:
        st.write(f"**Space ID:** {os.environ.get('SPACE_ID', 'Unknown')}")

    # Report key presence and length only — never the key material itself.
    key_nebius = os.environ.get("NEBIUS_API_KEY")
    key_openai = os.environ.get("OPENAI_API_KEY")
    st.write(f"**Nebius API Key Available:** {'Yes' if key_nebius else 'No'}")
    st.write(f"**OpenAI API Key Available:** {'Yes' if key_openai else 'No'}")
    if key_nebius:
        st.write(f"**Nebius Key Length:** {len(key_nebius)} characters")
    if key_openai:
        st.write(f"**OpenAI Key Length:** {len(key_openai)} characters")

    st.write(
        f"**LangGraph Agent Import:** {'βœ… Success' if AGENT_AVAILABLE else '❌ Failed'}"
    )
    if not AGENT_AVAILABLE:
        st.error(f"Import Error: {IMPORT_ERROR}")

    # Probe each dependency by attempting an import at runtime.
    st.markdown("### πŸ“¦ Package Availability")
    for pkg in (
        "langchain",
        "langchain_core",
        "langchain_openai",
        "langgraph",
        "datasets",
        "pandas",
    ):
        try:
            __import__(pkg)
        except ImportError as err:
            st.write(f"❌ {pkg} - {str(err)}")
        else:
            st.write(f"βœ… {pkg}")
def test_simple_agent():
    """Create a DataAnalystAgent and offer a one-click smoke-test query.

    Bails out early when the agent module failed to import or when no
    API key is configured; otherwise constructs the agent and, on button
    press, invokes it with a trivial prompt and dumps the reply messages.
    """
    if not AGENT_AVAILABLE:
        st.error("Cannot test agent - import failed")
        return

    st.markdown("## πŸ§ͺ Agent Test")

    # Prefer the Nebius key; fall back to OpenAI if it is absent.
    api_key = os.environ.get("NEBIUS_API_KEY") or os.environ.get("OPENAI_API_KEY")
    if not api_key:
        st.error("No API key found!")
        return
    st.write("**API Key:** βœ… Available")

    try:
        st.write("**Creating Agent...**")
        agent = DataAnalystAgent(api_key=api_key)
        st.write("βœ… Agent created successfully")

        if st.button("πŸ§ͺ Test Simple Query"):
            with st.spinner("Testing agent with simple query..."):
                try:
                    reply = agent.invoke("Hello, are you working?", "debug_test")
                    st.success("βœ… Agent responded successfully!")
                    st.markdown("**Response Messages:**")
                    # Show a truncated preview of each message in the reply.
                    for ordinal, msg in enumerate(reply.get("messages", []), start=1):
                        st.write(
                            f"{ordinal}. {type(msg).__name__}: {getattr(msg, 'content', 'No content')[:100]}..."
                        )
                except Exception as query_err:
                    st.error(f"❌ Agent test failed: {str(query_err)}")
                    st.code(traceback.format_exc())
    except Exception as create_err:
        st.error(f"❌ Agent creation failed: {str(create_err)}")
        st.code(traceback.format_exc())
def test_dataset_loading():
    """Download the Bitext support dataset and preview the first rows.

    Any failure (network, missing library, schema change) is caught and
    rendered with a full traceback rather than crashing the app.
    """
    st.markdown("## πŸ“Š Dataset Test")
    try:
        # NOTE(review): source indentation was mangled; assuming the
        # success/preview calls sit inside the spinner context.
        with st.spinner("Loading dataset..."):
            raw = load_dataset(
                "bitext/Bitext-customer-support-llm-chatbot-training-dataset"
            )
            frame = pd.DataFrame(raw["train"])
            st.success(f"βœ… Dataset loaded: {len(frame):,} records")
            st.dataframe(frame.head(3))
    except Exception as load_err:
        st.error(f"❌ Dataset loading failed: {str(load_err)}")
        st.code(traceback.format_exc())
def main():
    """Run every diagnostic section in order, separated by rules."""
    st.title("πŸ”§ LangGraph Agent Debug Tool")
    st.markdown("This tool helps diagnose issues with the LangGraph agent deployment.")

    check_environment()
    st.markdown("---")

    test_dataset_loading()
    st.markdown("---")

    test_simple_agent()
    st.markdown("---")

    st.markdown("## πŸ’‘ Common Solutions")
    # NOTE(review): blank lines were stripped from the pasted source;
    # paragraph breaks below are reconstructed so markdown renders cleanly.
    st.markdown(
        """
**If agent creation fails:**
- Check API key is correctly set as Space secret
- Verify all dependencies are in requirements.txt
- Check for import errors above

**If agent hangs on 'thinking':**
- API key might be invalid/expired
- Network connectivity issues to API endpoint
- Unhandled exceptions in LangGraph workflow

**If dataset loading fails:**
- Network connectivity issues
- Hugging Face datasets library not properly installed
"""
    )


if __name__ == "__main__":
    main()