File size: 5,312 Bytes
186205a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 |
import json
import os
import traceback
import uuid
from datetime import datetime
from typing import Dict
import pandas as pd
import streamlit as st
from datasets import load_dataset
from dotenv import load_dotenv
# Import the LangGraph agent defensively: on a fresh deployment the module
# (or one of its transitive dependencies) may be missing, and this debug
# tool must still render so the failure can be diagnosed in the UI.
try:
    from langgraph_agent import DataAnalystAgent

    AGENT_AVAILABLE = True
except ImportError as e:
    AGENT_AVAILABLE = False
    # Preserve the reason so check_environment() can surface it to the user.
    IMPORT_ERROR = str(e)

# Load environment variables from a local .env file (no-op if absent);
# API keys are read from the environment further below.
load_dotenv()
# Set up page config
st.set_page_config(
page_title="π€ LangGraph Data Analyst Agent (Debug)",
layout="wide",
page_icon="π€",
initial_sidebar_state="expanded",
)
def check_environment():
    """Render a diagnostic panel: Python version, hosting platform,
    API-key presence, agent import status, and package availability."""
    st.markdown("## π Environment Debug Info")

    import sys

    st.write(f"**Python Version:** {sys.version}")

    # Hugging Face Spaces expose SPACE_ID in the environment — use it to
    # detect the hosting platform.
    is_hf_space = os.environ.get("SPACE_ID") is not None
    st.write(f"**Running on Hugging Face Spaces:** {is_hf_space}")
    if is_hf_space:
        st.write(f"**Space ID:** {os.environ.get('SPACE_ID', 'Unknown')}")

    # Report key presence first, then lengths — never the key values.
    api_keys = (
        ("Nebius", os.environ.get("NEBIUS_API_KEY")),
        ("OpenAI", os.environ.get("OPENAI_API_KEY")),
    )
    for label, value in api_keys:
        st.write(f"**{label} API Key Available:** {'Yes' if value else 'No'}")
    for label, value in api_keys:
        if value:
            st.write(f"**{label} Key Length:** {len(value)} characters")

    # Import status recorded by the top-of-file try/except.
    import_status = "β Success" if AGENT_AVAILABLE else "β Failed"
    st.write(f"**LangGraph Agent Import:** {import_status}")
    if not AGENT_AVAILABLE:
        st.error(f"Import Error: {IMPORT_ERROR}")

    # Probe each required package individually so a single missing
    # dependency is pinpointed rather than hidden behind one failure.
    required_packages = [
        "langchain",
        "langchain_core",
        "langchain_openai",
        "langgraph",
        "datasets",
        "pandas",
    ]
    st.markdown("### π¦ Package Availability")
    for package in required_packages:
        try:
            __import__(package)
        except ImportError as err:
            st.write(f"β {package} - {err}")
        else:
            st.write(f"β {package}")
def test_simple_agent():
    """Smoke-test the agent: create it, then (on demand) send one query."""
    # Guard clause: without a successful import there is nothing to test.
    if not AGENT_AVAILABLE:
        st.error("Cannot test agent - import failed")
        return

    st.markdown("## π§ͺ Agent Test")

    # Prefer Nebius, fall back to OpenAI.
    api_key = os.environ.get("NEBIUS_API_KEY") or os.environ.get("OPENAI_API_KEY")
    if not api_key:
        st.error("No API key found!")
        return
    st.write("**API Key:** β Available")

    try:
        st.write("**Creating Agent...**")
        agent = DataAnalystAgent(api_key=api_key)
        st.write("β Agent created successfully")

        # The query runs only when the user clicks — creation alone is
        # already a useful signal.
        if st.button("π§ͺ Test Simple Query"):
            with st.spinner("Testing agent with simple query..."):
                try:
                    result = agent.invoke("Hello, are you working?", "debug_test")
                    st.success("β Agent responded successfully!")
                    st.markdown("**Response Messages:**")
                    for position, message in enumerate(
                        result.get("messages", []), start=1
                    ):
                        snippet = getattr(message, "content", "No content")[:100]
                        st.write(f"{position}. {type(message).__name__}: {snippet}...")
                except Exception as query_err:
                    st.error(f"β Agent test failed: {str(query_err)}")
                    st.code(traceback.format_exc())
    except Exception as creation_err:
        st.error(f"β Agent creation failed: {str(creation_err)}")
        st.code(traceback.format_exc())
def test_dataset_loading():
    """Verify the Hugging Face dataset downloads and loads into pandas."""
    st.markdown("## π Dataset Test")
    try:
        with st.spinner("Loading dataset..."):
            dataset = load_dataset(
                "bitext/Bitext-customer-support-llm-chatbot-training-dataset"
            )
            frame = pd.DataFrame(dataset["train"])
            st.success(f"β Dataset loaded: {len(frame):,} records")
            # A small preview is enough to confirm the schema looks right.
            st.dataframe(frame.head(3))
    except Exception as load_err:
        st.error(f"β Dataset loading failed: {str(load_err)}")
        st.code(traceback.format_exc())
def main():
    """Render the debug page: environment, dataset, and agent checks,
    followed by a troubleshooting cheat sheet."""
    st.title("π§ LangGraph Agent Debug Tool")
    st.markdown("This tool helps diagnose issues with the LangGraph agent deployment.")
    st.markdown("---")

    # Run each diagnostic section in order, separated by a rule.
    for section in (check_environment, test_dataset_loading, test_simple_agent):
        section()
        st.markdown("---")

    st.markdown("## π‘ Common Solutions")
    st.markdown(
        """
    **If agent creation fails:**
    - Check API key is correctly set as Space secret
    - Verify all dependencies are in requirements.txt
    - Check for import errors above

    **If agent hangs on 'thinking':**
    - API key might be invalid/expired
    - Network connectivity issues to API endpoint
    - Unhandled exceptions in LangGraph workflow

    **If dataset loading fails:**
    - Network connectivity issues
    - Hugging Face datasets library not properly installed
    """
    )


if __name__ == "__main__":
    main()
|