|
|
import json |
|
|
import os |
|
|
import traceback |
|
|
import uuid |
|
|
from datetime import datetime |
|
|
from typing import Dict |
|
|
|
|
|
import pandas as pd |
|
|
import streamlit as st |
|
|
from datasets import load_dataset |
|
|
from dotenv import load_dotenv |
|
|
|
|
|
|
|
|
# Import the agent lazily so this debug UI can still start (and report the
# failure message) when langgraph_agent or one of its dependencies is missing.
try:
    from langgraph_agent import DataAnalystAgent
except ImportError as import_exc:
    AGENT_AVAILABLE = False
    IMPORT_ERROR = str(import_exc)
else:
    AGENT_AVAILABLE = True
|
|
|
|
|
|
|
|
load_dotenv() |
|
|
|
|
|
|
|
|
# Streamlit page setup — must run before any other st.* UI call.
st.set_page_config(
    page_title="π€ LangGraph Data Analyst Agent (Debug)",
    page_icon="π€",
    layout="wide",
    initial_sidebar_state="expanded",
)
|
|
|
|
|
|
|
|
def check_environment():
    """Render a diagnostics section describing the runtime environment.

    Reports the Python version, whether the app runs on Hugging Face
    Spaces, which API keys are configured (presence/length only — the
    secrets themselves are never displayed), whether the LangGraph agent
    module imported successfully, and which required packages are
    importable.
    """
    st.markdown("## π Environment Debug Info")

    import sys

    st.write(f"**Python Version:** {sys.version}")

    # Hugging Face Spaces injects SPACE_ID into the environment.
    is_hf_space = os.environ.get("SPACE_ID") is not None
    st.write(f"**Running on Hugging Face Spaces:** {is_hf_space}")
    if is_hf_space:
        st.write(f"**Space ID:** {os.environ.get('SPACE_ID', 'Unknown')}")

    nebius_key = os.environ.get("NEBIUS_API_KEY")
    openai_key = os.environ.get("OPENAI_API_KEY")
    st.write(f"**Nebius API Key Available:** {'Yes' if nebius_key else 'No'}")
    st.write(f"**OpenAI API Key Available:** {'Yes' if openai_key else 'No'}")

    # Show only the key length as a sanity check, never the value.
    if nebius_key:
        st.write(f"**Nebius Key Length:** {len(nebius_key)} characters")
    if openai_key:
        st.write(f"**OpenAI Key Length:** {len(openai_key)} characters")

    # BUGFIX: these f-strings were split across physical lines in the
    # original source (a SyntaxError for single-quoted strings); rejoined.
    st.write(
        f"**LangGraph Agent Import:** {'β Success' if AGENT_AVAILABLE else 'β Failed'}"
    )
    if not AGENT_AVAILABLE:
        st.error(f"Import Error: {IMPORT_ERROR}")

    required_packages = [
        "langchain",
        "langchain_core",
        "langchain_openai",
        "langgraph",
        "datasets",
        "pandas",
    ]

    st.markdown("### π¦ Package Availability")
    for package in required_packages:
        try:
            __import__(package)
            st.write(f"β {package}")
        except ImportError as e:
            st.write(f"β {package} - {str(e)}")
|
|
|
|
|
|
|
|
def test_simple_agent():
    """Smoke-test agent construction and (on demand) a single invocation.

    Bails out early when the agent module failed to import or no API key
    is configured. Otherwise constructs ``DataAnalystAgent`` and offers a
    button that sends one trivial query and displays the response messages.
    Broad ``except Exception`` handlers are deliberate here: this is a
    debug tool whose job is to surface any failure with a traceback.
    """
    if not AGENT_AVAILABLE:
        st.error("Cannot test agent - import failed")
        return

    st.markdown("## π§ͺ Agent Test")

    # Prefer the Nebius key; fall back to OpenAI.
    api_key = os.environ.get("NEBIUS_API_KEY") or os.environ.get("OPENAI_API_KEY")
    if not api_key:
        st.error("No API key found!")
        return

    # BUGFIX: several string literals below were split across physical lines
    # in the original source (a SyntaxError); they have been rejoined.
    st.write("**API Key:** β Available")

    try:
        st.write("**Creating Agent...**")
        agent = DataAnalystAgent(api_key=api_key)
        st.write("β Agent created successfully")

        if st.button("π§ͺ Test Simple Query"):
            with st.spinner("Testing agent with simple query..."):
                try:
                    result = agent.invoke("Hello, are you working?", "debug_test")
                    st.success("β Agent responded successfully!")

                    st.markdown("**Response Messages:**")
                    for i, msg in enumerate(result.get("messages", [])):
                        st.write(
                            f"{i+1}. {type(msg).__name__}: {getattr(msg, 'content', 'No content')[:100]}..."
                        )

                except Exception as e:
                    st.error(f"β Agent test failed: {str(e)}")
                    st.code(traceback.format_exc())

    except Exception as e:
        st.error(f"β Agent creation failed: {str(e)}")
        st.code(traceback.format_exc())
|
|
|
|
|
|
|
|
def test_dataset_loading():
    """Verify the Bitext customer-support dataset downloads and parses.

    Loads the dataset via ``datasets.load_dataset``, converts the ``train``
    split to a DataFrame, and previews the first three rows. Any failure
    (network, missing library, schema change) is shown with a traceback.
    """
    st.markdown("## π Dataset Test")

    try:
        with st.spinner("Loading dataset..."):
            dataset = load_dataset(
                "bitext/Bitext-customer-support-llm-chatbot-training-dataset"
            )
            df = pd.DataFrame(dataset["train"])
            # BUGFIX: this f-string was split across physical lines in the
            # original source (a SyntaxError); rejoined here.
            st.success(f"β Dataset loaded: {len(df):,} records")
            st.dataframe(df.head(3))
    except Exception as e:
        st.error(f"β Dataset loading failed: {str(e)}")
        st.code(traceback.format_exc())
|
|
|
|
|
|
|
|
def main():
    """Entry point: render each diagnostic section, then common fixes."""
    st.title("π§ LangGraph Agent Debug Tool")
    st.markdown("This tool helps diagnose issues with the LangGraph agent deployment.")

    # Run every diagnostic section in order, each followed by a divider.
    for section in (check_environment, test_dataset_loading, test_simple_agent):
        section()
        st.markdown("---")

    st.markdown("## π‘ Common Solutions")
    st.markdown(
        """
    **If agent creation fails:**
    - Check API key is correctly set as Space secret
    - Verify all dependencies are in requirements.txt
    - Check for import errors above

    **If agent hangs on 'thinking':**
    - API key might be invalid/expired
    - Network connectivity issues to API endpoint
    - Unhandled exceptions in LangGraph workflow

    **If dataset loading fails:**
    - Network connectivity issues
    - Hugging Face datasets library not properly installed
    """
    )
|
|
|
|
|
|
|
|
# Run the debug dashboard when executed as a script (Streamlit imports and
# runs this module, so the guard also fires under `streamlit run`).
if __name__ == "__main__":
    main()
|
|
|