Initial commit to hf space for hackathon
Browse files- .env.example +74 -0
- .gitattributes +3 -0
- .gitignore +144 -0
- README.md +80 -8
- agent_cache.py +110 -0
- app.py +1780 -0
- bonus_ebird_tools.py +841 -0
- docs/bonus_ebird_docs/EBIRD_MCP_README.md +972 -0
- examples/another_bird.jpg +3 -0
- examples/b64_helper.py +33 -0
- examples/bird_example_1.jpg +3 -0
- examples/bird_example_2.jpg +3 -0
- examples/bird_example_3.jpg +0 -0
- langgraph.json +8 -0
- langgraph_agent/__init__.py +21 -0
- langgraph_agent/__main__.py +8 -0
- langgraph_agent/agents.py +100 -0
- langgraph_agent/agents.py.legacy +366 -0
- langgraph_agent/agents_README.md +1195 -0
- langgraph_agent/config.py +67 -0
- langgraph_agent/main.py +105 -0
- langgraph_agent/mcp_clients.py +103 -0
- langgraph_agent/mcp_clients.py.ebird +106 -0
- langgraph_agent/prompts.py +213 -0
- langgraph_agent/simple_demo.py +78 -0
- langgraph_agent/structured_output.py +167 -0
- langgraph_agent/subagent_config.py +221 -0
- langgraph_agent/subagent_factory.py +85 -0
- langgraph_agent/subagent_router.py.legacy +91 -0
- langgraph_agent/subagent_supervisor.py +55 -0
- langgraph_agent/test_agent.py +89 -0
- langgraph_agent/test_agent.py.v1 +36 -0
- langgraph_agent/tools.py +0 -0
- modal_bird_classifier.py +262 -0
- nuthatch_tools.py +737 -0
- requirements.txt +24 -0
- test_file.py +1 -0
- tests/test_agent_cache.py +90 -0
- tests/test_modal_direct.py +283 -0
- tests/test_nuthatch.py +205 -0
- tests/test_structured_output.py +136 -0
- tests/test_subagents.py +301 -0
- tests/theme_builder.py +3 -0
.env.example
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
# Environment
|
| 5 |
+
ENVIRONMENT=production
|
| 6 |
+
|
| 7 |
+
# ------------------------------------------------
|
| 8 |
+
|
| 9 |
+
# Deprecated - use provider-specific models below
|
| 10 |
+
#LLM_MODEL=gpt-4o-mini
|
| 11 |
+
#####
|
| 12 |
+
|
| 13 |
+
# ------------------------------------------------
|
| 14 |
+
# LLM Configuration
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Provider-Specific Models (recommended)
|
| 18 |
+
DEFAULT_OPENAI_MODEL=gpt-4o-mini
|
| 19 |
+
DEFAULT_HF_MODEL=Qwen/Qwen3-VL-30B-A3B-Instruct
|
| 20 |
+
DEFAULT_ANTHROPIC_MODEL=claude-sonnet-4-5-20250929
|
| 21 |
+
|
| 22 |
+
# Provider-Specific Temperature Settings
|
| 23 |
+
OPENAI_TEMPERATURE=0.0
|
| 24 |
+
HF_TEMPERATURE=0.1
|
| 25 |
+
ANTHROPIC_TEMPERATURE=0.0
|
| 26 |
+
|
| 27 |
+
# ------------------------------------------------
|
| 28 |
+
|
| 29 |
+
################################################
|
| 30 |
+
|
| 31 |
+
# ------------------------------------------------
|
| 32 |
+
|
| 33 |
+
################################################
|
| 34 |
+
# MODAL Bird Classifier MCP Server
|
| 35 |
+
################################################
|
| 36 |
+
# # Client side API key for Modal bird-classifier-api-key (set in modal as API_KEY)
|
| 37 |
+
# use random string e.g., openssl rand -base64 32
|
| 38 |
+
BIRD_CLASSIFIER_API_KEY=<secure-key-for-modal-bird-classifier>
|
| 39 |
+
|
| 40 |
+
# https://.../mcp
|
| 41 |
+
MODAL_MCP_URL=<url-for-modal-mcp-server>
|
| 42 |
+
|
| 43 |
+
# ------------------------------------------------
|
| 44 |
+
|
| 45 |
+
##############################################
|
| 46 |
+
# eBird MCP Server - LEGACY
|
| 47 |
+
##############################################
|
| 48 |
+
# Use true for HF Space (subprocess mode)
|
| 49 |
+
#EBIRD_USE_STDIO=true
|
| 50 |
+
|
| 51 |
+
# Cornell eBird API
|
| 52 |
+
#EBIRD_API_KEY=
|
| 53 |
+
# REQUIRED for eBird API calls
|
| 54 |
+
#EBIRD_BASE_URL=https://api.ebird.org/v2
|
| 55 |
+
#EBIRD_MCP_AUTH_KEY=<secure_key_for_ebird_auth> # Only needed for HTTP
|
| 56 |
+
#EBIRD_MCP_URL=http://localhost:8000/mcp # Update if eBird server deployed separately
|
| 57 |
+
|
| 58 |
+
# ------------------------------------------------
|
| 59 |
+
|
| 60 |
+
##############################################
|
| 61 |
+
# eBird MCP Server
|
| 62 |
+
##############################################
|
| 63 |
+
###### NUTHATCH HAIL MARY ######
|
| 64 |
+
NUTHATCH_USE_STDIO=true
|
| 65 |
+
|
| 66 |
+
NUTHATCH_API_KEY=<secure-key-for-nuthatch-api>
|
| 67 |
+
#NUTHATCH_BASE_URL=https://nuthatch.lastelm.software/v2 # Optional, has default
|
| 68 |
+
|
| 69 |
+
# use random string e.g., openssl rand -base64 32
|
| 70 |
+
NUTHATCH_MCP_AUTH_KEY=<secure-key-for-nuthatch-mcp-auth>
|
| 71 |
+
NUTHATCH_MCP_URL=http://localhost:8000/mcp # Only for HTTP mode
|
| 72 |
+
DEFAULT_TIMEOUT=15
|
| 73 |
+
RATE_LIMIT_DELAY=1.0
|
| 74 |
+
|
.gitattributes
CHANGED
|
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
examples/another_bird.jpg filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
examples/bird_example_1.jpg filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
examples/bird_example_2.jpg filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
*.egg-info/
|
| 24 |
+
.installed.cfg
|
| 25 |
+
*.egg
|
| 26 |
+
MANIFEST
|
| 27 |
+
|
| 28 |
+
# PyInstaller
|
| 29 |
+
*.manifest
|
| 30 |
+
*.spec
|
| 31 |
+
|
| 32 |
+
# Installer logs
|
| 33 |
+
pip-log.txt
|
| 34 |
+
pip-delete-this-directory.txt
|
| 35 |
+
|
| 36 |
+
# Project-specific directories
|
| 37 |
+
#app_drafts/
|
| 38 |
+
#docs/
|
| 39 |
+
#tests/
|
| 40 |
+
tests_archive/
|
| 41 |
+
#examples/
|
| 42 |
+
|
| 43 |
+
# Unit test / coverage reports
|
| 44 |
+
htmlcov/
|
| 45 |
+
.tox/
|
| 46 |
+
.coverage
|
| 47 |
+
.coverage.*
|
| 48 |
+
.cache
|
| 49 |
+
nosetests.xml
|
| 50 |
+
coverage.xml
|
| 51 |
+
*.cover
|
| 52 |
+
.hypothesis/
|
| 53 |
+
.pytest_cache/
|
| 54 |
+
|
| 55 |
+
# Translations
|
| 56 |
+
*.mo
|
| 57 |
+
*.pot
|
| 58 |
+
|
| 59 |
+
# Django stuff:
|
| 60 |
+
*.log
|
| 61 |
+
local_settings.py
|
| 62 |
+
db.sqlite3
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
target/
|
| 76 |
+
|
| 77 |
+
# Jupyter Notebook
|
| 78 |
+
.ipynb_checkpoints
|
| 79 |
+
|
| 80 |
+
# pyenv
|
| 81 |
+
.python-version
|
| 82 |
+
|
| 83 |
+
# celery beat schedule file
|
| 84 |
+
celerybeat-schedule
|
| 85 |
+
|
| 86 |
+
# SageMath parsed files
|
| 87 |
+
*.sage.py
|
| 88 |
+
|
| 89 |
+
# Environments
|
| 90 |
+
.env
|
| 91 |
+
.env.backup
|
| 92 |
+
.venv
|
| 93 |
+
env/
|
| 94 |
+
venv/
|
| 95 |
+
ENV/
|
| 96 |
+
env.bak/
|
| 97 |
+
venv.bak/
|
| 98 |
+
|
| 99 |
+
# Spyder project settings
|
| 100 |
+
.spyderproject
|
| 101 |
+
.spyproject
|
| 102 |
+
|
| 103 |
+
# Rope project settings
|
| 104 |
+
.ropeproject
|
| 105 |
+
|
| 106 |
+
# mkdocs documentation
|
| 107 |
+
/site
|
| 108 |
+
|
| 109 |
+
# mypy
|
| 110 |
+
.mypy_cache/
|
| 111 |
+
.dmypy.json
|
| 112 |
+
dmypy.json
|
| 113 |
+
|
| 114 |
+
# Pyre type checker
|
| 115 |
+
.pyre/
|
| 116 |
+
|
| 117 |
+
# LangGraph checkpoints and cache
|
| 118 |
+
.langgraph_api/
|
| 119 |
+
|
| 120 |
+
# Modal (if using local development)
|
| 121 |
+
.modal/
|
| 122 |
+
|
| 123 |
+
# IDE files
|
| 124 |
+
.vscode/
|
| 125 |
+
.idea/
|
| 126 |
+
|
| 127 |
+
# OS generated files
|
| 128 |
+
.DS_Store
|
| 129 |
+
.DS_Store?
|
| 130 |
+
._*
|
| 131 |
+
.Spotlight-V100
|
| 132 |
+
.Trashes
|
| 133 |
+
ehthumbs.db
|
| 134 |
+
Thumbs.db
|
| 135 |
+
|
| 136 |
+
# Temporary files
|
| 137 |
+
*.tmp
|
| 138 |
+
*.swp
|
| 139 |
+
*~
|
| 140 |
+
*.bak
|
| 141 |
+
|
| 142 |
+
# Logs
|
| 143 |
+
*.log
|
| 144 |
+
logs/
|
README.md
CHANGED
|
@@ -1,14 +1,86 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: gradio
|
| 7 |
-
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
-
license: mit
|
| 11 |
-
short_description: 'hackathon submission: A bird loving AI Agent with MCP tools '
|
| 12 |
---
|
| 13 |
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: BirdScope AI - MCP Agent
|
| 3 |
+
emoji: 🦅
|
| 4 |
+
colorFrom: green
|
| 5 |
+
colorTo: blue
|
| 6 |
sdk: gradio
|
| 7 |
+
python_version: 3.11
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
|
|
|
|
|
|
| 10 |
---
|
| 11 |
|
| 12 |
+
# 🦅 BirdScope AI - Streaming MCP Agent
|
| 13 |
+
|
| 14 |
+
**Real-time bird identification powered by MCP tools!**
|
| 15 |
+
|
| 16 |
+
Built for the [MCP 1st Birthday Hackathon](https://huggingface.co/MCP-First-Birthday)
|
| 17 |
+
|
| 18 |
+
## Features
|
| 19 |
+
|
| 20 |
+
- 🔍 **Image Classification**: Upload bird photos for instant AI identification
|
| 21 |
+
- 🗺️ **Location Discovery**: Find birding hotspots near any location
|
| 22 |
+
- 📊 **Sighting Data**: Get recent observations from eBird API
|
| 23 |
+
- 🤖 **Multi-Provider**: Support for HuggingFace (free credits) & OpenAI
|
| 24 |
+
|
| 25 |
+
## How to Use
|
| 26 |
+
|
| 27 |
+
### Option 1: HuggingFace (Recommended for Hackathon)
|
| 28 |
+
1. Get your HuggingFace API key from [Settings → Access Tokens](https://huggingface.co/settings/tokens)
|
| 29 |
+
2. Select "HuggingFace" as your provider in the sidebar
|
| 30 |
+
3. Enter your HF API key in the sidebar
|
| 31 |
+
4. Start chatting! (Uses your $25 hackathon credits)
|
| 32 |
+
|
| 33 |
+
### Option 2: OpenAI
|
| 34 |
+
1. Select "OpenAI" as your provider in the sidebar
|
| 35 |
+
2. Enter your OpenAI API key in the sidebar
|
| 36 |
+
3. Start chatting!
|
| 37 |
+
|
| 38 |
+
## Architecture
|
| 39 |
+
|
| 40 |
+
This Space uses **MCP (Model Context Protocol)** to connect AI agents with:
|
| 41 |
+
|
| 42 |
+
### Modal MCP Server
|
| 43 |
+
- GPU-powered bird classification
|
| 44 |
+
- ResNet50 model trained on 555 bird species
|
| 45 |
+
- Real-time image processing
|
| 46 |
+
|
| 47 |
+
### eBird MCP Server
|
| 48 |
+
- 7 tools for bird data discovery
|
| 49 |
+
- Recent sightings, hotspot locations
|
| 50 |
+
- Notable/rare bird alerts
|
| 51 |
+
- Powered by Cornell Lab of Ornithology
|
| 52 |
+
|
| 53 |
+
## Technology Stack
|
| 54 |
+
|
| 55 |
+
- **Frontend**: Gradio 6.0 with custom Blocks UI
|
| 56 |
+
- **Agent Framework**: LangGraph with streaming support
|
| 57 |
+
- **MCP Clients**: FastMCP for tool integration
|
| 58 |
+
- **LLM Providers**:
|
| 59 |
+
- HuggingFace Inference API (Qwen/Qwen3-Coder)
|
| 60 |
+
- OpenAI (gpt-4o-mini)
|
| 61 |
+
|
| 62 |
+
## Development
|
| 63 |
+
|
| 64 |
+
Local testing:
|
| 65 |
+
```bash
|
| 66 |
+
# Install dependencies
|
| 67 |
+
pip install -r requirements.txt
|
| 68 |
+
|
| 69 |
+
# Set up environment variables
|
| 70 |
+
cp .env.example .env
|
| 71 |
+
# Edit .env with your API keys
|
| 72 |
+
|
| 73 |
+
# Run eBird MCP server (in separate terminal)
|
| 74 |
+
python ebird_tools.py --http --port 8000
|
| 75 |
+
|
| 76 |
+
# Run the app
|
| 77 |
+
python app.py
|
| 78 |
+
```
|
| 79 |
+
|
| 80 |
+
**Note**: Both HuggingFace and OpenAI providers work locally and on HF Spaces. Just provide your API key in the sidebar.
|
| 81 |
+
|
| 82 |
+
## Credits
|
| 83 |
+
|
| 84 |
+
- **Bird Data**: [eBird](https://ebird.org) by Cornell Lab of Ornithology
|
| 85 |
+
- **MCP Protocol**: [Model Context Protocol](https://github.com/modelcontextprotocol/modelcontextprotocol)
|
| 86 |
+
- **Bird Classifier**: Custom ResNet50 model
|
agent_cache.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agent cache management for per-session agents.
|
| 3 |
+
|
| 4 |
+
This module handles storing and retrieving agents for differnet users/sessions.
|
| 5 |
+
Each agent is cached by (session_id, provider, model, api_key_hash) to avoid recreating them.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from datetime import datetime, timedelta
|
| 9 |
+
from typing import Dict, Tuple, Any
|
| 10 |
+
import hashlib
|
| 11 |
+
|
| 12 |
+
# Global cache: maps (session_id, provider, model, api_key_hash, mode) -> agent
|
| 13 |
+
agent_cache: Dict[Tuple[str, str, str, str, str], Any] = {}
|
| 14 |
+
|
| 15 |
+
# Track when each agent was last used
|
| 16 |
+
agent_last_used: Dict[Tuple[str, str, str, str, str], datetime] = {}
|
| 17 |
+
|
| 18 |
+
async def get_or_create_agent(
    session_id: str,
    provider: str,
    api_key: str,
    model: str,
    mode: str,
    agent_factory_method
):
    """
    Get existing agent from cache or create new one.

    Args:
        session_id: Unique identifier for user session (from gr.Request)
        provider: "huggingface" or "openai"
        api_key: User's API key or OAuth token
        model: Model name/repo ID
        mode: Agent mode (e.g., "Single Agent (All Tools)", "Specialized Subagents (3 Specialists)")
        agent_factory_method: Async function to create agent if not cached

    Returns:
        Cached or newly created agent

    Example:
        agent = await get_or_create_agent(
            session_id="abc123",
            provider="openai",
            api_key="sk-...",
            model="gpt-4o-mini",
            mode="Single Agent (All Tools)",
            agent_factory_method=lambda: AgentFactory.create_streaming_agent_with_openai(...)
        )
    """
    # Hash the API key rather than storing it raw in the cache key;
    # distinct keys still yield distinct cached agents.
    key_digest = hashlib.sha256(api_key.encode()).hexdigest()[:16]

    # Cache key includes the API key hash AND the mode so that switching
    # either one produces a fresh agent.
    cache_key = (session_id, provider, model, key_digest, mode)

    if cache_key in agent_cache:
        # Fast path: reuse the existing agent and refresh its last-used time.
        print(f"[CACHE HIT] Reusing agent for session {session_id[:8]}...")
        agent_last_used[cache_key] = datetime.now()
        return agent_cache[cache_key]

    # Slow path: build a new agent via the supplied factory coroutine.
    print(f"[CACHE MISS] Creating new {provider} agent for session {session_id[:8]}...")
    agent = await agent_factory_method()

    # Record the new agent and its creation time in both maps.
    agent_cache[cache_key] = agent
    agent_last_used[cache_key] = datetime.now()

    print(f"[CACHE] Stored agent. Total agents in cache: {len(agent_cache)}")

    return agent
|
| 76 |
+
|
| 77 |
+
def cleanup_old_agents(max_age_hours: int = 1):
    """
    Remove agents that haven't been used in max_age_hours.

    Call this periodically to prevent memory leaks.

    Args:
        max_age_hours: Remove agents older than this many hours

    Returns:
        Number of agents removed
    """
    # An agent is stale when its last use is strictly before this cutoff
    # (equivalent to: now - last_used > max_age_hours).
    cutoff = datetime.now() - timedelta(hours=max_age_hours)

    stale_keys = [
        key for key, last_used in agent_last_used.items() if last_used < cutoff
    ]

    # Evict each stale agent from both bookkeeping maps.
    for key in stale_keys:
        print(f"[CLEANUP] Removing stale agent: {key}")
        del agent_cache[key]
        del agent_last_used[key]

    return len(stale_keys)
|
| 104 |
+
|
| 105 |
+
def get_cache_stats():
    """Get statistics about the agent cache."""
    # Snapshot the keys so callers can't mutate the cache through the view.
    keys_snapshot = list(agent_cache.keys())
    return {"total_agents": len(agent_cache), "cache_keys": keys_snapshot}
|
app.py
ADDED
|
@@ -0,0 +1,1780 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import base64
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from langgraph_agent import AgentFactory
|
| 7 |
+
from langgraph_agent.config import AgentConfig
|
| 8 |
+
from langgraph_agent.subagent_config import SubAgentConfig
|
| 9 |
+
from langgraph_agent.prompts import BIRDSCOPE_AI_PROMPT, NUTHATCH_BIRDSCOPE_PROMPT
|
| 10 |
+
from fastmcp.client import Client
|
| 11 |
+
from fastmcp.client.transports import StreamableHttpTransport
|
| 12 |
+
from agent_cache import get_or_create_agent
|
| 13 |
+
from langgraph_agent.structured_output import parse_agent_response
|
| 14 |
+
|
| 15 |
+
# Load environment variables from .env file
|
| 16 |
+
from dotenv import load_dotenv
|
| 17 |
+
load_dotenv()
|
| 18 |
+
|
| 19 |
+
# ============================================================================
|
| 20 |
+
# EXAMPLE SETS FOR DIFFERENT AGENT MODES
|
| 21 |
+
# ============================================================================
|
| 22 |
+
|
| 23 |
+
# Shared photo examples - always shown for both modes.
# Each entry is a dict with the prompt text plus a list of local image paths —
# presumably the multimodal example format consumed by the Gradio chat input
# defined later in this file (TODO confirm against the UI wiring below).
PHOTO_EXAMPLES = [
    {"text": "What bird is this?", "files": ["examples/bird_example_1.jpg"]},
    {"text": "Can you identify this bird?", "files": ["examples/bird_example_2.jpg"]},
    {"text": "Identify this bird and show me similar species", "files": ["examples/bird_example_3.jpg"]}
]

# Text-only examples for Specialized Subagents mode (plain prompt strings).
MULTI_AGENT_TEXT_EXAMPLES = [
    "Tell me about Northern Cardinals - show me images and audio",
    "What birds are in the Cardinalidae family?",
    "Show me species with endangered status"
]

# Text-only examples for Audio Finder Agent mode (plain prompt strings).
AUDIO_FINDER_TEXT_EXAMPLES = [
    "Find me audio for any bird",
    "Get audio recordings for Snow Goose",
    "Find bird calls from North America",
    "Show me audio recordings of Common Goldeneye"
]
|
| 44 |
+
|
| 45 |
+
# ============================================================================
|
| 46 |
+
# CUSTOM CSS WITH CLOUD/SKY AESTHETIC
|
| 47 |
+
# ============================================================================
|
| 48 |
+
|
| 49 |
+
custom_css = """
|
| 50 |
+
/* ========================================================================
|
| 51 |
+
GLOBAL STYLES - SKY/CLOUD AESTHETIC
|
| 52 |
+
======================================================================== */
|
| 53 |
+
|
| 54 |
+
/* Unified cloud/sky background across entire page */
|
| 55 |
+
body, html {
|
| 56 |
+
background: linear-gradient(180deg, #E0F4FF 0%, #B0E2FF 40%, #87CEEB 100%) !important;
|
| 57 |
+
min-height: 100vh !important;
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
.gradio-container {
|
| 61 |
+
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, sans-serif !important;
|
| 62 |
+
background:
|
| 63 |
+
/* Cloud formations - concentrated at TOP, fading down */
|
| 64 |
+
radial-gradient(ellipse 1200px 400px at 20% 0%, rgba(255, 255, 255, 0.6), transparent 70%),
|
| 65 |
+
radial-gradient(ellipse 1000px 350px at 80% 3%, rgba(255, 255, 255, 0.5), transparent 70%),
|
| 66 |
+
radial-gradient(ellipse 900px 300px at 50% 5%, rgba(255, 255, 255, 0.55), transparent 70%),
|
| 67 |
+
radial-gradient(ellipse 800px 250px at 10% 8%, rgba(255, 255, 255, 0.45), transparent 70%),
|
| 68 |
+
radial-gradient(ellipse 700px 200px at 90% 10%, rgba(255, 255, 255, 0.4), transparent 70%),
|
| 69 |
+
radial-gradient(ellipse 600px 180px at 40% 12%, rgba(255, 255, 255, 0.35), transparent 70%),
|
| 70 |
+
radial-gradient(ellipse 500px 150px at 60% 15%, rgba(255, 255, 255, 0.3), transparent 70%),
|
| 71 |
+
/* Base sky gradient - REVERSED: lighter at top, deeper blue at bottom */
|
| 72 |
+
linear-gradient(180deg, #E0F4FF 0%, #B0E2FF 40%, #87CEEB 100%) !important;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
/* ========================================================================
|
| 77 |
+
SIDEBAR STYLING - DARK THEME
|
| 78 |
+
======================================================================== */
|
| 79 |
+
|
| 80 |
+
.sidebar {
|
| 81 |
+
background: #1f2937 !important;
|
| 82 |
+
padding: 24px 20px !important;
|
| 83 |
+
border-radius: 12px !important;
|
| 84 |
+
border: 1px solid #374151 !important;
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
/* Hide Gradio's default loading indicator in sidebar (we use badge for loading state) */
|
| 88 |
+
.sidebar .loading,
|
| 89 |
+
.sidebar .wrap.pending,
|
| 90 |
+
.sidebar .progress-bar,
|
| 91 |
+
.sidebar [class*="loading"],
|
| 92 |
+
.sidebar [class*="progress"] {
|
| 93 |
+
display: none !important;
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
/* Also hide the loading indicator that appears as a child of the sidebar */
|
| 97 |
+
.gradio-container .sidebar ~ * .loading,
|
| 98 |
+
.gradio-container .sidebar ~ * .progress-bar {
|
| 99 |
+
display: none !important;
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
/* Hide Gradio's global top progress bar (the blue horizontal line) */
|
| 103 |
+
.app > div > div > .progress-level-inner,
|
| 104 |
+
body > gradio-app > div > div > div.progress-level-inner,
|
| 105 |
+
[class*="progress-level"],
|
| 106 |
+
.progress-level-inner {
|
| 107 |
+
display: none !important;
|
| 108 |
+
visibility: hidden !important;
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
/* Make all sidebar text light for dark background */
|
| 112 |
+
.sidebar h1,
|
| 113 |
+
.sidebar h2,
|
| 114 |
+
.sidebar h3,
|
| 115 |
+
.sidebar h4,
|
| 116 |
+
.sidebar h5,
|
| 117 |
+
.sidebar h6 {
|
| 118 |
+
color: #f9fafb !important;
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
.sidebar p,
|
| 122 |
+
.sidebar span,
|
| 123 |
+
.sidebar label {
|
| 124 |
+
color: #d1d5db !important;
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
/* Keep links distinguishable */
|
| 128 |
+
.sidebar a {
|
| 129 |
+
color: #818cf8 !important;
|
| 130 |
+
text-decoration: underline !important;
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
.sidebar a:hover {
|
| 134 |
+
color: #a5b4fc !important;
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
/* API Key sections */
|
| 138 |
+
.hf-section, .openai-section, .anthropic-section {
|
| 139 |
+
margin-top: 12px !important;
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
/* Dark theme input styling */
|
| 143 |
+
.sidebar input[type="password"],
|
| 144 |
+
.sidebar input[type="text"],
|
| 145 |
+
.sidebar textarea {
|
| 146 |
+
border: 1px solid #374151 !important;
|
| 147 |
+
border-radius: 8px !important;
|
| 148 |
+
padding: 10px 14px !important;
|
| 149 |
+
font-size: 14px !important;
|
| 150 |
+
font-family: 'SF Mono', 'Monaco', 'Inconsolata', monospace !important;
|
| 151 |
+
background: #111827 !important;
|
| 152 |
+
color: #f9fafb !important;
|
| 153 |
+
transition: all 0.2s ease !important;
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
.sidebar input[type="password"]::placeholder,
|
| 157 |
+
.sidebar input[type="text"]::placeholder,
|
| 158 |
+
.sidebar textarea::placeholder {
|
| 159 |
+
color: #6b7280 !important;
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
.sidebar input[type="password"]:focus,
|
| 163 |
+
.sidebar input[type="text"]:focus,
|
| 164 |
+
.sidebar textarea:focus {
|
| 165 |
+
border-color: #818cf8 !important;
|
| 166 |
+
box-shadow: 0 0 0 2px rgba(129, 140, 248, 0.2) !important;
|
| 167 |
+
outline: none !important;
|
| 168 |
+
background: #1f2937 !important;
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
/* ========================================================================
|
| 172 |
+
CHATBOT & TOOL LOG PANELS
|
| 173 |
+
======================================================================== */
|
| 174 |
+
|
| 175 |
+
.chatbot-container {
|
| 176 |
+
border-radius: 12px !important;
|
| 177 |
+
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08) !important;
|
| 178 |
+
border: 1px solid #e5e7eb !important;
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
/* Force icon SVG elements to use light colors for visibility on dark background */
|
| 182 |
+
.chatbot-container svg,
|
| 183 |
+
.chatbot-container svg path,
|
| 184 |
+
.chatbot-container svg circle,
|
| 185 |
+
.chatbot-container svg rect {
|
| 186 |
+
fill: #d1d5db !important;
|
| 187 |
+
stroke: #d1d5db !important;
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
.tool-log-panel textarea {
|
| 191 |
+
background: #1f2937 !important;
|
| 192 |
+
border-radius: 12px !important;
|
| 193 |
+
padding: 20px !important;
|
| 194 |
+
border: 1px solid #374151 !important;
|
| 195 |
+
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08) !important;
|
| 196 |
+
font-family: 'SF Mono', 'Monaco', 'Inconsolata', 'Consolas', monospace !important;
|
| 197 |
+
font-size: 13px !important;
|
| 198 |
+
line-height: 1.6 !important;
|
| 199 |
+
color: #d1d5db !important;
|
| 200 |
+
resize: none !important;
|
| 201 |
+
height: 500px !important;
|
| 202 |
+
min-height: 500px !important;
|
| 203 |
+
max-height: 500px !important;
|
| 204 |
+
overflow-y: auto !important;
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
/* Ensure tool log panel container aligns perfectly */
|
| 208 |
+
.tool-log-panel {
|
| 209 |
+
margin: 0 !important;
|
| 210 |
+
padding: 0 !important;
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
.tool-log-panel textarea::-webkit-scrollbar {
|
| 214 |
+
width: 8px !important;
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
.tool-log-panel textarea::-webkit-scrollbar-track {
|
| 218 |
+
background: #111827 !important;
|
| 219 |
+
border-radius: 4px !important;
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
.tool-log-panel textarea::-webkit-scrollbar-thumb {
|
| 223 |
+
background: #4b5563 !important;
|
| 224 |
+
border-radius: 4px !important;
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
.tool-log-panel textarea::-webkit-scrollbar-thumb:hover {
|
| 228 |
+
background: #6b7280 !important;
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
hr {
|
| 232 |
+
border: none !important;
|
| 233 |
+
border-top: 1px solid #374151 !important;
|
| 234 |
+
margin: 20px 0 !important;
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
.sidebar hr {
|
| 238 |
+
border-top-color: #374151 !important;
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
/* ========================================================================
|
| 242 |
+
TEXT ON LIGHT BACKGROUND - MAKE DARK FOR READABILITY
|
| 243 |
+
======================================================================== */
|
| 244 |
+
|
| 245 |
+
/* All text elements outside dark panels should be dark for readability */
|
| 246 |
+
.gradio-container label:not(.sidebar label):not(.tool-log-panel label):not(.chatbot-container label),
|
| 247 |
+
.gradio-container span:not(.sidebar span):not(.tool-log-panel span):not(.chatbot-container span):not(.birdscope-header span),
|
| 248 |
+
.gradio-container p:not(.sidebar p):not(.tool-log-panel p):not(.chatbot-container p):not(.birdscope-header p),
|
| 249 |
+
.gradio-container div:not(.sidebar div):not(.tool-log-panel div):not(.chatbot-container div):not(.birdscope-header div) {
|
| 250 |
+
color: #1a1a1a !important;
|
| 251 |
+
}
|
| 252 |
+
|
| 253 |
+
/* Markdown text outside dark panels */
|
| 254 |
+
.gradio-container .markdown:not(.sidebar .markdown):not(.tool-log-panel .markdown) {
|
| 255 |
+
color: #1a1a1a !important;
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
/* Markdown headings - ensure all are black on light background (except sidebar) */
|
| 259 |
+
.gradio-container .markdown:not(.sidebar .markdown) h1,
|
| 260 |
+
.gradio-container .markdown:not(.sidebar .markdown) h2,
|
| 261 |
+
.gradio-container .markdown:not(.sidebar .markdown) h3,
|
| 262 |
+
.gradio-container .markdown:not(.sidebar .markdown) h4,
|
| 263 |
+
.gradio-container .markdown:not(.sidebar .markdown) h5,
|
| 264 |
+
.gradio-container .markdown:not(.sidebar .markdown) h6 {
|
| 265 |
+
color: #1a1a1a !important;
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
/* Regular buttons (not primary) should have dark text */
|
| 269 |
+
button:not([variant="primary"]) {
|
| 270 |
+
color: #1a1a1a !important;
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
/* BUT sidebar buttons should have light text (override above) */
|
| 274 |
+
.sidebar button:not([variant="primary"]),
|
| 275 |
+
.sidebar button:not([variant="primary"]) span,
|
| 276 |
+
.sidebar button:not([variant="primary"]) * {
|
| 277 |
+
color: #f9fafb !important;
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
/* Modal check button with logo */
|
| 281 |
+
.modal-check-btn {
|
| 282 |
+
background: rgba(59, 130, 246, 0.1) !important;
|
| 283 |
+
border: 1px solid rgba(59, 130, 246, 0.3) !important;
|
| 284 |
+
border-radius: 9999px !important;
|
| 285 |
+
transition: all 0.2s ease !important;
|
| 286 |
+
cursor: pointer !important;
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
.modal-check-btn:hover {
|
| 290 |
+
background: rgba(59, 130, 246, 0.2) !important;
|
| 291 |
+
border-color: rgba(59, 130, 246, 0.5) !important;
|
| 292 |
+
transform: translateY(-1px);
|
| 293 |
+
box-shadow: 0 2px 8px rgba(59, 130, 246, 0.3) !important;
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
.modal-check-btn:active {
|
| 297 |
+
transform: translateY(0);
|
| 298 |
+
}
|
| 299 |
+
|
| 300 |
+
.modal-check-btn::before {
|
| 301 |
+
content: "";
|
| 302 |
+
display: inline-block;
|
| 303 |
+
width: 18px;
|
| 304 |
+
height: 18px;
|
| 305 |
+
margin-right: 8px;
|
| 306 |
+
background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 100 100' fill='none'%3E%3C!-- Left ribbon --%3E%3Cpath d='M0 30 L25 15 L50 30 L50 70 L25 85 L0 70 Z' fill='%2335D07F'/%3E%3Cpath d='M25 15 L50 30 L25 45 L0 30 Z' fill='%2388E5A8'/%3E%3Cpath d='M25 45 L50 30 L50 70 L25 85 Z' fill='%2315B866'/%3E%3C!-- Right ribbon --%3E%3Cpath d='M50 30 L75 15 L100 30 L100 70 L75 85 L50 70 Z' fill='%2335D07F'/%3E%3Cpath d='M75 15 L100 30 L75 45 L50 30 Z' fill='%2388E5A8'/%3E%3Cpath d='M75 45 L100 30 L100 70 L75 85 Z' fill='%2315B866'/%3E%3C/svg%3E");
|
| 307 |
+
background-size: contain;
|
| 308 |
+
background-repeat: no-repeat;
|
| 309 |
+
background-position: center;
|
| 310 |
+
vertical-align: middle;
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
/* ========================================================================
|
| 314 |
+
EXAMPLES - BLACK TEXT FOR READABILITY
|
| 315 |
+
======================================================================== */
|
| 316 |
+
|
| 317 |
+
/* Examples label - force black text with very high specificity */
|
| 318 |
+
label.svelte-1gfkn6j,
|
| 319 |
+
.label,
|
| 320 |
+
span.svelte-1gfkn6j {
|
| 321 |
+
color: #1a1a1a !important;
|
| 322 |
+
}
|
| 323 |
+
|
| 324 |
+
/* Target example buttons specifically, excluding footer */
|
| 325 |
+
.gradio-container button:not([variant="primary"]):not(.sidebar button):not(footer button):not([class*="footer"] button) {
|
| 326 |
+
color: #1a1a1a !important;
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
/* Footer text should be black on light background */
|
| 330 |
+
footer,
|
| 331 |
+
footer *,
|
| 332 |
+
footer a,
|
| 333 |
+
[class*="footer"],
|
| 334 |
+
[class*="footer"] *,
|
| 335 |
+
[class*="footer"] a {
|
| 336 |
+
color: #1a1a1a !important;
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
/* ========================================================================
|
| 340 |
+
ENHANCED HEADER - BIRDSCOPE BRANDING
|
| 341 |
+
======================================================================== */
|
| 342 |
+
|
| 343 |
+
@import url('https://fonts.googleapis.com/css2?family=Quicksand:wght@500;700&display=swap');
|
| 344 |
+
|
| 345 |
+
.birdscope-header {
|
| 346 |
+
position: relative;
|
| 347 |
+
overflow: hidden;
|
| 348 |
+
padding: 2rem 1.5rem;
|
| 349 |
+
}
|
| 350 |
+
|
| 351 |
+
/* Decorative cloud elements */
|
| 352 |
+
.cloud-decor-1 {
|
| 353 |
+
position: absolute;
|
| 354 |
+
top: -2.5rem;
|
| 355 |
+
right: 2.5rem;
|
| 356 |
+
width: 10rem;
|
| 357 |
+
height: 10rem;
|
| 358 |
+
background: rgba(255, 255, 255, 0.4);
|
| 359 |
+
border-radius: 50%;
|
| 360 |
+
filter: blur(60px);
|
| 361 |
+
pointer-events: none;
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
.cloud-decor-2 {
|
| 365 |
+
position: absolute;
|
| 366 |
+
top: 0;
|
| 367 |
+
right: 33%;
|
| 368 |
+
width: 8rem;
|
| 369 |
+
height: 8rem;
|
| 370 |
+
background: rgba(224, 242, 254, 0.5);
|
| 371 |
+
border-radius: 50%;
|
| 372 |
+
filter: blur(40px);
|
| 373 |
+
pointer-events: none;
|
| 374 |
+
}
|
| 375 |
+
|
| 376 |
+
.cloud-decor-3 {
|
| 377 |
+
position: absolute;
|
| 378 |
+
top: -1.25rem;
|
| 379 |
+
left: 5rem;
|
| 380 |
+
width: 6rem;
|
| 381 |
+
height: 6rem;
|
| 382 |
+
background: rgba(255, 255, 255, 0.3);
|
| 383 |
+
border-radius: 50%;
|
| 384 |
+
filter: blur(40px);
|
| 385 |
+
pointer-events: none;
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
/* Flying birds animation */
|
| 389 |
+
@keyframes drift {
|
| 390 |
+
0%, 100% { transform: translateX(0) translateY(0); }
|
| 391 |
+
50% { transform: translateX(10px) translateY(-5px); }
|
| 392 |
+
}
|
| 393 |
+
|
| 394 |
+
@keyframes fadeIn {
|
| 395 |
+
from { opacity: 0; transform: translateY(5px); }
|
| 396 |
+
to { opacity: 1; transform: translateY(0); }
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
.bird-silhouette {
|
| 400 |
+
position: absolute;
|
| 401 |
+
animation: drift 8s ease-in-out infinite;
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
.bird-1 { top: 1.5rem; right: 8rem; width: 1.25rem; height: 1.25rem; color: rgba(148, 163, 184, 0.3); }
|
| 405 |
+
.bird-2 { top: 2.5rem; right: 12rem; width: 1rem; height: 1rem; color: rgba(148, 163, 184, 0.2); animation-delay: 1s; }
|
| 406 |
+
.bird-3 { top: 1rem; right: 16rem; width: 0.75rem; height: 0.75rem; color: rgba(148, 163, 184, 0.15); animation-delay: 2s; }
|
| 407 |
+
|
| 408 |
+
/* Logo container */
|
| 409 |
+
.bird-logo-wrapper {
|
| 410 |
+
position: relative;
|
| 411 |
+
display: inline-block;
|
| 412 |
+
}
|
| 413 |
+
|
| 414 |
+
.bird-logo-glow {
|
| 415 |
+
position: absolute;
|
| 416 |
+
inset: 0;
|
| 417 |
+
background: linear-gradient(135deg, #38bdf8 0%, #3b82f6 100%);
|
| 418 |
+
border-radius: 1rem;
|
| 419 |
+
filter: blur(8px);
|
| 420 |
+
opacity: 0.3;
|
| 421 |
+
transition: opacity 0.3s;
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
.bird-logo-wrapper:hover .bird-logo-glow {
|
| 425 |
+
opacity: 0.5;
|
| 426 |
+
}
|
| 427 |
+
|
| 428 |
+
.bird-logo {
|
| 429 |
+
position: relative;
|
| 430 |
+
background: linear-gradient(135deg, #38bdf8 0%, #3b82f6 100%);
|
| 431 |
+
padding: 0.75rem;
|
| 432 |
+
border-radius: 1rem;
|
| 433 |
+
box-shadow: 0 10px 25px rgba(56, 189, 248, 0.2);
|
| 434 |
+
}
|
| 435 |
+
|
| 436 |
+
/* Header content */
|
| 437 |
+
.header-content {
|
| 438 |
+
position: relative;
|
| 439 |
+
z-index: 10;
|
| 440 |
+
max-width: 72rem;
|
| 441 |
+
margin: 0 auto;
|
| 442 |
+
}
|
| 443 |
+
|
| 444 |
+
.header-top {
|
| 445 |
+
display: flex;
|
| 446 |
+
align-items: center;
|
| 447 |
+
gap: 1rem;
|
| 448 |
+
}
|
| 449 |
+
|
| 450 |
+
.header-title-group h1 {
|
| 451 |
+
font-family: 'Quicksand', 'Nunito', sans-serif !important;
|
| 452 |
+
font-size: 1.875rem !important;
|
| 453 |
+
font-weight: 700 !important;
|
| 454 |
+
color: #1e293b !important;
|
| 455 |
+
letter-spacing: -0.025em !important;
|
| 456 |
+
margin: 0 !important;
|
| 457 |
+
display: inline !important;
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
.header-ai-text {
|
| 461 |
+
font-size: 1.5rem;
|
| 462 |
+
font-weight: 300;
|
| 463 |
+
color: #0ea5e9;
|
| 464 |
+
margin-left: 0.5rem;
|
| 465 |
+
}
|
| 466 |
+
|
| 467 |
+
.header-v2-badge {
|
| 468 |
+
display: inline-block;
|
| 469 |
+
padding: 0.125rem 0.5rem;
|
| 470 |
+
font-size: 0.75rem;
|
| 471 |
+
font-weight: 600;
|
| 472 |
+
background: linear-gradient(to right, #fbbf24, #f97316);
|
| 473 |
+
color: white;
|
| 474 |
+
border-radius: 9999px;
|
| 475 |
+
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.1);
|
| 476 |
+
margin-left: 0.5rem;
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
.header-subtitle {
|
| 480 |
+
color: #64748b !important;
|
| 481 |
+
font-size: 0.875rem !important;
|
| 482 |
+
margin-top: 0.125rem !important;
|
| 483 |
+
}
|
| 484 |
+
|
| 485 |
+
.mcp-badge {
|
| 486 |
+
display: inline-flex;
|
| 487 |
+
align-items: center;
|
| 488 |
+
gap: 0.5rem;
|
| 489 |
+
padding: 0.375rem 0.75rem;
|
| 490 |
+
background: rgba(255, 255, 255, 0.6);
|
| 491 |
+
backdrop-filter: blur(8px);
|
| 492 |
+
border: 1px solid #e2e8f0;
|
| 493 |
+
border-radius: 6px;
|
| 494 |
+
font-size: 0.75rem;
|
| 495 |
+
color: #1a1a1a !important;
|
| 496 |
+
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
|
| 497 |
+
margin-left: auto;
|
| 498 |
+
user-select: none;
|
| 499 |
+
}
|
| 500 |
+
|
| 501 |
+
.mcp-badge span {
|
| 502 |
+
color: #1a1a1a !important;
|
| 503 |
+
}
|
| 504 |
+
|
| 505 |
+
.mcp-badge.checking {
|
| 506 |
+
animation: badgePulse 1.5s ease-in-out infinite !important;
|
| 507 |
+
background: rgba(251, 191, 36, 0.15) !important;
|
| 508 |
+
border-color: #fbbf24 !important;
|
| 509 |
+
}
|
| 510 |
+
|
| 511 |
+
/* White text while checking */
|
| 512 |
+
.mcp-badge.checking span {
|
| 513 |
+
color: #ffffff !important;
|
| 514 |
+
}
|
| 515 |
+
|
| 516 |
+
/* Disable hover effects while checking */
|
| 517 |
+
.mcp-badge.checking:hover {
|
| 518 |
+
transform: none !important;
|
| 519 |
+
animation: badgePulse 1.5s ease-in-out infinite !important;
|
| 520 |
+
}
|
| 521 |
+
|
| 522 |
+
.mcp-badge.checking .mcp-pulse {
|
| 523 |
+
background: #fbbf24;
|
| 524 |
+
animation: pulse 2s cubic-bezier(0.4, 0, 0.6, 1) infinite;
|
| 525 |
+
}
|
| 526 |
+
|
| 527 |
+
.mcp-badge.offline .mcp-pulse {
|
| 528 |
+
background: #ef4444;
|
| 529 |
+
animation: none;
|
| 530 |
+
}
|
| 531 |
+
|
| 532 |
+
.mcp-badge.online .mcp-pulse {
|
| 533 |
+
background: #34d399;
|
| 534 |
+
}
|
| 535 |
+
|
| 536 |
+
.mcp-pulse {
|
| 537 |
+
width: 0.5rem;
|
| 538 |
+
height: 0.5rem;
|
| 539 |
+
background: #34d399;
|
| 540 |
+
border-radius: 50%;
|
| 541 |
+
animation: pulse 2s cubic-bezier(0.4, 0, 0.6, 1) infinite;
|
| 542 |
+
}
|
| 543 |
+
|
| 544 |
+
@keyframes pulse {
|
| 545 |
+
0%, 100% { opacity: 1; }
|
| 546 |
+
50% { opacity: 0.5; }
|
| 547 |
+
}
|
| 548 |
+
|
| 549 |
+
@keyframes badgePulse {
|
| 550 |
+
0%, 100% {
|
| 551 |
+
opacity: 1;
|
| 552 |
+
transform: scale(1);
|
| 553 |
+
}
|
| 554 |
+
50% {
|
| 555 |
+
opacity: 0.8;
|
| 556 |
+
transform: scale(1.08);
|
| 557 |
+
}
|
| 558 |
+
}
|
| 559 |
+
|
| 560 |
+
/* Feature tags */
|
| 561 |
+
.feature-tags {
|
| 562 |
+
margin-top: 1.25rem;
|
| 563 |
+
display: flex;
|
| 564 |
+
flex-wrap: wrap;
|
| 565 |
+
gap: 0.5rem;
|
| 566 |
+
}
|
| 567 |
+
|
| 568 |
+
.feature-tag {
|
| 569 |
+
display: inline-flex;
|
| 570 |
+
align-items: center;
|
| 571 |
+
gap: 0.5rem;
|
| 572 |
+
padding: 0.375rem 0.75rem;
|
| 573 |
+
background: rgba(255, 255, 255, 0.7);
|
| 574 |
+
backdrop-filter: blur(8px);
|
| 575 |
+
border: 1px solid rgba(226, 232, 240, 0.8);
|
| 576 |
+
border-radius: 9999px;
|
| 577 |
+
font-size: 0.875rem;
|
| 578 |
+
color: #1a1a1a !important;
|
| 579 |
+
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
|
| 580 |
+
transition: all 0.2s;
|
| 581 |
+
cursor: default;
|
| 582 |
+
animation: fadeIn 0.4s ease-out forwards;
|
| 583 |
+
opacity: 0;
|
| 584 |
+
}
|
| 585 |
+
|
| 586 |
+
.feature-tag span {
|
| 587 |
+
color: #1a1a1a !important;
|
| 588 |
+
}
|
| 589 |
+
|
| 590 |
+
.feature-tag:hover {
|
| 591 |
+
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
|
| 592 |
+
border-color: #7dd3fc;
|
| 593 |
+
}
|
| 594 |
+
|
| 595 |
+
.feature-tag:nth-child(1) { animation-delay: 0ms; }
|
| 596 |
+
.feature-tag:nth-child(2) { animation-delay: 80ms; }
|
| 597 |
+
.feature-tag:nth-child(3) { animation-delay: 160ms; }
|
| 598 |
+
.feature-tag:nth-child(4) { animation-delay: 240ms; }
|
| 599 |
+
|
| 600 |
+
/* Bottom border */
|
| 601 |
+
.header-border {
|
| 602 |
+
position: absolute;
|
| 603 |
+
bottom: 0;
|
| 604 |
+
left: 0;
|
| 605 |
+
right: 0;
|
| 606 |
+
height: 1px;
|
| 607 |
+
background: linear-gradient(to right, transparent, #e2e8f0, transparent);
|
| 608 |
+
}
|
| 609 |
+
|
| 610 |
+
/* Mobile responsive */
|
| 611 |
+
@media (max-width: 640px) {
|
| 612 |
+
.mcp-badge {
|
| 613 |
+
display: none;
|
| 614 |
+
}
|
| 615 |
+
.header-top {
|
| 616 |
+
flex-direction: column;
|
| 617 |
+
align-items: flex-start;
|
| 618 |
+
}
|
| 619 |
+
}
|
| 620 |
+
|
| 621 |
+
/* ========================================================================
|
| 622 |
+
ONBOARDING FLOW STYLING
|
| 623 |
+
======================================================================== */
|
| 624 |
+
|
| 625 |
+
/* Center and constrain onboarding pages */
|
| 626 |
+
.onboarding-page {
|
| 627 |
+
max-width: 500px !important;
|
| 628 |
+
margin: 2rem auto !important;
|
| 629 |
+
padding: 32px !important;
|
| 630 |
+
}
|
| 631 |
+
|
| 632 |
+
/* Ensure welcome text is visible on dark background */
|
| 633 |
+
.welcome-text h1, .api-key-text h1 {
|
| 634 |
+
color: #f9fafb !important;
|
| 635 |
+
}
|
| 636 |
+
|
| 637 |
+
/* Scroll animation for step transitions */
|
| 638 |
+
.onboarding-page {
|
| 639 |
+
animation: fadeInStep 0.3s ease-out;
|
| 640 |
+
}
|
| 641 |
+
|
| 642 |
+
@keyframes fadeInStep {
|
| 643 |
+
from {
|
| 644 |
+
opacity: 0.5;
|
| 645 |
+
transform: translateY(10px);
|
| 646 |
+
}
|
| 647 |
+
to {
|
| 648 |
+
opacity: 1;
|
| 649 |
+
transform: translateY(0);
|
| 650 |
+
}
|
| 651 |
+
}
|
| 652 |
+
"""
|
| 653 |
+
|
| 654 |
+
# ============================================================================
|
| 655 |
+
# CHAT FUNCTIONS - DUAL OUTPUT (CHAT + TOOL LOG)
|
| 656 |
+
# ============================================================================
|
| 657 |
+
|
| 658 |
+
def format_tool_output_for_chat(tool_output):
    """
    Parse tool output and format images/content for display in chatbot.

    Detects image URLs in the stringified tool output and converts them to
    markdown image syntax so the chatbot renders them inline. When no image
    URLs are found, returns the (possibly truncated) text followed by a
    blank-line separator; an empty output yields "".

    FIX: the original loop appended only f"\\n\\n" and never used the matched
    URL, so detected images were silently dropped. Each URL is now emitted as
    a markdown image.
    """
    import re

    output_str = str(tool_output)

    # Pattern to match image URLs (common image formats, optional query string)
    image_pattern = r'(https?://[^\s<>"{}|\\^\[\]`]+\.(?:jpg|jpeg|png|gif|webp|svg)(?:\?[^\s]*)?)'

    image_urls = re.findall(image_pattern, output_str, re.IGNORECASE)

    if image_urls:
        # Format images as markdown; limit to first 3 to avoid clutter
        formatted_output = ""
        for url in image_urls[:3]:
            formatted_output += f"![tool result image]({url})\n\n"
        return formatted_output

    # No images: truncate long text so a verbose tool dump doesn't flood the chat
    if len(output_str) > 200:
        return output_str[:200] + "...\n\n"

    # Empty output maps to "" (no stray separator in the chat transcript)
    return output_str + "\n\n" if output_str else ""
|
| 685 |
+
|
| 686 |
+
async def chat_with_tool_visibility(
|
| 687 |
+
message,
|
| 688 |
+
history,
|
| 689 |
+
provider,
|
| 690 |
+
hf_key,
|
| 691 |
+
openai_key,
|
| 692 |
+
anthropic_key,
|
| 693 |
+
agent_mode,
|
| 694 |
+
request: gr.Request
|
| 695 |
+
):
|
| 696 |
+
"""
|
| 697 |
+
Dual-output streaming: chat response + tool execution log
|
| 698 |
+
|
| 699 |
+
Yields: tuple(chat_response_text, tool_log_markdown)
|
| 700 |
+
"""
|
| 701 |
+
# -------------------------------------------------------------------------
|
| 702 |
+
# 1. VALIDATE CREDENTIALS & SELECT PROVIDER
|
| 703 |
+
# -------------------------------------------------------------------------
|
| 704 |
+
if provider == "HuggingFace":
|
| 705 |
+
api_key = (hf_key.strip() if hf_key and hf_key.strip()
|
| 706 |
+
else os.getenv("HF_API_KEY", ""))
|
| 707 |
+
if not api_key:
|
| 708 |
+
yield "**API Key Required**\n\nPlease enter your HuggingFace API key in the sidebar.", "*Waiting for API key...*"
|
| 709 |
+
return
|
| 710 |
+
provider_key = "huggingface"
|
| 711 |
+
model = AgentConfig.DEFAULT_HF_MODEL
|
| 712 |
+
elif provider == "Anthropic":
|
| 713 |
+
api_key = (anthropic_key.strip() if anthropic_key and anthropic_key.strip()
|
| 714 |
+
else os.getenv("ANTHROPIC_API_KEY", ""))
|
| 715 |
+
if not api_key:
|
| 716 |
+
yield "**API Key Required**\n\nPlease enter your Anthropic API key in the sidebar.", "*Waiting for API key...*"
|
| 717 |
+
return
|
| 718 |
+
provider_key = "anthropic"
|
| 719 |
+
model = AgentConfig.DEFAULT_ANTHROPIC_MODEL
|
| 720 |
+
else: # OpenAI
|
| 721 |
+
api_key = (openai_key.strip() if openai_key and openai_key.strip()
|
| 722 |
+
else os.getenv("OPENAI_API_KEY", ""))
|
| 723 |
+
if not api_key:
|
| 724 |
+
yield "**API Key Required**\n\nPlease enter your OpenAI API key in the sidebar.", "*Waiting for API key...*"
|
| 725 |
+
return
|
| 726 |
+
provider_key = "openai"
|
| 727 |
+
model = AgentConfig.DEFAULT_OPENAI_MODEL
|
| 728 |
+
|
| 729 |
+
# -------------------------------------------------------------------------
|
| 730 |
+
# 2. GET OR CREATE AGENT
|
| 731 |
+
# -------------------------------------------------------------------------
|
| 732 |
+
try:
|
| 733 |
+
session_id = request.session_hash
|
| 734 |
+
|
| 735 |
+
# Get or create agent (unified subagent architecture)
|
| 736 |
+
agent = await get_or_create_agent(
|
| 737 |
+
session_id=session_id,
|
| 738 |
+
provider=provider_key,
|
| 739 |
+
api_key=api_key,
|
| 740 |
+
model=model,
|
| 741 |
+
mode=agent_mode, # Include mode in cache key
|
| 742 |
+
agent_factory_method=lambda: AgentFactory.create_subagent_orchestrator(
|
| 743 |
+
model=model,
|
| 744 |
+
api_key=api_key,
|
| 745 |
+
provider=provider_key,
|
| 746 |
+
mode=agent_mode # Pass mode to determine agent composition
|
| 747 |
+
)
|
| 748 |
+
)
|
| 749 |
+
except Exception as e:
|
| 750 |
+
yield f"**Agent Creation Failed**\n\n{str(e)}", "*Agent creation failed*"
|
| 751 |
+
return
|
| 752 |
+
|
| 753 |
+
config = {"configurable": {"thread_id": session_id}}
|
| 754 |
+
|
| 755 |
+
# -------------------------------------------------------------------------
|
| 756 |
+
# 3. PARSE MESSAGE & HANDLE IMAGE UPLOADS
|
| 757 |
+
# -------------------------------------------------------------------------
|
| 758 |
+
# Separate accumulators for chat and tool log
|
| 759 |
+
chat_response = ""
|
| 760 |
+
tool_log = ""
|
| 761 |
+
tool_count = 0
|
| 762 |
+
|
| 763 |
+
user_text = ""
|
| 764 |
+
if isinstance(message, dict):
|
| 765 |
+
user_text = message.get("text", "")
|
| 766 |
+
print(f"[DEBUG MESSAGE] User query: {user_text}") # DEBUG
|
| 767 |
+
files = message.get("files", [])
|
| 768 |
+
|
| 769 |
+
# Handle image uploads
|
| 770 |
+
if files and len(files) > 0:
|
| 771 |
+
image_path = files[0]
|
| 772 |
+
|
| 773 |
+
if image_path.startswith("http"):
|
| 774 |
+
# URL - agent will call classify_from_url
|
| 775 |
+
user_text += f"\n\nWhat bird is this? {image_path}"
|
| 776 |
+
else:
|
| 777 |
+
# Local file - call MCP tool directly (show in tool log)
|
| 778 |
+
tool_log += "🟢 Pre-Classification (Direct MODAL MCP Call)\n"
|
| 779 |
+
tool_log += "Tool: classify_from_base64\n"
|
| 780 |
+
tool_log += "Status: Calling Modal GPU classifier directly to avoid token limits...\n\n"
|
| 781 |
+
yield chat_response, tool_log
|
| 782 |
+
|
| 783 |
+
with open(image_path, "rb") as img_file:
|
| 784 |
+
image_data = base64.b64encode(img_file.read()).decode('utf-8')
|
| 785 |
+
|
| 786 |
+
# Direct MCP call
|
| 787 |
+
transport = StreamableHttpTransport(
|
| 788 |
+
url=AgentConfig.MODAL_MCP_URL,
|
| 789 |
+
headers={"X-API-Key": AgentConfig.BIRD_CLASSIFIER_API_KEY}
|
| 790 |
+
)
|
| 791 |
+
async with Client(transport) as client:
|
| 792 |
+
result = await client.call_tool(
|
| 793 |
+
"classify_from_base64",
|
| 794 |
+
arguments={"image_data": image_data}
|
| 795 |
+
)
|
| 796 |
+
|
| 797 |
+
if result and result.content:
|
| 798 |
+
classification = json.loads(result.content[0].text)
|
| 799 |
+
species = classification.get("species", "Unknown")
|
| 800 |
+
confidence = classification.get("confidence", 0)
|
| 801 |
+
|
| 802 |
+
# Update tool log
|
| 803 |
+
tool_log += f"✅ Result: {species} ({confidence:.1%})\n"
|
| 804 |
+
tool_log += f"{json.dumps(classification, indent=2)}\n\n"
|
| 805 |
+
tool_log += "---\n\n"
|
| 806 |
+
|
| 807 |
+
# Update user message
|
| 808 |
+
user_text += f"\n\nI uploaded a bird image. The classifier identified it as: {species} (confidence: {confidence:.1%}). Can you tell me more about this bird?"
|
| 809 |
+
else:
|
| 810 |
+
tool_log += "❌ Failed\n\n---\n\n"
|
| 811 |
+
user_text += "\n\n⚠️ Failed to classify the uploaded image."
|
| 812 |
+
|
| 813 |
+
yield chat_response, tool_log
|
| 814 |
+
else:
|
| 815 |
+
user_text = message
|
| 816 |
+
|
| 817 |
+
# -------------------------------------------------------------------------
|
| 818 |
+
# 4. STREAM AGENT RESPONSE WITH TOOL VISIBILITY
|
| 819 |
+
# -------------------------------------------------------------------------
|
| 820 |
+
print(f"[DEBUG AGENT INPUT] Sending to agent: {user_text}") # DEBUG
|
| 821 |
+
async for event in agent.astream_events(
|
| 822 |
+
{"messages": [{"role": "user", "content": user_text}]},
|
| 823 |
+
config,
|
| 824 |
+
version="v2"
|
| 825 |
+
):
|
| 826 |
+
kind = event["event"]
|
| 827 |
+
|
| 828 |
+
# Tool call started
|
| 829 |
+
if kind == "on_tool_start":
|
| 830 |
+
tool_count += 1
|
| 831 |
+
tool_name = event["name"]
|
| 832 |
+
tool_input = event.get("data", {}).get("input", {})
|
| 833 |
+
|
| 834 |
+
# Add to tool log
|
| 835 |
+
tool_log += f"\n🟢 Tool #{tool_count}: {tool_name}\n"
|
| 836 |
+
tool_log += f"Status: Running...\n"
|
| 837 |
+
tool_log += f"Input:\n{json.dumps(tool_input, indent=2)}\n\n"
|
| 838 |
+
|
| 839 |
+
# Also add visual indicator to chat (wrapped in semantic tag)
|
| 840 |
+
chat_response += f"\n\n<tool_call>🔧 Using {tool_name}...</tool_call>\n\n"
|
| 841 |
+
|
| 842 |
+
yield chat_response, tool_log
|
| 843 |
+
|
| 844 |
+
# LLM streaming tokens
|
| 845 |
+
elif kind == "on_chat_model_stream":
|
| 846 |
+
content = event["data"]["chunk"].content
|
| 847 |
+
if content:
|
| 848 |
+
# Handle both string (OpenAI) and list (Anthropic) content formats
|
| 849 |
+
if isinstance(content, list):
|
| 850 |
+
# Anthropic returns list of content blocks - extract text
|
| 851 |
+
for block in content:
|
| 852 |
+
if hasattr(block, 'text'):
|
| 853 |
+
chat_response += block.text
|
| 854 |
+
elif isinstance(block, dict) and 'text' in block:
|
| 855 |
+
chat_response += block['text']
|
| 856 |
+
else:
|
| 857 |
+
# OpenAI/HF return string directly
|
| 858 |
+
chat_response += content
|
| 859 |
+
yield chat_response, tool_log
|
| 860 |
+
|
| 861 |
+
# Tool finished
|
| 862 |
+
elif kind == "on_tool_end":
|
| 863 |
+
tool_output = event.get("data", {}).get("output", "")
|
| 864 |
+
|
| 865 |
+
# Format output for tool log (truncate if needed)
|
| 866 |
+
output_str = str(tool_output)
|
| 867 |
+
if len(output_str) > 1000:
|
| 868 |
+
output_str = output_str[:1000] + "\n...(truncated)"
|
| 869 |
+
|
| 870 |
+
# Add to tool log
|
| 871 |
+
tool_log += f"✅ Status: Completed\n"
|
| 872 |
+
tool_log += f"Output:\n{output_str}\n\n"
|
| 873 |
+
tool_log += "---\n\n"
|
| 874 |
+
|
| 875 |
+
# Format output for chat display (with image rendering)
|
| 876 |
+
formatted_output = format_tool_output_for_chat(tool_output)
|
| 877 |
+
if formatted_output.strip():
|
| 878 |
+
chat_response += formatted_output
|
| 879 |
+
|
| 880 |
+
yield chat_response, tool_log
|
| 881 |
+
|
| 882 |
+
# Final yield
|
| 883 |
+
## NEW: Updated with LlamaIndex OutputParser
|
| 884 |
+
# yield chat_response, tool_log
|
| 885 |
+
try:
|
| 886 |
+
from langgraph_agent.structured_output import parse_agent_response
|
| 887 |
+
formatted_response = await parse_agent_response(
|
| 888 |
+
raw_response=chat_response,
|
| 889 |
+
provider=provider_key,
|
| 890 |
+
api_key=api_key,
|
| 891 |
+
model=model
|
| 892 |
+
)
|
| 893 |
+
yield formatted_response, tool_log
|
| 894 |
+
except ImportError:
|
| 895 |
+
# Fallback if LlamaIndex not installed
|
| 896 |
+
yield chat_response, tool_log
|
| 897 |
+
except Exception as e:
|
| 898 |
+
# Fallback if parsing fails
|
| 899 |
+
print(f"[STRUCTURED OUTPUT ERROR]: {e}")
|
| 900 |
+
yield chat_response, tool_log
|
| 901 |
+
|
| 902 |
+
|
| 903 |
+
# ============================================================================
|
| 904 |
+
# MODAL SERVER HEALTH CHECK
|
| 905 |
+
# ============================================================================
|
| 906 |
+
|
| 907 |
+
async def check_modal_server_health():
    """
    Check if the Modal MCP server is alive and warm.

    Connects using the configured URL/API key and lists tools as a liveness
    probe, bounded by a 60-second timeout (Modal cold starts can take 30-60s).

    Returns:
        A short, emoji-prefixed status string suitable for direct UI display.
        Never raises — all failures are mapped to a status message.
    """
    import asyncio

    print("[DEBUG] Health check started...")

    async def do_health_check():
        # One-shot probe: open an MCP session and list the available tools.
        transport = StreamableHttpTransport(
            url=AgentConfig.MODAL_MCP_URL,
            headers={"X-API-Key": AgentConfig.BIRD_CLASSIFIER_API_KEY}
        )

        async with Client(transport) as client:
            # Try to list tools as a health check
            tools = await client.list_tools()
            # Simplified from `tools and len(tools) > 0` — empty list is falsy.
            if tools:
                return f"✅ Online ({len(tools)} tools ready)"
            return "⚠️ Server responded but no tools found"

    try:
        # Wrap in timeout - Modal cold starts can take 30-60 seconds
        result = await asyncio.wait_for(do_health_check(), timeout=60.0)
        print(f"[DEBUG] Health check result: {result}")
        return result

    except asyncio.TimeoutError:
        print("[DEBUG] Health check timeout")
        return "⏱️ Timeout (still warming up...)"
    except Exception as e:
        # Map common failure modes to friendly status strings for the UI.
        print(f"[DEBUG] Health check error: {e}")
        error_msg = str(e)
        if "401" in error_msg or "Unauthorized" in error_msg:
            return "🔐 Auth failed"
        elif "timeout" in error_msg.lower():
            return "⏱️ Timeout (waking up...)"
        else:
            # Fixed: was f"❌ Offline" — an f-string with no placeholders.
            return "❌ Offline"
|
| 948 |
+
|
| 949 |
+
# Wrapper to convert to Gradio 6 message format
|
| 950 |
+
async def chat_wrapper(message, history, provider, hf_key, openai_key, anthropic_key, agent_mode, tool_log_state, request: gr.Request):
    """
    Adapt chat output streams to Gradio 6's message-dict history format.

    Appends the user turn to the history, then re-yields every partial
    assistant reply from chat_with_tool_visibility as a full history list.

    Yields: (updated_history, updated_tool_log) tuples.
    """
    # Debug: show which API keys arrived (values masked).
    print(f"[DEBUG] chat_wrapper received - provider: {provider}, hf_key: {'***' if hf_key else 'None'}, openai_key: {'***' if openai_key else 'None'}, anthropic_key: {'***' if anthropic_key else 'None'}")

    # MultimodalTextbox submits a dict ({"text": ..., "files": ...});
    # plain string messages pass through unchanged.
    user_text = message.get("text", "") if isinstance(message, dict) else message

    # Append the user turn up front so every yield carries the whole conversation.
    history = history + [{"role": "user", "content": user_text}]

    stream = chat_with_tool_visibility(
        message, history, provider, hf_key, openai_key, anthropic_key, agent_mode, request
    )
    async for partial_reply, log_text in stream:
        # Rebuild the assistant turn on each chunk rather than mutating history.
        yield history + [{"role": "assistant", "content": partial_reply}], log_text
|
| 973 |
+
|
| 974 |
+
# ============================================================================
|
| 975 |
+
# UI DEFINITION - DUAL PANEL LAYOUT WITH CLOUD AESTHETIC
|
| 976 |
+
# ============================================================================
|
| 977 |
+
|
| 978 |
+
# Helper function to update text examples based on agent mode
|
| 979 |
+
def update_text_examples_for_mode(mode):
    """Return the gr.Dataset of text examples matching the selected agent mode."""
    print(f"[DEBUG] Updating text examples for mode: {mode}")

    audio_mode = mode == "Audio Finder Agent"
    # Any other value means "Specialized Subagents (3 Specialists)".
    source = AUDIO_FINDER_TEXT_EXAMPLES if audio_mode else MULTI_AGENT_TEXT_EXAMPLES

    # gr.Dataset expects each sample as a one-element row.
    samples = [[text] for text in source]

    if audio_mode:
        print(f"[DEBUG] Audio Finder text samples: {len(samples)} examples")
    else:
        print(f"[DEBUG] Multi-agent text samples: {len(samples)} examples")

    return gr.Dataset(samples=samples)
|
| 993 |
+
|
| 994 |
+
# Helper function to create config HTML
|
| 995 |
+
def _has_api_key(user_input, env_var):
    """Return True when a non-blank user-supplied key exists, or when the
    named environment variable provides a fallback."""
    return bool((user_input and user_input.strip()) or os.getenv(env_var))


def create_config_html(provider_choice, agent_mode_choice, hf_key_input, openai_key_input, anthropic_key_input=""):
    """Generate sky-themed config card HTML.

    Args:
        provider_choice: "HuggingFace", "Anthropic", or anything else
            (treated as OpenAI).
        agent_mode_choice: agent-mode dropdown value; only checked for the
            substring "Specialized Subagents".
        hf_key_input / openai_key_input / anthropic_key_input: user-entered
            API keys; environment variables are used as fallback when blank.

    Returns:
        An HTML string showing provider, key status, model, and agent mode.
    """
    # Determine model and API key status. The key-presence check was
    # duplicated per branch; it now lives in _has_api_key.
    if provider_choice == "HuggingFace":
        model = AgentConfig.DEFAULT_HF_MODEL
        has_key = _has_api_key(hf_key_input, "HF_API_KEY")
    elif provider_choice == "Anthropic":
        model = AgentConfig.DEFAULT_ANTHROPIC_MODEL
        has_key = _has_api_key(anthropic_key_input, "ANTHROPIC_API_KEY")
    else:
        model = AgentConfig.DEFAULT_OPENAI_MODEL
        has_key = _has_api_key(openai_key_input, "OPENAI_API_KEY")

    # Extract mode name
    mode_display = "3 Specialists" if "Specialized Subagents" in agent_mode_choice else "Audio Finder"

    # Status styling (green check when a key is available, red cross otherwise)
    if has_key:
        status_bg = "rgba(16, 185, 129, 0.2)"
        status_color = "#10b981"
        status_icon = "✓"
    else:
        status_bg = "rgba(239, 68, 68, 0.2)"
        status_color = "#ef4444"
        status_icon = "✗"

    return f"""
    <div style="
        background: linear-gradient(135deg, rgba(31, 41, 55, 0.95) 0%, rgba(17, 24, 39, 0.98) 100%);
        border-radius: 12px;
        padding: 16px 20px;
        font-family: 'Segoe UI', system-ui, sans-serif;
        border: 1px solid #374151;
        box-shadow: 0 4px 15px rgba(0, 0, 0, 0.3);
        backdrop-filter: blur(10px);
    ">
        <!-- Provider Row -->
        <div style="
            display: flex;
            align-items: center;
            justify-content: space-between;
            padding: 6px 0;
        ">
            <span style="font-size: 12px; color: #9ca3af;">Provider</span>
            <div style="display: flex; align-items: center; gap: 6px;">
                <span style="
                    font-size: 13px;
                    font-weight: 500;
                    color: #f9fafb;
                ">{provider_choice}</span>
                <span style="
                    display: inline-flex;
                    align-items: center;
                    justify-content: center;
                    width: 18px;
                    height: 18px;
                    border-radius: 50%;
                    background: {status_bg};
                    color: {status_color};
                    font-size: 11px;
                    font-weight: bold;
                ">{status_icon}</span>
            </div>
        </div>

        <!-- Model Row -->
        <div style="
            display: flex;
            align-items: center;
            justify-content: space-between;
            padding: 6px 0;
        ">
            <span style="font-size: 12px; color: #9ca3af;">Model</span>
            <span style="
                font-size: 12px;
                font-weight: 500;
                color: #60a5fa;
                font-family: 'SF Mono', 'Fira Code', 'Consolas', monospace;
                background: rgba(59, 130, 246, 0.15);
                padding: 2px 8px;
                border-radius: 4px;
            ">{model}</span>
        </div>

        <!-- Mode Row -->
        <div style="
            display: flex;
            align-items: center;
            justify-content: space-between;
            padding: 6px 0;
        ">
            <span style="font-size: 12px; color: #9ca3af;">Mode</span>
            <span style="
                font-size: 12px;
                font-weight: 500;
                color: #38bdf8;
                background: rgba(56, 189, 248, 0.15);
                padding: 3px 10px;
                border-radius: 20px;
                border: 1px solid rgba(56, 189, 248, 0.3);
            ">{mode_display}</span>
        </div>
    </div>
    """
|
| 1099 |
+
|
| 1100 |
+
with gr.Blocks() as demo:
|
| 1101 |
+
|
| 1102 |
+
# ============================================================================
|
| 1103 |
+
# STATE MANAGEMENT - ONBOARDING FLOW
|
| 1104 |
+
# ============================================================================
|
| 1105 |
+
stored_hf_key = gr.State("")
|
| 1106 |
+
stored_openai_key = gr.State("")
|
| 1107 |
+
stored_anthropic_key = gr.State("")
|
| 1108 |
+
|
| 1109 |
+
# Enhanced BirdScope header
|
| 1110 |
+
gr.HTML("""
|
| 1111 |
+
<header class="birdscope-header">
|
| 1112 |
+
<!-- Decorative cloud elements -->
|
| 1113 |
+
<div style="position: absolute; inset: 0; overflow: hidden; pointer-events: none;">
|
| 1114 |
+
<div class="cloud-decor-1"></div>
|
| 1115 |
+
<div class="cloud-decor-2"></div>
|
| 1116 |
+
<div class="cloud-decor-3"></div>
|
| 1117 |
+
|
| 1118 |
+
<!-- Flying bird silhouettes -->
|
| 1119 |
+
<svg class="bird-silhouette bird-1" viewBox="0 0 24 24" fill="currentColor">
|
| 1120 |
+
<path d="M3.5 12C3.5 12 6 9 12 9C18 9 20.5 12 20.5 12C20.5 12 18 10 12 10C6 10 3.5 12 3.5 12Z"/>
|
| 1121 |
+
</svg>
|
| 1122 |
+
<svg class="bird-silhouette bird-2" viewBox="0 0 24 24" fill="currentColor">
|
| 1123 |
+
<path d="M3.5 12C3.5 12 6 9 12 9C18 9 20.5 12 20.5 12C20.5 12 18 10 12 10C6 10 3.5 12 3.5 12Z"/>
|
| 1124 |
+
</svg>
|
| 1125 |
+
<svg class="bird-silhouette bird-3" viewBox="0 0 24 24" fill="currentColor">
|
| 1126 |
+
<path d="M3.5 12C3.5 12 6 9 12 9C18 9 20.5 12 20.5 12C20.5 12 18 10 12 10C6 10 3.5 12 3.5 12Z"/>
|
| 1127 |
+
</svg>
|
| 1128 |
+
</div>
|
| 1129 |
+
|
| 1130 |
+
<!-- Main content -->
|
| 1131 |
+
<div class="header-content">
|
| 1132 |
+
<!-- Logo and title row -->
|
| 1133 |
+
<div class="header-top">
|
| 1134 |
+
<!-- Bird logo -->
|
| 1135 |
+
<div class="bird-logo-wrapper">
|
| 1136 |
+
<div class="bird-logo-glow"></div>
|
| 1137 |
+
<div class="bird-logo">
|
| 1138 |
+
<svg style="width: 2rem; height: 2rem; color: white;" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round">
|
| 1139 |
+
<!-- Stylized bird -->
|
| 1140 |
+
<path d="M21 8c-2 0-4 1-6 3-1.5-2-4-3-7-3-2 0-4 .5-5 1l8 4-2 6 4-3 4 3-2-6 8-4c-1-.5-1.5-1-2-1z" fill="currentColor" stroke-width="0"/>
|
| 1141 |
+
<circle cx="7" cy="9" r="1" fill="white"/>
|
| 1142 |
+
</svg>
|
| 1143 |
+
</div>
|
| 1144 |
+
</div>
|
| 1145 |
+
|
| 1146 |
+
<!-- Title -->
|
| 1147 |
+
<div class="header-title-group">
|
| 1148 |
+
<div style="display: flex; align-items: baseline; gap: 0.5rem;">
|
| 1149 |
+
<h1>BirdScope</h1>
|
| 1150 |
+
<span class="header-ai-text">AI</span>
|
| 1151 |
+
<span class="header-v2-badge">v2</span>
|
| 1152 |
+
</div>
|
| 1153 |
+
<p class="header-subtitle">AI-powered bird identification & species reference</p>
|
| 1154 |
+
</div>
|
| 1155 |
+
|
| 1156 |
+
</div>
|
| 1157 |
+
|
| 1158 |
+
<!-- Feature tags with MCP status check button -->
|
| 1159 |
+
<div class="feature-tags">
|
| 1160 |
+
<div class="feature-tag">
|
| 1161 |
+
<span>🔍</span>
|
| 1162 |
+
<span>Image Classification</span>
|
| 1163 |
+
</div>
|
| 1164 |
+
<div class="feature-tag">
|
| 1165 |
+
<span>📸</span>
|
| 1166 |
+
<span>Unsplash Reference</span>
|
| 1167 |
+
</div>
|
| 1168 |
+
<div class="feature-tag">
|
| 1169 |
+
<span>🎵</span>
|
| 1170 |
+
<span>Audio Recordings</span>
|
| 1171 |
+
</div>
|
| 1172 |
+
<div class="feature-tag">
|
| 1173 |
+
<span>🌍</span>
|
| 1174 |
+
<span>Conservation Status</span>
|
| 1175 |
+
</div>
|
| 1176 |
+
</div>
|
| 1177 |
+
</div>
|
| 1178 |
+
|
| 1179 |
+
<!-- Bottom border -->
|
| 1180 |
+
<div class="header-border"></div>
|
| 1181 |
+
</header>
|
| 1182 |
+
|
| 1183 |
+
<script>
|
| 1184 |
+
// Auto-scroll tool log panel continuously (for Textbox component)
|
| 1185 |
+
const observer = new MutationObserver(() => {
|
| 1186 |
+
const toolLog = document.querySelector('#tool-log-output textarea');
|
| 1187 |
+
if (toolLog) {
|
| 1188 |
+
toolLog.scrollTop = toolLog.scrollHeight;
|
| 1189 |
+
}
|
| 1190 |
+
});
|
| 1191 |
+
|
| 1192 |
+
// Start observing once the page loads
|
| 1193 |
+
setTimeout(() => {
|
| 1194 |
+
const toolLogContainer = document.querySelector('#tool-log-output');
|
| 1195 |
+
if (toolLogContainer) {
|
| 1196 |
+
observer.observe(toolLogContainer, {
|
| 1197 |
+
childList: true,
|
| 1198 |
+
subtree: true,
|
| 1199 |
+
characterData: true,
|
| 1200 |
+
attributes: true
|
| 1201 |
+
});
|
| 1202 |
+
}
|
| 1203 |
+
}, 1000);
|
| 1204 |
+
</script>
|
| 1205 |
+
""")
|
| 1206 |
+
|
| 1207 |
+
# ============================================================================
|
| 1208 |
+
# ONBOARDING WALKTHROUGH - Using Native Gradio Component
|
| 1209 |
+
# ============================================================================
|
| 1210 |
+
with gr.Walkthrough(selected=1) as walkthrough:
|
| 1211 |
+
|
| 1212 |
+
# Step 1: Welcome & Provider Selection
|
| 1213 |
+
with gr.Step("Welcome", id=1):
|
| 1214 |
+
with gr.Column(elem_classes=["sidebar", "onboarding-page"]):
|
| 1215 |
+
gr.Markdown(
|
| 1216 |
+
"""
|
| 1217 |
+
# Welcome to BirdScope AI!
|
| 1218 |
+
|
| 1219 |
+
Let's get you started with your AI-powered bird identification assistant.
|
| 1220 |
+
""",
|
| 1221 |
+
elem_classes=["welcome-text"]
|
| 1222 |
+
)
|
| 1223 |
+
|
| 1224 |
+
gr.Markdown("---")
|
| 1225 |
+
|
| 1226 |
+
gr.Markdown("### SELECT LLM PROVIDER")
|
| 1227 |
+
welcome_provider = gr.Dropdown(
|
| 1228 |
+
choices=["HuggingFace", "OpenAI", "Anthropic"],
|
| 1229 |
+
value="OpenAI",
|
| 1230 |
+
show_label=False,
|
| 1231 |
+
container=False
|
| 1232 |
+
)
|
| 1233 |
+
|
| 1234 |
+
gr.Markdown("**Choose your AI provider**")
|
| 1235 |
+
gr.Markdown("Select between HuggingFace (open models) or OpenAI (GPT models)")
|
| 1236 |
+
|
| 1237 |
+
gr.Markdown("---")
|
| 1238 |
+
|
| 1239 |
+
gr.HTML("""
|
| 1240 |
+
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 8px;">
|
| 1241 |
+
<img src="https://cdn.brandfetch.io/idGqKHD5xE/theme/dark/symbol.svg?c=1bxid64Mup7aczewSAYMX&t=1668516030712"
|
| 1242 |
+
alt="HuggingFace"
|
| 1243 |
+
style="width: 20px; height: 20px;">
|
| 1244 |
+
<strong style="color: #d1d5db;">HuggingFace</strong>
|
| 1245 |
+
</div>
|
| 1246 |
+
""")
|
| 1247 |
+
gr.Markdown("Uses open-source models like Qwen 2.5-72B")
|
| 1248 |
+
|
| 1249 |
+
gr.HTML("""
|
| 1250 |
+
<div style="display: flex; align-items: center; gap: 8px; margin-top: 16px;">
|
| 1251 |
+
<img src="https://cdn.oaistatic.com/_next/static/media/apple-touch-icon.59f2e898.png"
|
| 1252 |
+
alt="OpenAI"
|
| 1253 |
+
style="width: 20px; height: 20px; border-radius: 4px;">
|
| 1254 |
+
<strong style="color: #d1d5db;">OpenAI</strong>
|
| 1255 |
+
</div>
|
| 1256 |
+
""")
|
| 1257 |
+
gr.Markdown("Uses GPT-4 models for high-quality responses")
|
| 1258 |
+
|
| 1259 |
+
gr.HTML("""
|
| 1260 |
+
<div style="display: flex; align-items: center; gap: 8px; margin-top: 16px;">
|
| 1261 |
+
<img src="https://mintlify.s3.us-west-1.amazonaws.com/anthropic/logo/dark.svg"
|
| 1262 |
+
alt="Anthropic"
|
| 1263 |
+
style="width: 20px; height: 20px;">
|
| 1264 |
+
<strong style="color: #d1d5db;">Anthropic</strong>
|
| 1265 |
+
</div>
|
| 1266 |
+
""")
|
| 1267 |
+
gr.Markdown("Uses Claude models (Sonnet, Opus, Haiku)")
|
| 1268 |
+
|
| 1269 |
+
gr.Markdown("---")
|
| 1270 |
+
|
| 1271 |
+
welcome_next_btn = gr.Button("Next: Enter API Key →", variant="primary", size="lg")
|
| 1272 |
+
|
| 1273 |
+
# Step 2: API Key Input
|
| 1274 |
+
with gr.Step("API Key", id=2):
|
| 1275 |
+
with gr.Column(elem_classes=["sidebar", "onboarding-page"]):
|
| 1276 |
+
gr.Markdown("# Step 2: Enter Your API Key 🔑")
|
| 1277 |
+
gr.Markdown("To use BirdScope AI, you'll need an API key from your selected provider.")
|
| 1278 |
+
|
| 1279 |
+
gr.Markdown("---")
|
| 1280 |
+
|
| 1281 |
+
# HuggingFace API key section
|
| 1282 |
+
with gr.Column(visible=False) as hf_key_section:
|
| 1283 |
+
gr.Markdown("### AUTHENTICATION")
|
| 1284 |
+
gr.HTML("""
|
| 1285 |
+
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 8px;">
|
| 1286 |
+
<img src="https://cdn.brandfetch.io/idGqKHD5xE/theme/dark/symbol.svg?c=1bxid64Mup7aczewSAYMX&t=1668516030712"
|
| 1287 |
+
alt="HuggingFace"
|
| 1288 |
+
style="width: 20px; height: 20px;">
|
| 1289 |
+
<strong style="color: #d1d5db;">HuggingFace API Key</strong>
|
| 1290 |
+
</div>
|
| 1291 |
+
""")
|
| 1292 |
+
onboarding_hf_key = gr.Textbox(
|
| 1293 |
+
placeholder="hf_...",
|
| 1294 |
+
type="password",
|
| 1295 |
+
show_label=False,
|
| 1296 |
+
container=False,
|
| 1297 |
+
elem_classes=["hf-section"]
|
| 1298 |
+
)
|
| 1299 |
+
gr.Markdown("Get your key from [HF Settings](https://huggingface.co/settings/tokens)")
|
| 1300 |
+
|
| 1301 |
+
# OpenAI API key section
|
| 1302 |
+
with gr.Column(visible=False) as openai_key_section:
|
| 1303 |
+
gr.Markdown("### AUTHENTICATION")
|
| 1304 |
+
gr.HTML("""
|
| 1305 |
+
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 8px;">
|
| 1306 |
+
<img src="https://cdn.oaistatic.com/_next/static/media/apple-touch-icon.59f2e898.png"
|
| 1307 |
+
alt="OpenAI"
|
| 1308 |
+
style="width: 20px; height: 20px; border-radius: 4px;">
|
| 1309 |
+
<strong style="color: #d1d5db;">OpenAI API Key</strong>
|
| 1310 |
+
</div>
|
| 1311 |
+
""")
|
| 1312 |
+
onboarding_openai_key = gr.Textbox(
|
| 1313 |
+
placeholder="sk-...",
|
| 1314 |
+
type="password",
|
| 1315 |
+
show_label=False,
|
| 1316 |
+
container=False,
|
| 1317 |
+
elem_classes=["openai-section"]
|
| 1318 |
+
)
|
| 1319 |
+
gr.Markdown("Get your key from [OpenAI Platform](https://platform.openai.com/api-keys)")
|
| 1320 |
+
|
| 1321 |
+
# Anthropic API key section
|
| 1322 |
+
with gr.Column(visible=False) as anthropic_key_section:
|
| 1323 |
+
gr.Markdown("### AUTHENTICATION")
|
| 1324 |
+
gr.HTML("""
|
| 1325 |
+
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 8px;">
|
| 1326 |
+
<img src="https://mintlify.s3.us-west-1.amazonaws.com/anthropic/logo/dark.svg"
|
| 1327 |
+
alt="Anthropic"
|
| 1328 |
+
style="width: 20px; height: 20px;">
|
| 1329 |
+
<strong style="color: #d1d5db;">Anthropic API Key</strong>
|
| 1330 |
+
</div>
|
| 1331 |
+
""")
|
| 1332 |
+
onboarding_anthropic_key = gr.Textbox(
|
| 1333 |
+
placeholder="sk-ant-...",
|
| 1334 |
+
type="password",
|
| 1335 |
+
show_label=False,
|
| 1336 |
+
container=False,
|
| 1337 |
+
elem_classes=["anthropic-section"]
|
| 1338 |
+
)
|
| 1339 |
+
gr.Markdown("Get your key from [Anthropic Console](https://console.anthropic.com/settings/keys)")
|
| 1340 |
+
|
| 1341 |
+
gr.Markdown("---")
|
| 1342 |
+
|
| 1343 |
+
with gr.Row():
|
| 1344 |
+
api_back_btn = gr.Button("← Back", variant="secondary", scale=1)
|
| 1345 |
+
api_start_btn = gr.Button("Start Using BirdScope →", variant="primary", scale=3)
|
| 1346 |
+
|
| 1347 |
+
# Step 3: Main App
|
| 1348 |
+
with gr.Step("BirdScope AI", id=3):
|
| 1349 |
+
with gr.Row():
|
| 1350 |
+
# Left: Chat interface (scale=2)
|
| 1351 |
+
with gr.Column(scale=2):
|
| 1352 |
+
chatbot = gr.Chatbot(
|
| 1353 |
+
show_label=False,
|
| 1354 |
+
height=500,
|
| 1355 |
+
elem_classes=["chatbot-container"]
|
| 1356 |
+
)
|
| 1357 |
+
|
| 1358 |
+
msg = gr.MultimodalTextbox(
|
| 1359 |
+
placeholder="Ask about birds or upload an image...",
|
| 1360 |
+
file_count="single",
|
| 1361 |
+
file_types=["image"],
|
| 1362 |
+
interactive=True,
|
| 1363 |
+
show_label=False
|
| 1364 |
+
)
|
| 1365 |
+
|
| 1366 |
+
with gr.Row():
|
| 1367 |
+
submit = gr.Button("Send", scale=3)
|
| 1368 |
+
clear = gr.Button("Clear", scale=1)
|
| 1369 |
+
|
| 1370 |
+
# Photo examples - always shown (static)
|
| 1371 |
+
gr.Markdown("**Try uploading a bird photo:**")
|
| 1372 |
+
gr.Examples(
|
| 1373 |
+
examples=PHOTO_EXAMPLES,
|
| 1374 |
+
inputs=msg,
|
| 1375 |
+
cache_examples=False
|
| 1376 |
+
)
|
| 1377 |
+
|
| 1378 |
+
# Text examples - change based on agent mode (dynamic)
|
| 1379 |
+
gr.Markdown("**Or try a text query:**")
|
| 1380 |
+
text_examples = gr.Examples(
|
| 1381 |
+
examples=MULTI_AGENT_TEXT_EXAMPLES, # Default to multi-agent text examples
|
| 1382 |
+
inputs=msg,
|
| 1383 |
+
cache_examples=False
|
| 1384 |
+
)
|
| 1385 |
+
|
| 1386 |
+
# Middle: Tool execution log (scale=1)
|
| 1387 |
+
with gr.Column(scale=1):
|
| 1388 |
+
tool_output = gr.Textbox(
|
| 1389 |
+
value="*Waiting for tool calls...*",
|
| 1390 |
+
elem_classes=["tool-log-panel"],
|
| 1391 |
+
elem_id="tool-log-output",
|
| 1392 |
+
autoscroll=True,
|
| 1393 |
+
show_label=False,
|
| 1394 |
+
interactive=False,
|
| 1395 |
+
container=False
|
| 1396 |
+
)
|
| 1397 |
+
|
| 1398 |
+
# Right: Sidebar (scale=1)
|
| 1399 |
+
with gr.Column(scale=1, elem_classes=["sidebar"]):
|
| 1400 |
+
|
| 1401 |
+
# MCP Server Status Check
|
| 1402 |
+
mcp_status_html = gr.HTML("""
|
| 1403 |
+
<div class="mcp-badge online" style="margin-bottom: 16px; justify-content: center;">
|
| 1404 |
+
<span class="mcp-pulse"></span>
|
| 1405 |
+
<span>Powered by Modal MCP</span>
|
| 1406 |
+
</div>
|
| 1407 |
+
""")
|
| 1408 |
+
check_mcp_btn = gr.Button("Check Modal MCP Server Status", size="sm", variant="secondary", elem_classes=["modal-check-btn"])
|
| 1409 |
+
|
| 1410 |
+
gr.HTML("""
|
| 1411 |
+
<p style="font-size: 0.75rem; color: #9ca3af; margin-top: 8px; margin-bottom: 16px; line-height: 1.4;">
|
| 1412 |
+
Please be patient if the Modal MCP server needs to cold start
|
| 1413 |
+
</p>
|
| 1414 |
+
""")
|
| 1415 |
+
|
| 1416 |
+
gr.Markdown("---")
|
| 1417 |
+
|
| 1418 |
+
# Provider selection
|
| 1419 |
+
gr.Markdown("### SELECT LLM PROVIDER")
|
| 1420 |
+
provider = gr.Dropdown(
|
| 1421 |
+
choices=["HuggingFace", "OpenAI", "Anthropic"],
|
| 1422 |
+
value="OpenAI",
|
| 1423 |
+
show_label=False,
|
| 1424 |
+
container=False
|
| 1425 |
+
)
|
| 1426 |
+
|
| 1427 |
+
# Agent Mode Selector
|
| 1428 |
+
gr.Markdown("**Agent Configuration**")
|
| 1429 |
+
gr.Markdown("Choose between unified agent or specialized routing")
|
| 1430 |
+
agent_mode = gr.Dropdown(
|
| 1431 |
+
choices=[
|
| 1432 |
+
"Specialized Subagents (3 Specialists)",
|
| 1433 |
+
"Audio Finder Agent" # Changed from "Single Agent (All Tools)"
|
| 1434 |
+
],
|
| 1435 |
+
value="Specialized Subagents (3 Specialists)",
|
| 1436 |
+
show_label=False,
|
| 1437 |
+
container=False
|
| 1438 |
+
)
|
| 1439 |
+
|
| 1440 |
+
gr.Markdown("---")
|
| 1441 |
+
|
| 1442 |
+
# API Keys
|
| 1443 |
+
gr.Markdown("### AUTHENTICATION")
|
| 1444 |
+
|
| 1445 |
+
gr.HTML("""
|
| 1446 |
+
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 8px;">
|
| 1447 |
+
<img src="https://cdn.brandfetch.io/idGqKHD5xE/theme/dark/symbol.svg?c=1bxid64Mup7aczewSAYMX&t=1668516030712"
|
| 1448 |
+
alt="HuggingFace"
|
| 1449 |
+
style="width: 20px; height: 20px;">
|
| 1450 |
+
<strong style="color: #d1d5db;">HuggingFace API Key</strong>
|
| 1451 |
+
</div>
|
| 1452 |
+
""")
|
| 1453 |
+
hf_key = gr.Textbox(
|
| 1454 |
+
placeholder="hf_...",
|
| 1455 |
+
type="password",
|
| 1456 |
+
show_label=False,
|
| 1457 |
+
container=False,
|
| 1458 |
+
elem_classes=["hf-section"]
|
| 1459 |
+
)
|
| 1460 |
+
gr.Markdown("Get your key from [HF Settings](https://huggingface.co/settings/tokens)")
|
| 1461 |
+
|
| 1462 |
+
gr.HTML("""
|
| 1463 |
+
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 8px;">
|
| 1464 |
+
<img src="https://cdn.oaistatic.com/_next/static/media/apple-touch-icon.59f2e898.png"
|
| 1465 |
+
alt="OpenAI"
|
| 1466 |
+
style="width: 20px; height: 20px; border-radius: 4px;">
|
| 1467 |
+
<strong style="color: #d1d5db;">OpenAI API Key</strong>
|
| 1468 |
+
</div>
|
| 1469 |
+
""")
|
| 1470 |
+
openai_key = gr.Textbox(
|
| 1471 |
+
placeholder="sk-...",
|
| 1472 |
+
type="password",
|
| 1473 |
+
show_label=False,
|
| 1474 |
+
container=False,
|
| 1475 |
+
elem_classes=["openai-section"]
|
| 1476 |
+
)
|
| 1477 |
+
gr.Markdown("Get your key from [OpenAI Platform](https://platform.openai.com/api-keys)")
|
| 1478 |
+
|
| 1479 |
+
gr.HTML("""
|
| 1480 |
+
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 8px;">
|
| 1481 |
+
<img src="https://mintlify.s3.us-west-1.amazonaws.com/anthropic/logo/dark.svg"
|
| 1482 |
+
alt="Anthropic"
|
| 1483 |
+
style="width: 20px; height: 20px;">
|
| 1484 |
+
<strong style="color: #d1d5db;">Anthropic API Key</strong>
|
| 1485 |
+
</div>
|
| 1486 |
+
""")
|
| 1487 |
+
anthropic_key = gr.Textbox(
|
| 1488 |
+
placeholder="sk-ant-...",
|
| 1489 |
+
type="password",
|
| 1490 |
+
show_label=False,
|
| 1491 |
+
container=False,
|
| 1492 |
+
elem_classes=["anthropic-section"]
|
| 1493 |
+
)
|
| 1494 |
+
gr.Markdown("Get your key from [Anthropic Console](https://console.anthropic.com/settings/keys)")
|
| 1495 |
+
|
| 1496 |
+
# Current Configuration Display
|
| 1497 |
+
gr.Markdown("---")
|
| 1498 |
+
gr.Markdown("### CURRENT CONFIG")
|
| 1499 |
+
|
| 1500 |
+
# Generate initial config HTML
|
| 1501 |
+
session_status = gr.HTML(
|
| 1502 |
+
value=create_config_html(
|
| 1503 |
+
provider_choice="OpenAI",
|
| 1504 |
+
agent_mode_choice="Specialized Subagents (3 Specialists)",
|
| 1505 |
+
hf_key_input="",
|
| 1506 |
+
openai_key_input="",
|
| 1507 |
+
anthropic_key_input=""
|
| 1508 |
+
)
|
| 1509 |
+
)
|
| 1510 |
+
|
| 1511 |
+
# About
|
| 1512 |
+
gr.Markdown("---")
|
| 1513 |
+
gr.Markdown("""
|
| 1514 |
+
### ABOUT
|
| 1515 |
+
|
| 1516 |
+
Built for the [Hugging Face MCP-1st-Birthday Hackathon](https://huggingface.co/MCP-1st-Birthday)
|
| 1517 |
+
""")
|
| 1518 |
+
|
| 1519 |
+
gr.HTML("""
|
| 1520 |
+
<div style="text-align: center; margin: 16px 0;">
|
| 1521 |
+
<img src="https://cdn-uploads.huggingface.co/production/uploads/60d2dc1007da9c17c72708f8/s4q7RzD3S-8xQ8ecXrSwb.png"
|
| 1522 |
+
alt="Hugging Face MCP 1st Birthday"
|
| 1523 |
+
style="max-width: 100%; height: auto; border-radius: 8px;">
|
| 1524 |
+
</div>
|
| 1525 |
+
""")
|
| 1526 |
+
|
| 1527 |
+
gr.Markdown("""
|
| 1528 |
+
**MCP Servers:**
|
| 1529 |
+
- Modal GPU classifier (2 tools)
|
| 1530 |
+
- Nuthatch species database (7 tools)
|
| 1531 |
+
|
| 1532 |
+
**Capabilities:**
|
| 1533 |
+
- Visual bird identification
|
| 1534 |
+
- Species reference images (Unsplash)
|
| 1535 |
+
- Audio recordings (xeno-canto)
|
| 1536 |
+
- Conservation status data
|
| 1537 |
+
- Taxonomic exploration
|
| 1538 |
+
|
| 1539 |
+
**v2 Features:**
|
| 1540 |
+
- Separate tool log panel
|
| 1541 |
+
- Detailed execution tracking
|
| 1542 |
+
- Tool input/output inspection
|
| 1543 |
+
- Perfect for debugging!
|
| 1544 |
+
""")
|
| 1545 |
+
|
| 1546 |
+
# State for tool log
|
| 1547 |
+
tool_log_state = gr.State("*Waiting for tool calls...*")
|
| 1548 |
+
|
| 1549 |
+
# ============================================================================
|
| 1550 |
+
# ONBOARDING NAVIGATION HANDLERS - Using Walkthrough
|
| 1551 |
+
# ============================================================================
|
| 1552 |
+
|
| 1553 |
+
def handle_welcome_next(provider_choice):
    """Advance to the API-key step, revealing only the section matching the chosen provider."""
    # Exactly one (or zero, for an unknown value) of these is True.
    visible = {
        name: provider_choice == name
        for name in ("HuggingFace", "OpenAI", "Anthropic")
    }

    return (
        gr.Walkthrough(selected=2),                    # walkthrough - go to step 2
        gr.update(visible=visible["HuggingFace"]),     # hf_key_section
        gr.update(visible=visible["OpenAI"]),          # openai_key_section
        gr.update(visible=visible["Anthropic"])        # anthropic_key_section
    )
|
| 1565 |
+
|
| 1566 |
+
def handle_api_back():
    """Navigate back to welcome page."""
    # Walkthrough step ids are the ones declared with gr.Step(..., id=N);
    # id=1 is the Welcome step.
    return gr.Walkthrough(selected=1)
|
| 1569 |
+
|
| 1570 |
+
def handle_api_start(provider_choice, hf_key_input, openai_key_input, anthropic_key_input):
    """Save credentials and navigate to main app with pre-populated values."""
    provider_str = str(provider_choice) if provider_choice else "OpenAI"

    # Debug output — never log the key itself, only whether it was supplied.
    print(f"[DEBUG] handle_api_start - provider: {provider_str}")
    print(f"[DEBUG] handle_api_start - hf_key: {'***' if hf_key_input else 'empty'}")
    print(f"[DEBUG] handle_api_start - openai_key: {'***' if openai_key_input else 'empty'}")
    print(f"[DEBUG] handle_api_start - anthropic_key: {'***' if anthropic_key_input else 'empty'}")

    # Keep only the key that matches the chosen provider; blank the others.
    hf_key_value, openai_key_value, anthropic_key_value = "", "", ""
    if provider_str == "HuggingFace":
        hf_key_value = hf_key_input or ""
    elif provider_str == "Anthropic":
        anthropic_key_value = anthropic_key_input or ""
    else:
        openai_key_value = openai_key_input or ""

    # Render the session-status panel shown on the main app page.
    config_html = create_config_html(
        provider_choice=provider_str,
        agent_mode_choice="Specialized Subagents (3 Specialists)",
        hf_key_input=hf_key_value,
        openai_key_input=openai_key_value,
        anthropic_key_input=anthropic_key_value
    )

    return (
        gr.Walkthrough(selected=3),   # walkthrough - go to step 3 (main app)
        provider_str,                 # provider dropdown
        hf_key_value,                 # hf_key textbox
        openai_key_value,             # openai_key textbox
        anthropic_key_value,          # anthropic_key textbox
        config_html,                  # session_status HTML
        hf_key_value,                 # stored_hf_key state
        openai_key_value,             # stored_openai_key state
        anthropic_key_value           # stored_anthropic_key state
    )
|
| 1614 |
+
|
| 1615 |
+
# Connect onboarding navigation
# Step 1 -> Step 2: show only the key input for the selected provider.
welcome_next_btn.click(
    fn=handle_welcome_next,
    inputs=[welcome_provider],
    outputs=[walkthrough, hf_key_section, openai_key_section, anthropic_key_section]
)

# Step 2 -> Step 1: simple back navigation, no state is touched.
api_back_btn.click(
    fn=handle_api_back,
    outputs=[walkthrough]
)

# Step 2 -> Step 3: persist the entered key into both the visible settings
# widgets and the hidden per-session state components, then enter the app.
api_start_btn.click(
    fn=handle_api_start,
    inputs=[welcome_provider, onboarding_hf_key, onboarding_openai_key, onboarding_anthropic_key],
    outputs=[
        walkthrough,
        provider,
        hf_key,
        openai_key,
        anthropic_key,
        session_status,
        stored_hf_key,
        stored_openai_key,
        stored_anthropic_key
    ]
)
|
| 1642 |
+
|
| 1643 |
+
# Helper function to update MCP badge HTML
def update_mcp_badge_html(status_text: str) -> str:
    """Render the MCP status badge HTML for the given status text."""
    # Ordered marker rules: the first rule whose markers appear in the
    # status text wins; unrecognized statuses fall back to "online".
    rules = (
        ("online", ("✅", "Online")),
        ("offline", ("❌", "Offline")),
        ("checking", ("⏱️", "Timeout", "Checking")),
    )
    badge_class = next(
        (cls for cls, markers in rules if any(m in status_text for m in markers)),
        "online",
    )

    return f"""
    <div class="mcp-badge {badge_class}" style="margin-bottom: 16px; justify-content: center;">
        <span class="mcp-pulse"></span>
        <span>{status_text}</span>
    </div>
    """
|
| 1662 |
+
|
| 1663 |
+
# JavaScript to scroll tool log to bottom
# Runs client-side after each chat turn so the newest tool-log entry is visible.
scroll_js = """
() => {
    const toolLog = document.querySelector('#tool-log-output textarea');
    if (toolLog) {
        toolLog.scrollTop = toolLog.scrollHeight;
    }
}
"""

# Connect events
# Update config display when provider, agent mode, or API keys change
provider.change(
    fn=create_config_html,
    inputs=[provider, agent_mode, hf_key, openai_key, anthropic_key],
    outputs=[session_status]
)
agent_mode.change(
    fn=create_config_html,
    inputs=[provider, agent_mode, hf_key, openai_key, anthropic_key],
    outputs=[session_status]
)

# Update text examples when agent mode changes (photo examples stay the same)
agent_mode.change(
    fn=update_text_examples_for_mode,
    inputs=[agent_mode],
    outputs=[text_examples.dataset]
)

# Any key edit re-renders the session-status panel so the user sees
# immediately which credential the app will use.
hf_key.change(
    fn=create_config_html,
    inputs=[provider, agent_mode, hf_key, openai_key, anthropic_key],
    outputs=[session_status]
)
openai_key.change(
    fn=create_config_html,
    inputs=[provider, agent_mode, hf_key, openai_key, anthropic_key],
    outputs=[session_status]
)
anthropic_key.change(
    fn=create_config_html,
    inputs=[provider, agent_mode, hf_key, openai_key, anthropic_key],
    outputs=[session_status]
)

# Enter key in the message box: run the agent, then clear the input and
# scroll the tool log to its newest entry.
submit_event = msg.submit(
    fn=chat_wrapper,
    inputs=[msg, chatbot, provider, hf_key, openai_key, anthropic_key, agent_mode, tool_log_state],
    outputs=[chatbot, tool_output]
).then(
    lambda: None,
    None,
    msg,
    js=scroll_js
)

# Send button: identical pipeline to the Enter-key path above.
submit_click = submit.click(
    fn=chat_wrapper,
    inputs=[msg, chatbot, provider, hf_key, openai_key, anthropic_key, agent_mode, tool_log_state],
    outputs=[chatbot, tool_output]
).then(
    lambda: None,
    None,
    msg,
    js=scroll_js
)
|
| 1730 |
+
|
| 1731 |
+
def clear_conversation(request: gr.Request):
    """Reset the chat UI and drop this session's cached agents (and their memory)."""
    from agent_cache import agent_cache, agent_last_used

    # Cache keys are tuples whose first element is the Gradio session hash;
    # collect this session's keys first, then delete (never mutate while iterating).
    session_id = request.session_hash
    stale_keys = [key for key in agent_cache if key[0] == session_id]

    for key in stale_keys:
        del agent_cache[key]
        agent_last_used.pop(key, None)

    print(f"[DEBUG] Clear clicked - removed {len(stale_keys)} cached agents for session {session_id[:8]}")
    # Empty chat history, placeholder tool log, cleared message box.
    return [], "*Waiting for tool calls...*", None
|
| 1746 |
+
|
| 1747 |
+
# Clear button: wipes the chat, the tool log, and this session's agent cache.
clear.click(
    fn=clear_conversation,
    inputs=[],  # request will be auto-injected
    outputs=[chatbot, tool_output, msg]
)
|
| 1752 |
+
|
| 1753 |
+
# MCP status check handler
async def handle_mcp_check():
    """Probe the Modal MCP server, streaming badge updates to the UI."""
    # Show a transient "checking" badge right away...
    yield update_mcp_badge_html("Checking...")
    # ...then replace it with the actual health-check result.
    result = await check_modal_server_health()
    yield update_mcp_badge_html(result)
|
| 1761 |
+
|
| 1762 |
+
# MCP health-check button; progress bar hidden because the handler itself
# streams a "Checking..." badge while the probe runs.
check_mcp_btn.click(
    fn=handle_mcp_check,
    outputs=mcp_status_html,
    show_progress="hidden"
)
|
| 1767 |
+
|
| 1768 |
+
if __name__ == "__main__":
    # JavaScript to force dark mode
    # Adds ?__theme=dark to the URL on first load (Gradio reads this query
    # param); a page reload is triggered by assigning window.location.search.
    force_dark_mode = """
    function() {
        const params = new URLSearchParams(window.location.search);
        if (!params.has('__theme')) {
            params.set('__theme', 'dark');
            window.location.search = params.toString();
        }
    }
    """

    # Launch the Gradio app with the custom theme/CSS defined earlier in the file.
    demo.launch(theme=gr.themes.Soft(), css=custom_css, js=force_dark_mode)
|
bonus_ebird_tools.py
ADDED
|
@@ -0,0 +1,841 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
eBird MCP Server
Wraps eBird API v2 as reusable MCP tools
Runs locally with FastMCP and supports both stdio and streamable-http transport

Features:
- 7 core tools for bird data discovery
- Configurable tool enabling/disabling
- Support for both user input AND classifier output
- Rate limiting and error handling
- JSON responses for easy integration
- Dual transport: stdio for CLI, streamable-http for web clients (via FastAPI)
"""
import os
import sys
import requests
import json
import time
from typing import Optional, Dict, List, Any
from fastmcp import FastMCP
from dotenv import load_dotenv


# ============================================================================
# CONFIGURATION & SETUP
# ============================================================================

load_dotenv()

# eBird credentials and endpoint; the base URL is overridable for testing.
EBIRD_API_KEY = os.getenv("EBIRD_API_KEY")
BASE_URL = os.getenv("EBIRD_BASE_URL", "https://api.ebird.org/v2")
DEFAULT_TIMEOUT = 15  # seconds per outbound HTTP request
RATE_LIMIT_DELAY = 0.1  # 100ms between requests

if not EBIRD_API_KEY:
    # Print to stderr to avoid corrupting STDIO MCP protocol (stdout must be JSON-RPC only)
    print("⚠️ [WARNING]: EBIRD_API_KEY not found in .env", file=sys.stderr)
    print("   Get one from: https://ebird.org/api/keygen", file=sys.stderr)

# Authentication configuration (production only)
IS_PRODUCTION = os.getenv("ENVIRONMENT") == "production"
MCP_API_KEY = os.getenv("MCP_API_KEY")

# Tool configuration - enable/disable as needed
ENABLED_TOOLS = {
    "search_species": True,
    "get_recent_sightings_nearby": True,
    "find_hotspots_nearby": True,
    "get_location_birds": True,
    "get_species_info": True,
    "get_notable_sightings": True,
    "analyze_location": True,
}

# Initialize FastMCP server with optional auth
if IS_PRODUCTION and MCP_API_KEY:
    # Production: Enable API key authentication
    # NOTE(review): DebugTokenVerifier is a debug-grade verifier — confirm it is
    # acceptable for production use before relying on it for real auth.
    from fastmcp.server.auth.providers.debug import DebugTokenVerifier

    auth = DebugTokenVerifier(
        validate=lambda token: token == MCP_API_KEY,
        client_id="ebird-mcp-client"
    )
    mcp = FastMCP("eBird Data Explorer", auth=auth)
else:
    # Development: No authentication
    mcp = FastMCP("eBird Data Explorer")
|
| 68 |
+
|
| 69 |
+
# Rate limiting tracker — epoch seconds of the most recent outbound request.
_last_request_time = 0


# ============================================================================
# HELPER FUNCTIONS
# ============================================================================

def _rate_limit():
    """Sleep just long enough to keep at least RATE_LIMIT_DELAY between eBird calls."""
    global _last_request_time
    remaining = RATE_LIMIT_DELAY - (time.time() - _last_request_time)
    if remaining > 0:
        time.sleep(remaining)
    _last_request_time = time.time()
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _make_request(endpoint: str, params: Optional[Dict] = None) -> Optional[Dict]:
    """
    Centralized request handler with error handling and rate limiting.

    Args:
        endpoint: API endpoint path (e.g., "/data/obs/geo/recent")
        params: Query parameters dictionary

    Returns:
        JSON response data or None on error (diagnostics are logged to stderr)
    """
    _rate_limit()
    try:
        headers = {"X-eBirdApiToken": EBIRD_API_KEY}
        url = f"{BASE_URL}{endpoint}"
        response = requests.get(
            url,
            headers=headers,
            params=params or {},
            timeout=DEFAULT_TIMEOUT
        )

        if response.status_code == 200:
            return response.json()
        # FIX: all diagnostics below go to stderr — when this server runs over
        # the STDIO MCP transport, stdout must carry JSON-RPC messages only
        # (same rationale as the API-key warning at module import).
        elif response.status_code == 400:
            print(f"❌ Bad Request ({url}): {response.text[:400]}", file=sys.stderr, flush=True)
            return None
        elif response.status_code == 401:
            print(f"❌ Unauthorized ({url}): Check your EBIRD_API_KEY - body={response.text[:400]}", file=sys.stderr, flush=True)
            return None
        elif response.status_code == 404:
            print(f"❌ Not found ({url}): Invalid endpoint or resource - body={response.text[:400]}", file=sys.stderr, flush=True)
            return None
        else:
            print(
                f"❌ HTTP {response.status_code} for {url} "
                f"params={params or {}} body={response.text[:400]}",
                file=sys.stderr,
                flush=True,
            )
            return None

    except requests.Timeout:
        print(f"❌ Request timeout after {DEFAULT_TIMEOUT}s for {endpoint}", file=sys.stderr, flush=True)
        return None
    except requests.ConnectionError:
        print(f"❌ Connection error calling {endpoint} - check network", file=sys.stderr, flush=True)
        return None
    except Exception as e:
        print(f"❌ Unexpected error calling {endpoint}: {str(e)}", file=sys.stderr, flush=True)
        return None
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def _format_success_response(data: Any, **kwargs) -> str:
|
| 139 |
+
"""Format a successful response as JSON"""
|
| 140 |
+
response = {"status": "success", "data": data}
|
| 141 |
+
response.update(kwargs)
|
| 142 |
+
return json.dumps(response)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def _format_error_response(error: str) -> str:
|
| 146 |
+
"""Format an error response as JSON"""
|
| 147 |
+
return json.dumps({"status": "error", "error": error})
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
# ============================================================================
# TOOL 1: search_species
# ============================================================================
# Use case: User types "cardinal" or classifier returns "Northern Cardinal"
# This tool finds the species code needed for other tools

def search_species(search_term: str, max_results: int = 10) -> str:
    """
    Search for bird species by common or scientific name.

    Finds the species codes needed by the other lookup tools. Accepts:
    - Common names: "cardinal", "blue jay", "bald eagle"
    - Partial matches: "car" -> "Northern Cardinal", "Carolina Parakeet", etc.
    - Scientific names: "Cardinalis cardinalis"

    Can accept:
    - User input: Direct species search
    - Classifier output: e.g., "Northern Cardinal" from image classification

    Args:
        search_term: Bird name (common or scientific)
        max_results: Maximum matches to return (default: 10)

    Returns:
        JSON with matched species and their codes for other tools

    Example:
        search_species("cardinal")
        -> Returns all cardinals with species codes (norcar, carcar, etc.)
    """
    if not search_term or len(search_term.strip()) < 2:
        return _format_error_response("Search term must be at least 2 characters")

    try:
        # The full eBird taxonomy is fetched and filtered client-side.
        data = _make_request("/ref/taxonomy/ebird", {"fmt": "json"})
        if not data:
            return _format_error_response("Failed to fetch species database")

        needle = search_term.lower()

        def _matches(entry):
            # Main species only (no subspecies/hybrids); match either name.
            return (
                needle in entry['comName'].lower() or needle in entry['sciName'].lower()
            ) and entry.get('category') == 'species'

        hits = []
        for entry in data:
            if _matches(entry):
                hits.append({
                    "common_name": entry['comName'],
                    "scientific_name": entry['sciName'],
                    "species_code": entry['speciesCode'],
                    "family": entry.get('familyComName', 'Unknown'),
                    "order": entry.get('order', 'Unknown'),
                    "category": entry.get('category', 'Unknown')
                })

        if not hits:
            return _format_error_response(f"No species found matching '{search_term}'")

        top = hits[:max_results]
        return _format_success_response(
            top,
            count=len(top),
            search_term=search_term
        )

    except Exception as e:
        return _format_error_response(f"Search failed: {str(e)}")

# Register as MCP tool
mcp.tool()(search_species)
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
# ============================================================================
# TOOL 2: get_recent_sightings_nearby
# ============================================================================
# Use case: After identifying a bird, find recent sightings near user

def get_recent_sightings_nearby(
    species_code: str,
    latitude: float,
    longitude: float,
    radius_km: int = 50,
    max_results: int = 10
) -> str:
    """
    Get recent sightings of a specific bird near a location.

    Observations come from other birdwatchers in the eBird network.

    Can accept:
    - User input: Coordinates from address lookup, species code from search
    - Classifier output: Species code (after search_species lookup)

    Args:
        species_code: eBird species code (e.g., "norcar" for Northern Cardinal)
        latitude: Location latitude
        longitude: Location longitude
        radius_km: Search radius in kilometers (max 50)
        max_results: Maximum observations to return

    Returns:
        JSON with recent observations near location

    Example:
        get_recent_sightings_nearby("norcar", 40.7829, -73.9654, 25, 10)
        -> Recent cardinal sightings in Central Park area
    """
    # Guard clauses: validate inputs before touching the network.
    if not species_code:
        return _format_error_response("Species code required")
    if not -90 <= latitude <= 90:
        return _format_error_response("Latitude must be between -90 and 90")
    if not -180 <= longitude <= 180:
        return _format_error_response("Longitude must be between -180 and 180")

    try:
        query = {
            "lat": latitude,
            "lng": longitude,
            "dist": min(radius_km, 50),  # this eBird endpoint caps dist at 50 km
            "maxResults": max_results
        }
        data = _make_request(f"/data/obs/geo/recent/{species_code}", query)

        # None means the request itself failed; [] means no observations.
        if data is None:
            return _format_error_response("Failed to fetch sightings")

        if not data:
            return _format_success_response(
                [],
                count=0,
                location={"lat": latitude, "lng": longitude},
                radius_km=radius_km,
                species_code=species_code
            )

        def _as_record(obs):
            # Required fields are indexed directly; optional ones default to None.
            return {
                "common_name": obs['comName'],
                "scientific_name": obs['sciName'],
                "location": obs['locName'],
                "location_id": obs['locId'],
                "date": obs['obsDt'],
                "count": obs.get('howMany'),
                "latitude": obs.get('lat'),
                "longitude": obs.get('lng')
            }

        sightings = [_as_record(obs) for obs in data]

        return _format_success_response(
            sightings,
            count=len(sightings),
            location={"lat": latitude, "lng": longitude},
            radius_km=radius_km
        )

    except Exception as e:
        return _format_error_response(f"Lookup failed: {str(e)}")

# Register as MCP tool
mcp.tool()(get_recent_sightings_nearby)
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
# ============================================================================
# TOOL 3: find_hotspots_nearby
# ============================================================================
# Use case: Find popular birding locations near user

def find_hotspots_nearby(
    latitude: float,
    longitude: float,
    radius_km: int = 50,
    max_results: int = 15
) -> str:
    """
    Find popular birding hotspots (known locations) near a location.

    Hotspots are locations frequented by birders where many species recorded.
    Great for planning birding trips.

    Can accept:
    - User input: Coordinates from address lookup
    - Classifier output: Not directly, but used after location analysis

    Args:
        latitude: Location latitude
        longitude: Location longitude
        radius_km: Search radius in kilometers
        max_results: Maximum hotspots to return

    Returns:
        JSON with nearby hotspots and their details

    Example:
        find_hotspots_nearby(40.7829, -73.9654, 25, 10)
        -> Popular birding locations near Central Park
    """
    # Guard clauses: validate coordinates before touching the network.
    if not -90 <= latitude <= 90:
        return _format_error_response("Latitude must be between -90 and 90")
    if not -180 <= longitude <= 180:
        return _format_error_response("Longitude must be between -180 and 180")

    try:
        query = {
            "lat": latitude,
            "lng": longitude,
            "dist": radius_km,
            "fmt": "json"
        }
        data = _make_request("/ref/hotspot/geo", query)

        # None means the request itself failed; [] means no hotspots found.
        if data is None:
            return _format_error_response("Failed to fetch hotspots")

        if not data:
            return _format_success_response(
                [],
                count=0,
                location={"lat": latitude, "lng": longitude},
                radius_km=radius_km,
                message="No hotspots found nearby"
            )

        def _as_record(hotspot):
            return {
                "name": hotspot['locName'],
                "location_id": hotspot['locId'],
                "latitude": hotspot['lat'],
                "longitude": hotspot['lng'],
                "species_recorded": hotspot.get('numSpeciesAllTime', 0),
                "latest_obs_date": hotspot.get('latestObsDt', 'Unknown')
            }

        # The API does not take a max-results parameter here, so truncate locally.
        hotspots = [_as_record(h) for h in data[:max_results]]

        return _format_success_response(
            hotspots,
            count=len(hotspots),
            location={"lat": latitude, "lng": longitude},
            radius_km=radius_km
        )

    except Exception as e:
        return _format_error_response(f"Lookup failed: {str(e)}")

# Register as MCP tool
mcp.tool()(find_hotspots_nearby)
|
| 406 |
+
|
| 407 |
+
# ============================================================================
# TOOL 4: get_location_birds
# ============================================================================
# Use case: See ALL birds being seen at a location right now

def get_location_birds(
    latitude: float,
    longitude: float,
    radius_km: int = 50,
    max_results: int = 50
) -> str:
    """
    Get ALL recent bird sightings at a location (no species filter).

    Returns comprehensive view of bird activity - what's being seen right now

    Can accept:
    - User input: Coordinates from address lookup
    - Classifier output: Not directly, but provides context for found species

    Args:
        latitude: Location latitude
        longitude: Location longitude
        radius_km: Search radius in kilometers
        max_results: Maximum sightings to return

    Returns:
        JSON with all recent sightings and summary statistics
        (unique species count plus the 5 most-observed species)

    Example:
        get_location_birds(40.7829, -73.9654, 25, 60)
        -> All birds being seen in Central Park area right now
    """
    from collections import Counter  # local import keeps module deps unchanged

    if not -90 <= latitude <= 90:
        return _format_error_response("Latitude must be between -90 and 90")

    if not -180 <= longitude <= 180:
        return _format_error_response("Longitude must be between -180 and 180")

    try:
        endpoint = "/data/obs/geo/recent"
        params = {
            "lat": latitude,
            "lng": longitude,
            "dist": radius_km,
            "maxResults": max_results
        }

        data = _make_request(endpoint, params)

        # None means the request itself failed; [] means no sightings.
        if data is None:
            return _format_error_response("Failed to fetch sightings")

        if not data:
            return _format_success_response(
                [],
                count=0,
                unique_species=0,
                location={"lat": latitude, "lng": longitude},
                radius_km=radius_km,
                message="No sightings found at this location"
            )

        sightings = [
            {
                "common_name": obs['comName'],
                "scientific_name": obs['sciName'],
                "species_code": obs['speciesCode'],
                "location": obs['locName'],
                "date": obs['obsDt'],
                "count": obs.get('howMany'),
                "latitude": obs.get('lat'),
                "longitude": obs.get('lng')
            }
            for obs in data
        ]

        # Observation frequency per species — Counter replaces the manual
        # dict-increment loop; most_common(5) matches the previous
        # sorted(..., reverse=True)[:5] ordering (ties keep first-seen order).
        bird_counts = Counter(obs['common_name'] for obs in sightings)
        unique_species = len(bird_counts)
        top_birds = bird_counts.most_common(5)

        return _format_success_response(
            sightings,
            count=len(sightings),
            unique_species=unique_species,
            location={"lat": latitude, "lng": longitude},
            radius_km=radius_km,
            top_birds=[{"species": name, "observations": count} for name, count in top_birds]
        )

    except Exception as e:
        return _format_error_response(f"Lookup failed: {str(e)}")

# Register as MCP tool
mcp.tool()(get_location_birds)
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
# ============================================================================
|
| 510 |
+
# TOOL 5: get_species_info
|
| 511 |
+
# ============================================================================
|
| 512 |
+
# Use case: Get taxonomy and detailed info about a species
|
| 513 |
+
|
| 514 |
+
def get_species_info(species_code: str) -> str:
    """
    Get detailed taxonomy and metadata for a bird species.

    Returns scientific classification, family, order, and other details.

    Can accept:
    - User input: Species code from search_species tool
    - Classifier output: Species code (after search_species lookup)

    Args:
        species_code: eBird species code (e.g., "norcar")

    Returns:
        JSON with complete species information

    Example:
        get_species_info("norcar")
        -> Northern Cardinal taxonomy, family, order, etc.
    """
    # Normalize once: the exact speciesCode comparison below would otherwise
    # miss padded input like " norcar " even though validation accepted it.
    code = (species_code or "").strip()
    if len(code) < 2:
        return _format_error_response("Species code required")

    try:
        endpoint = "/ref/taxonomy/ebird"
        params = {
            "fmt": "json",
            "species": code
        }

        data = _make_request(endpoint, params)

        if data is None:
            return _format_error_response("Failed to fetch taxonomy")

        # Find the main species record (skip subspecies/hybrid entries)
        species = next(
            (s for s in data
             if s.get('speciesCode') == code and s.get('category') == 'species'),
            None
        )

        if not species:
            return _format_error_response(f"Species code '{code}' not found")

        info = {
            "common_name": species['comName'],
            "scientific_name": species['sciName'],
            "species_code": species['speciesCode'],
            "family": species.get('familyComName', 'Unknown'),
            "family_sci_name": species.get('familySciName', 'Unknown'),
            "order": species.get('order', 'Unknown'),
            "category": species.get('category', 'Unknown')
        }

        return _format_success_response(info, species_code=code)

    except Exception as e:
        return _format_error_response(f"Lookup failed: {str(e)}")

# Register as MCP tool
mcp.tool()(get_species_info)
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
# ============================================================================
|
| 579 |
+
# TOOL 6: get_notable_sightings
|
| 580 |
+
# ============================================================================
|
| 581 |
+
# Use case: Find rare/unusual birds in a region
|
| 582 |
+
|
| 583 |
+
def get_notable_sightings(
    region_code: str = "US",
    max_results: int = 10
) -> str:
    """
    Get rare or notable bird sightings in a region.

    Notable sightings are birds that are unusual/rare for the region.
    Great for discovering unexpected species.

    Can accept:
    - User input: Region code (e.g., "US", "US-NY", "CA-ON")
    - Classifier output: Not directly, but region can be derived from location

    Args:
        region_code: Region code (country, state, province)
        max_results: Maximum notable sightings to return (must be >= 1)

    Returns:
        JSON with recent notable/rare sightings

    Example:
        get_notable_sightings("US-NY", 10)
        -> Rare/unusual birds spotted in New York recently
    """
    # A whitespace-only region would pass a plain truthiness check and then
    # produce a malformed endpoint URL, so strip before validating.
    region = (region_code or "").strip()
    if not region:
        return _format_error_response("Region code required")

    if max_results < 1:
        return _format_error_response("max_results must be at least 1")

    try:
        endpoint = f"/data/obs/{region}/recent/notable"
        params = {"maxResults": max_results}

        data = _make_request(endpoint, params)

        if data is None:
            return _format_error_response("Failed to fetch notable sightings")

        if not data:
            return _format_success_response(
                [],
                count=0,
                region_code=region,
                message="No notable sightings found"
            )

        notable = [
            {
                "common_name": obs['comName'],
                "scientific_name": obs['sciName'],
                "species_code": obs['speciesCode'],
                "location": obs['locName'],
                "location_id": obs['locId'],
                "date": obs['obsDt'],
                # howMany/lat/lng may be absent on some observations
                "count": obs.get('howMany'),
                "latitude": obs.get('lat'),
                "longitude": obs.get('lng')
            }
            for obs in data
        ]

        return _format_success_response(
            notable,
            count=len(notable),
            region_code=region
        )

    except Exception as e:
        return _format_error_response(f"Lookup failed: {str(e)}")

# Register as MCP tool
mcp.tool()(get_notable_sightings)
|
| 654 |
+
|
| 655 |
+
# ============================================================================
|
| 656 |
+
# TOOL 7: analyze_location
|
| 657 |
+
# ============================================================================
|
| 658 |
+
# Use case: Comprehensive location analysis - all birds + hotspots + summary
|
| 659 |
+
|
| 660 |
+
def analyze_location(
    latitude: float,
    longitude: float,
    radius_km: int = 50
) -> str:
    """
    Comprehensive location analysis combining all bird data.

    This is a "power tool" that combines multiple API calls to give
    complete view of birding activity: recent sightings, hotspots, stats.

    Can accept:
    - User input: Coordinates from address lookup
    - Classifier output: Not directly, but provides full context

    Args:
        latitude: Location latitude (-90 to 90)
        longitude: Location longitude (-180 to 180)
        radius_km: Search radius in kilometers (1 to 50, eBird API limit)

    Returns:
        JSON with sightings, hotspots, and comprehensive statistics

    Example:
        analyze_location(40.7820, -73.9654, 25)
        -> Complete birding report for Central Park area
    """
    if not -90 <= latitude <= 90:
        return _format_error_response("Latitude must be between -90 and 90")

    if not -180 <= longitude <= 180:
        return _format_error_response("Longitude must be between -180 and 180")

    # eBird's 'dist' parameter is capped at 50 km. An out-of-range value would
    # otherwise fail silently: _make_request(...) returns None, the `or []`
    # fallback kicks in, and the caller gets an empty "successful" report.
    if not 0 < radius_km <= 50:
        return _format_error_response("radius_km must be between 1 and 50")

    try:
        # Get all recent observations
        obs_endpoint = "/data/obs/geo/recent"
        obs_params = {
            "lat": latitude,
            "lng": longitude,
            "dist": radius_km,
            "maxResults": 100
        }

        sightings_data = _make_request(obs_endpoint, obs_params) or []

        # Get hotspots (max 50)
        hotspots_endpoint = "/ref/hotspot/geo"
        hotspots_params = {
            "lat": latitude,
            "lng": longitude,
            "dist": radius_km,
            "fmt": "json"
        }

        hotspots_data = _make_request(hotspots_endpoint, hotspots_params) or []

        # Format sightings
        sightings = [
            {
                "common_name": obs['comName'],
                "scientific_name": obs['sciName'],
                "species_code": obs['speciesCode'],
                "location": obs['locName'],
                "date": obs['obsDt'],
                "count": obs.get('howMany'),
                "latitude": obs.get('lat'),
                "longitude": obs.get('lng')
            }
            for obs in sightings_data
        ]

        # Format hotspots (keep the response small: top 15 only)
        hotspots = [
            {
                "name": hotspot['locName'],
                "location_id": hotspot['locId'],
                "latitude": hotspot['lat'],
                "longitude": hotspot['lng'],
                "species_recorded": hotspot.get('numSpeciesAllTime', 0),
                "latest_obs_date": hotspot.get('latestObsDt', 'Unknown')
            }
            for hotspot in hotspots_data[:15]
        ]

        # Calculate statistics
        unique_species = len(set(obs['common_name'] for obs in sightings))

        # Find top species by observation count
        bird_counts = {}
        for obs in sightings:
            bird_counts[obs['common_name']] = bird_counts.get(obs['common_name'], 0) + 1
        top_birds = sorted(bird_counts.items(), key=lambda x: x[1], reverse=True)[:10]

        analysis = {
            "location": {
                "latitude": latitude,
                "longitude": longitude,
                "radius_km": radius_km,
            },
            "sightings": sightings,
            "hotspots": hotspots,
            "summary": {
                "total_sightings": len(sightings),
                "unique_species": unique_species,
                "total_hotspots": len(hotspots),
                "top_species": [{"name": name, "observations": count} for name, count in top_birds]
            }
        }

        return _format_success_response(analysis)

    except Exception as e:
        return _format_error_response(f"Analysis failed: {str(e)}")

# Register as MCP tool
mcp.tool()(analyze_location)
|
| 776 |
+
|
| 777 |
+
# ============================================================================
|
| 778 |
+
# SERVER STARTUP WITH DUAL TRANSPORT SUPPORT
|
| 779 |
+
# ============================================================================
|
| 780 |
+
|
| 781 |
+
def main():
    """Start the MCP server with dual transport support.

    Transport selection:
    - Default: stdio (JSON-RPC over stdin/stdout) for agents/subprocesses.
    - With --http / --streamable-http: MCP over HTTP; --host and --port
      optionally override the 127.0.0.1:8000 defaults.
    """
    # Determine transport mode first
    is_http_mode = "--http" in sys.argv or "--streamable-http" in sys.argv

    # For STDIO mode, all informational output must go to stderr
    # (stdout is reserved for JSON-RPC messages only)
    output = sys.stdout if is_http_mode else sys.stderr

    print("\n" + "=" * 70, file=output)
    print("🦅 [eBird MCP SERVER] - Starting...", file=output)
    print("=" * 70, file=output)
    print(f"[API KEY]: {'✅ Configured' if EBIRD_API_KEY else '❌ Missing'}", file=output)
    print("\n[AVAILABLE TOOLS]:", file=output)

    tools_list = [
        "1. search_species - Find species by name",
        "2. get_recent_sightings_nearby - Recent sightings near location",
        "3. find_hotspots_nearby - Find popular birding locations",
        "4. get_location_birds - All birds at a location",
        "5. get_species_info - Taxonomy and species details",
        "6. get_notable_sightings - Rare/unusual birds in region",
        "7. analyze_location - Comprehensive location analysis"
    ]

    for tool in tools_list:
        print(f" ✓ {tool}", file=output)

    print("\n" + "=" * 70, file=output)

    if is_http_mode:
        # Extract host/port from command line args. Exit with a clear message
        # on a non-numeric --port instead of crashing with a raw traceback.
        port = 8000
        host = "127.0.0.1"

        for i, arg in enumerate(sys.argv):
            if arg == "--port" and i + 1 < len(sys.argv):
                try:
                    port = int(sys.argv[i + 1])
                except ValueError:
                    print(f"[ERROR]: Invalid --port value: {sys.argv[i + 1]!r}", file=sys.stderr)
                    sys.exit(1)
            elif arg == "--host" and i + 1 < len(sys.argv):
                host = sys.argv[i + 1]

        print("[TRANSPORT]: Starting streamable-http MCP server", file=output)
        print(f"[HOST]: {host}", file=output)
        print(f"[PORT]: {port}", file=output)
        print(f"[URL]: http://{host}:{port}", file=output)
        print(f"[AUTH]: {'🔒 Enabled (production)' if IS_PRODUCTION and MCP_API_KEY else '🔓 Disabled (development)'}", file=output)
        print("[NOTE]: This is proper MCP over HTTP", file=output)
        print("=" * 70 + "\n", file=output)

        # Run with streamable-http transport (built-in MCP support)
        mcp.run(transport="streamable-http", host=host, port=port)
    else:
        print("[TRANSPORT]: Running as stdio MCP server", file=output)
        # Usage hint kept in sync with the committed filename
        # (bonus_ebird_tools.py, not the pre-rename ebird_tools.py).
        print("[NOTE]: For HTTP transport, use: python bonus_ebird_tools.py --http", file=output)
        print("=" * 70 + "\n", file=output)

        # Run as stdio MCP server (default)
        mcp.run(transport="stdio")


if __name__ == "__main__":
    main()
|
docs/bonus_ebird_docs/EBIRD_MCP_README.md
ADDED
|
@@ -0,0 +1,972 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# eBird MCP Server - BONUS Integration
|
| 2 |
+
|
| 3 |
+
Complete eBird API wrapper as a Model Context Protocol (MCP) server with **dual transport support** and **full MCP protocol compliance**.
|
| 4 |
+
|
| 5 |
+
## 🎯 BONUS: Integrate eBird into Your Agent Framework
|
| 6 |
+
|
| 7 |
+
**Want to replace nuthatch with eBird for real-time bird sightings?** Here's how to plug this bonus eBird MCP server into your existing BirdScope AI system:
|
| 8 |
+
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
### 🚀 **Architecture Options**
|
| 12 |
+
|
| 13 |
+
**Choose your approach:**
|
| 14 |
+
|
| 15 |
+
#### **Option A: Replace Nuthatch (Recommended for simplicity)**
|
| 16 |
+
Follow steps 1-8 below to swap nuthatch for eBird.
|
| 17 |
+
|
| 18 |
+
#### **Option B: Run Both Nuthatch + eBird (Maximum capabilities)**
|
| 19 |
+
|
| 20 |
+
Get the **best of both worlds** - static species data + real-time sightings:
|
| 21 |
+
|
| 22 |
+
1. **Keep nuthatch running** (your existing server)
|
| 23 |
+
2. **Add eBird as bonus server** on different port:
|
| 24 |
+
```bash
|
| 25 |
+
python bonus_ebird_tools.py --http --port 8001
|
| 26 |
+
```
|
| 27 |
+
3. **Update MCPClientManager** to connect to both:
|
| 28 |
+
```python
|
| 29 |
+
# Add to langgraph_agent/mcp_clients.py
|
| 30 |
+
async def create_multi_server_client(self):
|
| 31 |
+
clients = []
|
| 32 |
+
|
| 33 |
+
# Keep nuthatch (stdio)
|
| 34 |
+
nuthatch_client = await self.create_stdio_client("python nuthatch_tools.py")
|
| 35 |
+
clients.append(nuthatch_client)
|
| 36 |
+
|
| 37 |
+
# Add eBird (HTTP on port 8001)
|
| 38 |
+
ebird_client = await self.create_http_client("http://localhost:8001/mcp")
|
| 39 |
+
clients.append(ebird_client)
|
| 40 |
+
|
| 41 |
+
# Keep Modal
|
| 42 |
+
modal_client = await self.create_http_client(AgentConfig.MODAL_MCP_URL)
|
| 43 |
+
clients.append(modal_client)
|
| 44 |
+
|
| 45 |
+
return clients
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
4. **Combine toolsets** in subagent configs:
|
| 49 |
+
```python
|
| 50 |
+
"species_explorer": {
|
| 51 |
+
"tools": [
|
| 52 |
+
# Nuthatch tools
|
| 53 |
+
"search_birds", "get_bird_info", "get_bird_images", "get_bird_audio",
|
| 54 |
+
# eBird tools
|
| 55 |
+
"search_species", "get_recent_sightings_nearby", "find_hotspots_nearby"
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
**Benefits:** Complete birding AI with species database + real-time sightings + image classification.
|
| 61 |
+
|
| 62 |
+
---
|
| 63 |
+
|
| 64 |
+
### Step 1: Install eBird Dependencies
|
| 65 |
+
|
| 66 |
+
```bash
|
| 67 |
+
pip install requests python-dotenv
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
### Step 2: Get eBird API Key
|
| 71 |
+
|
| 72 |
+
1. Visit: https://ebird.org/api/keygen
|
| 73 |
+
2. Create account and generate API key
|
| 74 |
+
3. Add to your `.env` file:
|
| 75 |
+
```bash
|
| 76 |
+
EBIRD_API_KEY=your-actual-ebird-key-here
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
### Step 3: Start eBird MCP Server
|
| 80 |
+
|
| 81 |
+
Choose your transport mode:
|
| 82 |
+
|
| 83 |
+
**Option A: Stdio Transport (for agents)**
|
| 84 |
+
```bash
|
| 85 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 86 |
+
python bonus_ebird_tools.py
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
**Option B: HTTP Transport (for web UIs)**
|
| 90 |
+
```bash
|
| 91 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 92 |
+
python bonus_ebird_tools.py --http --port 8001
|
| 93 |
+
```
|
| 94 |
+
|
| 95 |
+
### Step 4: Update Environment Variables
|
| 96 |
+
|
| 97 |
+
In your `.env` file, replace nuthatch with eBird:
|
| 98 |
+
|
| 99 |
+
```bash
|
| 100 |
+
# OLD (nuthatch)
|
| 101 |
+
NUTHATCH_USE_STDIO=true
|
| 102 |
+
NUTHATCH_API_KEY=your-nuthatch-key
|
| 103 |
+
NUTHATCH_BASE_URL=https://nuthatch.lastelm.software/v2
|
| 104 |
+
|
| 105 |
+
# NEW (eBird)
|
| 106 |
+
EBIRD_USE_STDIO=true
|
| 107 |
+
EBIRD_API_KEY=your-ebird-key
|
| 108 |
+
EBIRD_BASE_URL=https://api.ebird.org/v2
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
### Step 5: Update MCP Client Manager
|
| 112 |
+
|
| 113 |
+
Modify `langgraph_agent/mcp_clients.py` to connect to eBird instead of nuthatch:
|
| 114 |
+
|
| 115 |
+
```python
|
| 116 |
+
# In create_multi_server_client() function:
|
| 117 |
+
|
| 118 |
+
# OLD: Nuthatch connection
|
| 119 |
+
# nuthatch_client = await self.create_stdio_client("python nuthatch_tools.py")
|
| 120 |
+
|
| 121 |
+
# NEW: eBird connection
|
| 122 |
+
ebird_client = await self.create_stdio_client("python bonus_ebird_tools.py")
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
### Step 6: Update Subagent Tool Configurations
|
| 126 |
+
|
| 127 |
+
Update `langgraph_agent/subagent_config.py` to use eBird tools:
|
| 128 |
+
|
| 129 |
+
```python
|
| 130 |
+
# OLD: Nuthatch tools
|
| 131 |
+
"search_birds": True,
|
| 132 |
+
"get_bird_info": True,
|
| 133 |
+
"get_bird_images": True,
|
| 134 |
+
"get_bird_audio": True,
|
| 135 |
+
|
| 136 |
+
# NEW: eBird tools
|
| 137 |
+
"search_species": True,
|
| 138 |
+
"get_recent_sightings_nearby": True,
|
| 139 |
+
"find_hotspots_nearby": True,
|
| 140 |
+
"get_location_birds": True,
|
| 141 |
+
"get_species_info": True,
|
| 142 |
+
"get_notable_sightings": True,
|
| 143 |
+
"analyze_location": True,
|
| 144 |
+
```
|
| 145 |
+
|
| 146 |
+
**Important:** eBird focuses on **real-time sightings** while nuthatch focuses on **species database**. Your subagents will now prioritize location-based bird data over static species information.
|
| 147 |
+
|
| 148 |
+
### Step 7: Test the Integration
|
| 149 |
+
|
| 150 |
+
```bash
|
| 151 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 152 |
+
python tests/test_subagents.py
|
| 153 |
+
```
|
| 154 |
+
|
| 155 |
+
This will test that your subagents can use the new eBird tools.
|
| 156 |
+
|
| 157 |
+
### Step 8: Update Agent Prompts (Optional)
|
| 158 |
+
|
| 159 |
+
Consider updating subagent prompts in `subagent_config.py` to leverage eBird's location features:
|
| 160 |
+
|
| 161 |
+
```python
|
| 162 |
+
"species_explorer": {
|
| 163 |
+
# ... existing config ...
|
| 164 |
+
"prompt": """You are a Species Explorer with access to real-time eBird data.
|
| 165 |
+
|
| 166 |
+
**Your New Capabilities:**
|
| 167 |
+
- Find recent sightings of birds near locations
|
| 168 |
+
- Discover popular birding hotspots
|
| 169 |
+
- Analyze complete location bird activity
|
| 170 |
+
- Track notable/rare bird sightings
|
| 171 |
+
|
| 172 |
+
# ... rest of prompt ...
|
| 173 |
+
"""
|
| 174 |
+
}
|
| 175 |
+
```
|
| 176 |
+
|
| 177 |
+
### What Changes in Your Agent Behavior
|
| 178 |
+
|
| 179 |
+
| **Aspect** | **Before (Nuthatch)** | **After (eBird)** |
|
| 180 |
+
|------------|----------------------|-------------------|
|
| 181 |
+
| **Data Focus** | Species database, images, audio | Real-time sightings, locations |
|
| 182 |
+
| **Location Tools** | None | ✅ Hotspots, sightings, analysis |
|
| 183 |
+
| **Freshness** | Static database | ⚡ Live bird observations |
|
| 184 |
+
| **Use Cases** | Species identification | 🗺️ Birding trip planning |
|
| 185 |
+
| **Coverage** | 1000+ species (NA/Europe) | 🌍 Global bird sightings |
|
| 186 |
+
|
| 187 |
+
### Expected Agent Capabilities
|
| 188 |
+
|
| 189 |
+
After integration, your agents can now:
|
| 190 |
+
|
| 191 |
+
1. **Find Recent Sightings**: "Where have Northern Cardinals been seen recently?"
|
| 192 |
+
2. **Discover Hotspots**: "What are the best birding locations near Central Park?"
|
| 193 |
+
3. **Location Analysis**: "Give me a complete birding report for this area"
|
| 194 |
+
4. **Rare Bird Alerts**: "What unusual birds have been spotted in New York?"
|
| 195 |
+
5. **Trip Planning**: "Plan a birding trip based on current sightings"
|
| 196 |
+
|
| 197 |
+
### Troubleshooting Integration
|
| 198 |
+
|
| 199 |
+
**Issue: "Unknown mode" error**
|
| 200 |
+
- Check that subagent configs use valid eBird tool names
|
| 201 |
+
- Verify eBird server is running
|
| 202 |
+
|
| 203 |
+
**Issue: No location data**
|
| 204 |
+
- Ensure coordinates are passed to location-based tools
|
| 205 |
+
- Check eBird API key is valid
|
| 206 |
+
|
| 207 |
+
**Issue: Different data format**
|
| 208 |
+
- eBird returns observation data, not static species info
|
| 209 |
+
- Update any code expecting nuthatch's data structure
|
| 210 |
+
|
| 211 |
+
### Rollback Instructions
|
| 212 |
+
|
| 213 |
+
To switch back to nuthatch:
|
| 214 |
+
|
| 215 |
+
1. Stop eBird server
|
| 216 |
+
2. Revert `.env` variables to nuthatch
|
| 217 |
+
3. Update `mcp_clients.py` back to nuthatch
|
| 218 |
+
4. Revert `subagent_config.py` tool names
|
| 219 |
+
5. Restart with `python nuthatch_tools.py`
|
| 220 |
+
|
| 221 |
+
---
|
| 222 |
+
|
| 223 |
+
## 📋 Overview
|
| 224 |
+
|
| 225 |
+
This server provides 7 powerful tools for discovering and analyzing bird data:
|
| 226 |
+
|
| 227 |
+
1. **search_species** - Find bird species by common or scientific name
|
| 228 |
+
2. **get_recent_sightings_nearby** - Find recent observations of a specific species near a location
|
| 229 |
+
3. **find_hotspots_nearby** - Discover popular birding locations
|
| 230 |
+
4. **get_location_birds** - See all birds currently being reported at a location
|
| 231 |
+
5. **get_species_info** - Get detailed taxonomy information about a species
|
| 232 |
+
6. **get_notable_sightings** - Find rare or unusual birds in a region
|
| 233 |
+
7. **analyze_location** - Comprehensive location analysis (combines all data)
|
| 234 |
+
|
| 235 |
+
## 🚀 Quick Start
|
| 236 |
+
|
| 237 |
+
### Prerequisites
|
| 238 |
+
|
| 239 |
+
```bash
|
| 240 |
+
# Install dependencies
|
| 241 |
+
pip install fastmcp requests python-dotenv
|
| 242 |
+
|
| 243 |
+
# Set up environment variables
|
| 244 |
+
echo "EBIRD_API_KEY=your-key-here" > .env
|
| 245 |
+
```
|
| 246 |
+
|
| 247 |
+
Get your eBird API key: https://ebird.org/api/keygen
|
| 248 |
+
|
| 249 |
+
### Transport Options
|
| 250 |
+
|
| 251 |
+
The server supports **two MCP-compliant transport modes**:
|
| 252 |
+
|
| 253 |
+
#### Option 1: Stdio Transport (Default)
|
| 254 |
+
**Best for:** LangGraph agents, subprocess communication, CLI usage
|
| 255 |
+
|
| 256 |
+
```bash
|
| 257 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 258 |
+
python bonus_ebird_tools.py
|
| 259 |
+
```
|
| 260 |
+
|
| 261 |
+
**Features:**
|
| 262 |
+
- ✅ Full MCP protocol via stdin/stdout
|
| 263 |
+
- ✅ Managed by FastMCP's StdioTransport
|
| 264 |
+
- ✅ Perfect for agent-to-agent communication
|
| 265 |
+
- ✅ No HTTP overhead, fastest performance
|
| 266 |
+
- ✅ Subprocess lifecycle managed automatically
|
| 267 |
+
|
| 268 |
+
**Use when:**
|
| 269 |
+
- Building LangGraph agents that need MCP tools
|
| 270 |
+
- Integrating with Claude Desktop or other MCP clients
|
| 271 |
+
- Running as a subprocess from Python code
|
| 272 |
+
|
| 273 |
+
#### Option 2: Streamable-HTTP Transport
|
| 274 |
+
**Best for:** Gradio dashboards, web clients, HuggingFace Spaces
|
| 275 |
+
|
| 276 |
+
```bash
|
| 277 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 278 |
+
python bonus_ebird_tools.py --http --port 8000
|
| 279 |
+
```
|
| 280 |
+
|
| 281 |
+
Server will start on `http://localhost:8000`
|
| 282 |
+
|
| 283 |
+
**Features:**
|
| 284 |
+
- ✅ Full MCP protocol over HTTP at `/mcp` endpoint
|
| 285 |
+
- ✅ Managed by FastMCP's built-in HTTP server
|
| 286 |
+
- ✅ Perfect for web-based UIs like Gradio
|
| 287 |
+
- ✅ Can be deployed to HuggingFace Spaces
|
| 288 |
+
- ✅ Cross-platform, language-agnostic access
|
| 289 |
+
|
| 290 |
+
**Use when:**
|
| 291 |
+
- Building Gradio dashboards
|
| 292 |
+
- Deploying to HuggingFace Spaces
|
| 293 |
+
- Need HTTP-based access to MCP tools
|
| 294 |
+
- Connecting from web clients
|
| 295 |
+
|
| 296 |
+
#### Option 3: Direct HTTP Endpoints (Non-MCP)
|
| 297 |
+
**Best for:** Quick testing, direct API access without MCP protocol
|
| 298 |
+
|
| 299 |
+
```bash
|
| 300 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 301 |
+
python run_ebird_http.py
|
| 302 |
+
```
|
| 303 |
+
|
| 304 |
+
Server will start on `http://localhost:8000`
|
| 305 |
+
|
| 306 |
+
**Available Endpoints:**
|
| 307 |
+
- `GET /health` - Health check
|
| 308 |
+
- `GET /tools` - List available tools
|
| 309 |
+
- `POST /tools/{tool_name}` - Call a specific tool (bypasses MCP)
|
| 310 |
+
|
| 311 |
+
**Note:** This bypasses MCP protocol and calls functions directly. Use Options 1 or 2 for hackathon compliance.
|
| 312 |
+
|
| 313 |
+
## 📚 Tool Documentation
|
| 314 |
+
|
| 315 |
+
### Tool 1: search_species
|
| 316 |
+
|
| 317 |
+
Find bird species by name.
|
| 318 |
+
|
| 319 |
+
**Input:**
|
| 320 |
+
```python
|
| 321 |
+
search_species(
|
| 322 |
+
search_term: str, # "cardinal", "blue jay", etc.
|
| 323 |
+
max_results: int = 10 # How many results to return
|
| 324 |
+
)
|
| 325 |
+
```
|
| 326 |
+
|
| 327 |
+
**Example:**
|
| 328 |
+
```bash
|
| 329 |
+
curl -X POST http://localhost:8000/tools/search_species \
|
| 330 |
+
-H "Content-Type: application/json" \
|
| 331 |
+
-d '{"search_term": "cardinal", "max_results": 5}'
|
| 332 |
+
```
|
| 333 |
+
|
| 334 |
+
**Response:**
|
| 335 |
+
```json
|
| 336 |
+
{
|
| 337 |
+
"status": "success",
|
| 338 |
+
"data": [
|
| 339 |
+
{
|
| 340 |
+
"common_name": "Northern Cardinal",
|
| 341 |
+
"scientific_name": "Cardinalis cardinalis",
|
| 342 |
+
"species_code": "norcar",
|
| 343 |
+
"family": "Cardinals and Allies",
|
| 344 |
+
"order": "Passeriformes"
|
| 345 |
+
}
|
| 346 |
+
],
|
| 347 |
+
"count": 1,
|
| 348 |
+
"search_term": "cardinal"
|
| 349 |
+
}
|
| 350 |
+
```
|
| 351 |
+
|
| 352 |
+
**Use Cases:**
|
| 353 |
+
- User enters a bird name → get species code
|
| 354 |
+
- Bird classifier returns "Northern Cardinal" → get species code
|
| 355 |
+
- Resolve names to codes for other tools
|
| 356 |
+
|
| 357 |
+
---
|
| 358 |
+
|
| 359 |
+
### Tool 2: get_recent_sightings_nearby
|
| 360 |
+
|
| 361 |
+
Find recent observations of a species near a location.
|
| 362 |
+
|
| 363 |
+
**Input:**
|
| 364 |
+
```python
|
| 365 |
+
get_recent_sightings_nearby(
|
| 366 |
+
species_code: str, # "norcar", "blueja", etc.
|
| 367 |
+
latitude: float, # 40.7829
|
| 368 |
+
longitude: float, # -73.9654
|
| 369 |
+
radius_km: int = 50, # Search radius
|
| 370 |
+
max_results: int = 10 # How many results to return
|
| 371 |
+
)
|
| 372 |
+
```
|
| 373 |
+
|
| 374 |
+
**Example:**
|
| 375 |
+
```bash
|
| 376 |
+
curl -X POST http://localhost:8000/tools/get_recent_sightings_nearby \
|
| 377 |
+
-H "Content-Type: application/json" \
|
| 378 |
+
-d '{
|
| 379 |
+
"species_code": "norcar",
|
| 380 |
+
"latitude": 40.7829,
|
| 381 |
+
"longitude": -73.9654,
|
| 382 |
+
"radius_km": 25,
|
| 383 |
+
"max_results": 10
|
| 384 |
+
}'
|
| 385 |
+
```
|
| 386 |
+
|
| 387 |
+
**Response:**
|
| 388 |
+
```json
|
| 389 |
+
{
|
| 390 |
+
"status": "success",
|
| 391 |
+
"data": [
|
| 392 |
+
{
|
| 393 |
+
"common_name": "Northern Cardinal",
|
| 394 |
+
"scientific_name": "Cardinalis cardinalis",
|
| 395 |
+
"location": "Central Park",
|
| 396 |
+
"location_id": "L12345",
|
| 397 |
+
"date": "2025-11-22 14:30",
|
| 398 |
+
"count": 2,
|
| 399 |
+
"latitude": 40.7829,
|
| 400 |
+
"longitude": -73.9654
|
| 401 |
+
}
|
| 402 |
+
],
|
| 403 |
+
"count": 5,
|
| 404 |
+
"location": {"lat": 40.7829, "lng": -73.9654},
|
| 405 |
+
"radius_km": 25
|
| 406 |
+
}
|
| 407 |
+
```
|
| 408 |
+
|
| 409 |
+
**Use Cases:**
|
| 410 |
+
- After classifying a bird image, find where else it's been seen recently
|
| 411 |
+
- Explore local species distribution
|
| 412 |
+
- Plan birding trips based on recent sightings
|
| 413 |
+
|
| 414 |
+
---
|
| 415 |
+
|
| 416 |
+
### Tool 3: find_hotspots_nearby
|
| 417 |
+
|
| 418 |
+
Discover popular birding locations near coordinates.
|
| 419 |
+
|
| 420 |
+
**Input:**
|
| 421 |
+
```python
|
| 422 |
+
find_hotspots_nearby(
|
| 423 |
+
latitude: float,
|
| 424 |
+
longitude: float,
|
| 425 |
+
radius_km: int = 50,
|
| 426 |
+
max_results: int = 15
|
| 427 |
+
)
|
| 428 |
+
```
|
| 429 |
+
|
| 430 |
+
**Example:**
|
| 431 |
+
```bash
|
| 432 |
+
curl -X POST http://localhost:8000/tools/find_hotspots_nearby \
|
| 433 |
+
-H "Content-Type: application/json" \
|
| 434 |
+
-d '{
|
| 435 |
+
"latitude": 40.7829,
|
| 436 |
+
"longitude": -73.9654,
|
| 437 |
+
"radius_km": 25,
|
| 438 |
+
"max_results": 10
|
| 439 |
+
}'
|
| 440 |
+
```
|
| 441 |
+
|
| 442 |
+
**Response:**
|
| 443 |
+
```json
|
| 444 |
+
{
|
| 445 |
+
"status": "success",
|
| 446 |
+
"data": [
|
| 447 |
+
{
|
| 448 |
+
"name": "Central Park",
|
| 449 |
+
"location_id": "L123",
|
| 450 |
+
"latitude": 40.7829,
|
| 451 |
+
"longitude": -73.9654,
|
| 452 |
+
"species_recorded": 237,
|
| 453 |
+
"latest_obs_date": "2025-11-22 15:45"
|
| 454 |
+
}
|
| 455 |
+
],
|
| 456 |
+
"count": 3,
|
| 457 |
+
"location": {"lat": 40.7829, "lng": -73.9654},
|
| 458 |
+
"radius_km": 25
|
| 459 |
+
}
|
| 460 |
+
```
|
| 461 |
+
|
| 462 |
+
**Use Cases:**
|
| 463 |
+
- Find popular birding locations near user
|
| 464 |
+
- Discover new places to go birdwatching
|
| 465 |
+
- See historical bird diversity at hotspots
|
| 466 |
+
|
| 467 |
+
---
|
| 468 |
+
|
| 469 |
+
### Tool 4: get_location_birds
|
| 470 |
+
|
| 471 |
+
See ALL birds currently being reported at a location.
|
| 472 |
+
|
| 473 |
+
**Input:**
|
| 474 |
+
```python
|
| 475 |
+
get_location_birds(
|
| 476 |
+
latitude: float,
|
| 477 |
+
longitude: float,
|
| 478 |
+
radius_km: int = 50,
|
| 479 |
+
max_results: int = 50
|
| 480 |
+
)
|
| 481 |
+
```
|
| 482 |
+
|
| 483 |
+
**Example:**
|
| 484 |
+
```bash
|
| 485 |
+
curl -X POST http://localhost:8000/tools/get_location_birds \
|
| 486 |
+
-H "Content-Type: application/json" \
|
| 487 |
+
-d '{
|
| 488 |
+
"latitude": 40.7829,
|
| 489 |
+
"longitude": -73.9654,
|
| 490 |
+
"radius_km": 5,
|
| 491 |
+
"max_results": 50
|
| 492 |
+
}'
|
| 493 |
+
```
|
| 494 |
+
|
| 495 |
+
**Response:**
|
| 496 |
+
```json
|
| 497 |
+
{
|
| 498 |
+
"status": "success",
|
| 499 |
+
"data": [
|
| 500 |
+
{
|
| 501 |
+
"common_name": "Canada Goose",
|
| 502 |
+
"scientific_name": "Branta canadensis",
|
| 503 |
+
"species_code": "cangoo",
|
| 504 |
+
"location": "Central Park - Reservoir",
|
| 505 |
+
"date": "2025-11-22 14:30",
|
| 506 |
+
"count": 12,
|
| 507 |
+
"latitude": 40.7829,
|
| 508 |
+
"longitude": -73.9654
|
| 509 |
+
}
|
| 510 |
+
],
|
| 511 |
+
"count": 20,
|
| 512 |
+
"unique_species": 12,
|
| 513 |
+
"location": {"lat": 40.7829, "lng": -73.9654},
|
| 514 |
+
"top_birds": [
|
| 515 |
+
{"species": "Canada Goose", "observations": 5},
|
| 516 |
+
{"species": "American Robin", "observations": 3}
|
| 517 |
+
]
|
| 518 |
+
}
|
| 519 |
+
```
|
| 520 |
+
|
| 521 |
+
**Use Cases:**
|
| 522 |
+
- Explore all bird activity in an area
|
| 523 |
+
- See which species are being reported right now
|
| 524 |
+
- Get overview of biodiversity at a location
|
| 525 |
+
|
| 526 |
+
---
|
| 527 |
+
|
| 528 |
+
### Tool 5: get_species_info
|
| 529 |
+
|
| 530 |
+
Get detailed taxonomy and metadata for a species.
|
| 531 |
+
|
| 532 |
+
**Input:**
|
| 533 |
+
```python
|
| 534 |
+
get_species_info(
|
| 535 |
+
species_code: str # "norcar", "blueja", etc.
|
| 536 |
+
)
|
| 537 |
+
```
|
| 538 |
+
|
| 539 |
+
**Example:**
|
| 540 |
+
```bash
|
| 541 |
+
curl -X POST http://localhost:8000/tools/get_species_info \
|
| 542 |
+
-H "Content-Type: application/json" \
|
| 543 |
+
-d '{"species_code": "norcar"}'
|
| 544 |
+
```
|
| 545 |
+
|
| 546 |
+
**Response:**
|
| 547 |
+
```json
|
| 548 |
+
{
|
| 549 |
+
"status": "success",
|
| 550 |
+
"data": {
|
| 551 |
+
"common_name": "Northern Cardinal",
|
| 552 |
+
"scientific_name": "Cardinalis cardinalis",
|
| 553 |
+
"species_code": "norcar",
|
| 554 |
+
"family": "Cardinals and Allies",
|
| 555 |
+
"family_sci_name": "Cardinalidae",
|
| 556 |
+
"order": "Passeriformes",
|
| 557 |
+
"category": "species"
|
| 558 |
+
},
|
| 559 |
+
"species_code": "norcar"
|
| 560 |
+
}
|
| 561 |
+
```
|
| 562 |
+
|
| 563 |
+
**Use Cases:**
|
| 564 |
+
- Get scientific name for a bird
|
| 565 |
+
- Understand taxonomic classification
|
| 566 |
+
- Enrich bird data with metadata
|
| 567 |
+
|
| 568 |
+
---
|
| 569 |
+
|
| 570 |
+
### Tool 6: get_notable_sightings
|
| 571 |
+
|
| 572 |
+
Find rare or unusual birds recently reported in a region.
|
| 573 |
+
|
| 574 |
+
**Input:**
|
| 575 |
+
```python
|
| 576 |
+
get_notable_sightings(
|
| 577 |
+
region_code: str = "US", # "US", "US-NY", "CA-ON", etc.
|
| 578 |
+
max_results: int = 10
|
| 579 |
+
)
|
| 580 |
+
```
|
| 581 |
+
|
| 582 |
+
**Example:**
|
| 583 |
+
```bash
|
| 584 |
+
curl -X POST http://localhost:8000/tools/get_notable_sightings \
|
| 585 |
+
-H "Content-Type: application/json" \
|
| 586 |
+
-d '{"region_code": "US-NY", "max_results": 10}'
|
| 587 |
+
```
|
| 588 |
+
|
| 589 |
+
**Response:**
|
| 590 |
+
```json
|
| 591 |
+
{
|
| 592 |
+
"status": "success",
|
| 593 |
+
"data": [
|
| 594 |
+
{
|
| 595 |
+
"common_name": "Tufted Puffin",
|
| 596 |
+
"scientific_name": "Fratercula cirrhata",
|
| 597 |
+
"species_code": "tufpuf",
|
| 598 |
+
"location": "Jones Beach",
|
| 599 |
+
"location_id": "L456",
|
| 600 |
+
"date": "2025-11-20 10:30",
|
| 601 |
+
"count": 1,
|
| 602 |
+
"latitude": 40.5891,
|
| 603 |
+
"longitude": -72.7868
|
| 604 |
+
}
|
| 605 |
+
],
|
| 606 |
+
"count": 3,
|
| 607 |
+
"region_code": "US-NY"
|
| 608 |
+
}
|
| 609 |
+
```
|
| 610 |
+
|
| 611 |
+
**Use Cases:**
|
| 612 |
+
- Discover rare birds in a region
|
| 613 |
+
- Find exciting sighting opportunities
|
| 614 |
+
- Monitor unusual bird movements
|
| 615 |
+
|
| 616 |
+
---
|
| 617 |
+
|
| 618 |
+
### Tool 7: analyze_location
|
| 619 |
+
|
| 620 |
+
Comprehensive analysis combining all bird data for a location.
|
| 621 |
+
|
| 622 |
+
**Input:**
|
| 623 |
+
```python
|
| 624 |
+
analyze_location(
|
| 625 |
+
latitude: float,
|
| 626 |
+
longitude: float,
|
| 627 |
+
radius_km: int = 50
|
| 628 |
+
)
|
| 629 |
+
```
|
| 630 |
+
|
| 631 |
+
**Example:**
|
| 632 |
+
```bash
|
| 633 |
+
curl -X POST http://localhost:8000/tools/analyze_location \
|
| 634 |
+
-H "Content-Type: application/json" \
|
| 635 |
+
-d '{
|
| 636 |
+
"latitude": 40.7829,
|
| 637 |
+
"longitude": -73.9654,
|
| 638 |
+
"radius_km": 5
|
| 639 |
+
}'
|
| 640 |
+
```
|
| 641 |
+
|
| 642 |
+
**Response:**
|
| 643 |
+
```json
|
| 644 |
+
{
|
| 645 |
+
"status": "success",
|
| 646 |
+
"data": {
|
| 647 |
+
"location": {
|
| 648 |
+
"latitude": 40.7829,
|
| 649 |
+
"longitude": -73.9654,
|
| 650 |
+
"radius_km": 5
|
| 651 |
+
},
|
| 652 |
+
"sightings": [...],
|
| 653 |
+
"hotspots": [...],
|
| 654 |
+
"summary": {
|
| 655 |
+
"total_sightings": 42,
|
| 656 |
+
"unique_species": 18,
|
| 657 |
+
"total_hotspots": 3,
|
| 658 |
+
"top_species": [
|
| 659 |
+
{"name": "Canada Goose", "observations": 8},
|
| 660 |
+
{"name": "American Robin", "observations": 5}
|
| 661 |
+
]
|
| 662 |
+
}
|
| 663 |
+
}
|
| 664 |
+
}
|
| 665 |
+
```
|
| 666 |
+
|
| 667 |
+
**Use Cases:**
|
| 668 |
+
- Get complete birding report for a location
|
| 669 |
+
- Plan comprehensive birding trips
|
| 670 |
+
- Understand area biodiversity
|
| 671 |
+
|
| 672 |
+
## 🧪 Testing
|
| 673 |
+
|
| 674 |
+
### Comprehensive Test Suite
|
| 675 |
+
|
| 676 |
+
The test suite validates **all 7 tools** across **3 different modes**:
|
| 677 |
+
|
| 678 |
+
```bash
|
| 679 |
+
cd /path/to/hackathon_draft
|
| 680 |
+
python test_ebird_mcp.py
|
| 681 |
+
```
|
| 682 |
+
|
| 683 |
+
**What gets tested:**
|
| 684 |
+
|
| 685 |
+
1. **Part 1: Direct Python Function Tests** (2 tests)
|
| 686 |
+
- Validates functions work when called directly
|
| 687 |
+
- Quick sanity check without MCP overhead
|
| 688 |
+
- Tests: `search_species`, `find_hotspots_nearby`
|
| 689 |
+
|
| 690 |
+
2. **Part 2: MCP Stdio Transport Tests** (3 tests)
|
| 691 |
+
- ✅ **Proper MCP protocol via stdio**
|
| 692 |
+
- Uses FastMCP's `StdioTransport` client
|
| 693 |
+
- Validates MCP server works as subprocess
|
| 694 |
+
- Tests: List tools, call `search_species`, call `find_hotspots_nearby`
|
| 695 |
+
|
| 696 |
+
3. **Part 3: MCP Streamable-HTTP Transport Tests** (3 tests)
|
| 697 |
+
- ✅ **Proper MCP protocol via HTTP**
|
| 698 |
+
- Uses FastMCP's `StreamableHttpTransport` client
|
| 699 |
+
- Validates MCP server works over HTTP
|
| 700 |
+
- Tests: List tools, call `search_species`, call `find_hotspots_nearby`
|
| 701 |
+
|
| 702 |
+
**Expected Output:**
|
| 703 |
+
```
|
| 704 |
+
🦅 eBird MCP Server - Comprehensive Test Suite
|
| 705 |
+
══════════════════════════════════════════════════════════════════════
|
| 706 |
+
|
| 707 |
+
PART 1: Direct Python Function Tests (Non-MCP)
|
| 708 |
+
──────────────────────────────────────────────────────────────────────
|
| 709 |
+
✅ Found 3 cardinals
|
| 710 |
+
✅ Found 5 hotspots
|
| 711 |
+
ℹ️ Direct tests: 2/2 passed
|
| 712 |
+
|
| 713 |
+
PART 2: MCP Stdio Transport Tests
|
| 714 |
+
──────────────────────────────────────────────────────────────────────
|
| 715 |
+
✅ Found all 7 MCP tools
|
| 716 |
+
✅ MCP call successful: Found 3 species
|
| 717 |
+
✅ MCP call successful: Found 3 hotspots
|
| 718 |
+
ℹ️ MCP stdio tests: 3/3 passed
|
| 719 |
+
|
| 720 |
+
PART 3: MCP Streamable-HTTP Transport Tests
|
| 721 |
+
──────────────────────────────────────────────────────────────────────
|
| 722 |
+
✅ Found all 7 MCP tools via HTTP
|
| 723 |
+
✅ MCP HTTP call successful: Found 3 species
|
| 724 |
+
✅ MCP HTTP call successful: Found 3 hotspots
|
| 725 |
+
ℹ️ MCP HTTP tests: 3/3 passed
|
| 726 |
+
|
| 727 |
+
Final Test Summary
|
| 728 |
+
══════════════════════════════════════════════════════════════════════
|
| 729 |
+
Total Passed: 8/8
|
| 730 |
+
Direct tests: 2/2
|
| 731 |
+
MCP stdio tests: 3/3
|
| 732 |
+
MCP HTTP tests: 3/3
|
| 733 |
+
|
| 734 |
+
✅ All tests passed! ✨
|
| 735 |
+
ℹ️ Your MCP server is fully functional for the hackathon! 🎉
|
| 736 |
+
```
|
| 737 |
+
|
| 738 |
+
**Test Options:**
|
| 739 |
+
```bash
|
| 740 |
+
# Run all tests (default)
|
| 741 |
+
python test_ebird_mcp.py
|
| 742 |
+
|
| 743 |
+
# Skip direct function tests
|
| 744 |
+
python test_ebird_mcp.py --skip-direct
|
| 745 |
+
|
| 746 |
+
# Skip stdio MCP tests
|
| 747 |
+
python test_ebird_mcp.py --skip-stdio
|
| 748 |
+
|
| 749 |
+
# Skip HTTP MCP tests
|
| 750 |
+
python test_ebird_mcp.py --skip-http
|
| 751 |
+
|
| 752 |
+
# Run only MCP protocol tests
|
| 753 |
+
python test_ebird_mcp.py --skip-direct
|
| 754 |
+
```
|
| 755 |
+
|
| 756 |
+
**Why this matters for the hackathon:**
|
| 757 |
+
- ✅ Validates proper MCP protocol usage (required for Track 2)
|
| 758 |
+
- ✅ Tests both stdio and HTTP transports
|
| 759 |
+
- ✅ Ensures compatibility with MCP clients
|
| 760 |
+
- ✅ Proves tools work in real-world scenarios
|
| 761 |
+
|
| 762 |
+
## 🔗 Integration Points
|
| 763 |
+
|
| 764 |
+
### With LangGraph Agent (Phase 3) - RECOMMENDED
|
| 765 |
+
|
| 766 |
+
**Use MCP Stdio Transport for proper protocol compliance:**
|
| 767 |
+
|
| 768 |
+
```python
|
| 769 |
+
from fastmcp.client import Client
|
| 770 |
+
from fastmcp.client.transports import StdioTransport
|
| 771 |
+
|
| 772 |
+
# Create MCP client with stdio transport
|
| 773 |
+
transport = StdioTransport(
|
| 774 |
+
command="python",
|
| 775 |
+
args=["ebird_tools.py"]
|
| 776 |
+
)
|
| 777 |
+
client = Client(transport)
|
| 778 |
+
|
| 779 |
+
# Use in async context
|
| 780 |
+
async with client:
|
| 781 |
+
# List available tools
|
| 782 |
+
tools = await client.list_tools()
|
| 783 |
+
print(f"Available tools: {[t.name for t in tools]}")
|
| 784 |
+
|
| 785 |
+
# Call a tool
|
| 786 |
+
result = await client.call_tool(
|
| 787 |
+
"search_species",
|
| 788 |
+
arguments={"search_term": "cardinal", "max_results": 5}
|
| 789 |
+
)
|
| 790 |
+
|
| 791 |
+
# Parse response
|
| 792 |
+
import json
|
| 793 |
+
data = json.loads(result.content[0].text)
|
| 794 |
+
print(f"Found {data['count']} species")
|
| 795 |
+
```
|
| 796 |
+
|
| 797 |
+
**Why use stdio for agents:**
|
| 798 |
+
- ✅ Proper MCP protocol (required for hackathon Track 2)
|
| 799 |
+
- ✅ StdioTransport manages subprocess lifecycle
|
| 800 |
+
- ✅ No HTTP overhead
|
| 801 |
+
- ✅ Best for agent-to-agent communication
|
| 802 |
+
|
| 803 |
+
### With Gradio Dashboard (Phase 4)
|
| 804 |
+
|
| 805 |
+
**Option 1: Use MCP Streamable-HTTP Transport (RECOMMENDED)**
|
| 806 |
+
|
| 807 |
+
```python
|
| 808 |
+
from fastmcp.client import Client
|
| 809 |
+
from fastmcp.client.transports import StreamableHttpTransport
|
| 810 |
+
import asyncio
|
| 811 |
+
import json
|
| 812 |
+
|
| 813 |
+
async def call_ebird_tool(tool_name: str, params: dict):
|
| 814 |
+
"""Call eBird MCP tool via proper MCP protocol."""
|
| 815 |
+
transport = StreamableHttpTransport(url="http://localhost:8000/mcp")
|
| 816 |
+
client = Client(transport)
|
| 817 |
+
|
| 818 |
+
async with client:
|
| 819 |
+
result = await client.call_tool(tool_name, arguments=params)
|
| 820 |
+
data = json.loads(result.content[0].text)
|
| 821 |
+
return data
|
| 822 |
+
|
| 823 |
+
# Use in Gradio
|
| 824 |
+
def gradio_search_birds(search_term: str):
|
| 825 |
+
"""Gradio callback function."""
|
| 826 |
+
result = asyncio.run(call_ebird_tool(
|
| 827 |
+
"search_species",
|
| 828 |
+
{"search_term": search_term, "max_results": 10}
|
| 829 |
+
))
|
| 830 |
+
return result
|
| 831 |
+
```
|
| 832 |
+
|
| 833 |
+
**Option 2: Direct HTTP calls (Non-MCP, for quick testing)**
|
| 834 |
+
|
| 835 |
+
```python
|
| 836 |
+
import requests
|
| 837 |
+
|
| 838 |
+
# Start server with: python run_ebird_http.py
|
| 839 |
+
response = requests.post(
|
| 840 |
+
"http://localhost:8000/tools/search_species",
|
| 841 |
+
json={"search_term": "cardinal", "max_results": 10}
|
| 842 |
+
)
|
| 843 |
+
result = response.json()
|
| 844 |
+
```
|
| 845 |
+
|
| 846 |
+
**Note:** Option 2 bypasses MCP protocol. Use Option 1 for hackathon compliance.
|
| 847 |
+
|
| 848 |
+
### With Claude Desktop or Other MCP Clients
|
| 849 |
+
|
| 850 |
+
Add to your Claude Desktop config (`claude_desktop_config.json`):
|
| 851 |
+
|
| 852 |
+
```json
|
| 853 |
+
{
|
| 854 |
+
"mcpServers": {
|
| 855 |
+
"ebird": {
|
| 856 |
+
"command": "python",
|
| 857 |
+
"args": ["/Users/jacobbinder/Desktop/hackathon/hackathon_draft/ebird_tools.py"]
|
| 858 |
+
}
|
| 859 |
+
}
|
| 860 |
+
}
|
| 861 |
+
```
|
| 862 |
+
|
| 863 |
+
Claude Desktop will connect via stdio transport and have access to all 7 tools.
|
| 864 |
+
|
| 865 |
+
## 📊 Performance
|
| 866 |
+
|
| 867 |
+
| Tool | Time | Notes |
|
| 868 |
+
|------|------|-------|
|
| 869 |
+
| search_species | 1-2s | Full taxonomy lookup |
|
| 870 |
+
| get_recent_sightings_nearby | 1-2s | API call |
|
| 871 |
+
| find_hotspots_nearby | 1-2s | API call |
|
| 872 |
+
| get_location_birds | 1-2s | API call |
|
| 873 |
+
| get_species_info | 0.5-1s | Taxonomy lookup |
|
| 874 |
+
| get_notable_sightings | 1-2s | API call |
|
| 875 |
+
| analyze_location | 3-5s | Multiple API calls |
|
| 876 |
+
|
| 877 |
+
**Target response time:** 5-10 seconds for agent queries
|
| 878 |
+
|
| 879 |
+
## 🛠️ Troubleshooting
|
| 880 |
+
|
| 881 |
+
### "EBIRD_API_KEY not found"
|
| 882 |
+
```bash
|
| 883 |
+
# Make sure .env file has your API key
|
| 884 |
+
echo "EBIRD_API_KEY=your-actual-key" > /Users/jacobbinder/Desktop/hackathon/hackathon_draft/.env
|
| 885 |
+
```
|
| 886 |
+
|
| 887 |
+
### "Connection error"
|
| 888 |
+
Check internet connection and eBird API status
|
| 889 |
+
|
| 890 |
+
### "Tool not found" (HTTP)
|
| 891 |
+
Ensure tool name matches exactly (lowercase with underscores)
|
| 892 |
+
|
| 893 |
+
### Rate limiting
|
| 894 |
+
Server automatically enforces 100ms delay between API calls
|
| 895 |
+
|
| 896 |
+
## 📝 Architecture
|
| 897 |
+
|
| 898 |
+
```
|
| 899 |
+
┌─────────────────────────────────┐
|
| 900 |
+
│ eBird MCP Server │
|
| 901 |
+
│ (ebird_tools.py) │
|
| 902 |
+
├─────────────────────────────────┤
|
| 903 |
+
│ 7 Tools (all return JSON) │
|
| 904 |
+
├─────────────────────────────────┤
|
| 905 |
+
│ Transport Layer (choosable) │
|
| 906 |
+
├──────────────────┬──────────────┤
|
| 907 |
+
│ Stdio (CLI) │ HTTP (Web) │
|
| 908 |
+
├──────────────────┼──────────────┤
|
| 909 |
+
│ stdio transport │ FastAPI app │
|
| 910 |
+
│ (subprocess) │ (Uvicorn) │
|
| 911 |
+
└──────────────────┴──────────────┘
|
| 912 |
+
↓ ↓
|
| 913 |
+
LangGraph Gradio
|
| 914 |
+
Agent Dashboard
|
| 915 |
+
```
|
| 916 |
+
|
| 917 |
+
## 📦 Files
|
| 918 |
+
|
| 919 |
+
- **`ebird_tools.py`** (800+ lines) - Main MCP server with dual transport support
|
| 920 |
+
- Run as stdio: `python ebird_tools.py`
|
| 921 |
+
- Run as HTTP: `python ebird_tools.py --http --port 8000`
|
| 922 |
+
- Uses FastMCP's built-in transport system
|
| 923 |
+
|
| 924 |
+
- **`run_ebird_http.py`** (150 lines) - Direct HTTP wrapper (non-MCP)
|
| 925 |
+
- Bypasses MCP protocol for quick testing
|
| 926 |
+
- Exposes `/tools/{tool_name}` endpoints
|
| 927 |
+
- Use for dashboard building when MCP overhead not needed
|
| 928 |
+
|
| 929 |
+
- **`test_ebird_mcp.py`** (470+ lines) - Comprehensive test suite
|
| 930 |
+
- Tests 3 modes: Direct, Stdio MCP, HTTP MCP
|
| 931 |
+
- Validates proper MCP protocol compliance
|
| 932 |
+
- 8 total tests with colored output
|
| 933 |
+
|
| 934 |
+
- **`EBIRD_MCP_README.md`** - This file (complete documentation)
|
| 935 |
+
- **`QUICK_START.md`** - Quick reference guide
|
| 936 |
+
- **`PHASE2_SUMMARY.md`** - Implementation summary
|
| 937 |
+
|
| 938 |
+
## ✨ Next Steps
|
| 939 |
+
|
| 940 |
+
1. ✅ **Phase 2: eBird MCP Server (COMPLETE)**
|
| 941 |
+
- ✅ 7 tools implemented
|
| 942 |
+
- ✅ Dual transport support (stdio + streamable-http)
|
| 943 |
+
- ✅ Full MCP protocol compliance
|
| 944 |
+
- ✅ Comprehensive test suite (8/8 passing)
|
| 945 |
+
- ✅ Ready for hackathon Track 2
|
| 946 |
+
|
| 947 |
+
2. ⏳ **Phase 3: LangGraph Agent**
|
| 948 |
+
- Connect both MCP servers (bird classifier + eBird data)
|
| 949 |
+
- Use stdio transport for agent-to-agent communication
|
| 950 |
+
- Build reasoning chains combining image classification + location data
|
| 951 |
+
|
| 952 |
+
3. ⏳ **Phase 4: Gradio Dashboard**
|
| 953 |
+
- Use streamable-http transport for UI integration
|
| 954 |
+
- Dual interface: Chat with agent + Direct tool access
|
| 955 |
+
- Deploy to HuggingFace Spaces
|
| 956 |
+
|
| 957 |
+
4. ⏳ **Phase 5: Submission**
|
| 958 |
+
- Tag with `mcp-in-action-track-consumer` or appropriate category
|
| 959 |
+
- Add demo video showing MCP protocol usage
|
| 960 |
+
- Add social media post link
|
| 961 |
+
|
| 962 |
+
## 📄 License
|
| 963 |
+
|
| 964 |
+
Part of BirdScope AI hackathon submission
|
| 965 |
+
|
| 966 |
+
## 🤝 Support
|
| 967 |
+
|
| 968 |
+
For issues, check:
|
| 969 |
+
1. `.env` file has valid EBIRD_API_KEY
|
| 970 |
+
2. Internet connection is working
|
| 971 |
+
3. eBird API status (https://ebird.org)
|
| 972 |
+
4. Test suite results: `python test_ebird_mcp.py`
|
examples/another_bird.jpg
ADDED
|
Git LFS Details
|
examples/b64_helper.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
from PIL import Image
from io import BytesIO


def encode_image_base64(input_path: str, max_size: int = 800, quality: int = 85) -> str:
    """Load an image, downscale/compress it, and return its base64 JPEG encoding.

    Args:
        input_path: Path to the source image file.
        max_size: Maximum pixel length of the longest side after resizing.
        quality: JPEG quality (1-95) used for compression.

    Returns:
        The base64-encoded JPEG bytes as an ASCII string.
    """
    image = Image.open(input_path)

    # Downscale proportionally so the longest side is at most max_size px.
    if max(image.size) > max_size:
        ratio = max_size / max(image.size)
        new_size = tuple(int(dim * ratio) for dim in image.size)
        image = image.resize(new_size, Image.Resampling.LANCZOS)

    # Re-encode as compressed JPEG; converting to RGB drops any alpha channel.
    buffer = BytesIO()
    image.convert("RGB").save(buffer, format="JPEG", quality=quality, optimize=True)
    return base64.b64encode(buffer.getvalue()).decode()


image_base64 = encode_image_base64("examples/blue_jay.jpg")

# Print full base64 for copying
print("Full base64 string:")
print(image_base64)

# Also save to file for easy copying
with open("examples/blue_jay_base64.txt", "w") as out:
    out.write(image_base64)

print("\n✅ Saved to examples/blue_jay_base64.txt")
print(f"📏 Size: {len(image_base64)} characters")
print(f"📦 Optimized payload: ~{len(image_base64) // 1024}KB")
|
examples/bird_example_1.jpg
ADDED
|
Git LFS Details
|
examples/bird_example_2.jpg
ADDED
|
Git LFS Details
|
examples/bird_example_3.jpg
ADDED
|
langgraph.json
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://langgra.ph/schema.json",
|
| 3 |
+
"dependencies": ["."],
|
| 4 |
+
"graphs": {
|
| 5 |
+
"bird_agent": "./agent/graph.py:graph"
|
| 6 |
+
},
|
| 7 |
+
"env": ".env"
|
| 8 |
+
}
|
langgraph_agent/__init__.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
LangGraph Bird Classification Agents
|
| 3 |
+
|
| 4 |
+
Simple, modular agents for bird identification using MCP servers.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from .config import AgentConfig
|
| 8 |
+
from .agents import AgentFactory
|
| 9 |
+
from .mcp_clients import MCPClientManager
|
| 10 |
+
from .prompts import get_prompt_for_agent_type, BIRDSCOPE_AI_PROMPT, NUTHATCH_BIRDSCOPE_PROMPT
|
| 11 |
+
|
| 12 |
+
__version__ = "0.1.0"
|
| 13 |
+
|
| 14 |
+
__all__ = [
|
| 15 |
+
"AgentConfig",
|
| 16 |
+
"AgentFactory",
|
| 17 |
+
"MCPClientManager",
|
| 18 |
+
"get_prompt_for_agent_type",
|
| 19 |
+
"BIRDSCOPE_AI_PROMPT",
|
| 20 |
+
"NUTHATCH_BIRDSCOPE_PROMPT"
|
| 21 |
+
]
|
langgraph_agent/__main__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Entry point for running langgraph_agent as a module.
|
| 3 |
+
Allows: python -m langgraph_agent [command]
|
| 4 |
+
"""
|
| 5 |
+
from .main import main
|
| 6 |
+
|
| 7 |
+
if __name__ == "__main__":
|
| 8 |
+
main()
|
langgraph_agent/agents.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agent creation and configuration.
|
| 3 |
+
|
| 4 |
+
Unified agent factory using subagent architecture for all modes.
|
| 5 |
+
"""
|
| 6 |
+
from langchain_openai import ChatOpenAI
|
| 7 |
+
|
| 8 |
+
from .config import AgentConfig
|
| 9 |
+
from .mcp_clients import MCPClientManager
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class AgentFactory:
    """Factory for creating agents using unified subagent architecture."""

    @staticmethod
    async def create_subagent_orchestrator(
        model: str,
        api_key: str,
        provider: str,
        mode: str = "Single Agent (All Tools)"
    ):
        """
        Create agent using subagent architecture (always uses subagent system).

        Args:
            model: LLM model name
            api_key: API key for the provider
            provider: LLM provider ("openai", "anthropic", or "huggingface")
            mode: Agent mode (e.g., "Single Agent (All Tools)",
                "Specialized Subagents (3 Specialists)")

        Returns:
            Configured agent (single subagent or supervisor workflow)
        """
        # Lazy imports keep optional subagent dependencies out of module import time.
        from .subagent_config import SubAgentConfig
        from .subagent_supervisor import create_supervisor_workflow
        from langchain_anthropic import ChatAnthropic

        # Get mode configuration (which subagents to build, router vs. single).
        mode_config = SubAgentConfig.get_mode_config(mode)
        print(f"[AGENT]: Creating agent in '{mode}' mode")

        # Create LLM based on provider. ChatOpenAI is imported at module level;
        # the HuggingFace router exposes an OpenAI-compatible API, so it reuses
        # ChatOpenAI with a custom base_url.
        if provider == "huggingface":
            llm = ChatOpenAI(
                base_url="https://router.huggingface.co/v1",
                api_key=api_key,
                model=model,
                temperature=AgentConfig.HF_TEMPERATURE,
                streaming=True
            )
        elif provider == "anthropic":
            llm = ChatAnthropic(
                model=model,
                api_key=api_key,
                temperature=AgentConfig.ANTHROPIC_TEMPERATURE,
                streaming=True
            )
        else:  # openai (default)
            llm = ChatOpenAI(
                model=model,
                api_key=api_key,
                temperature=AgentConfig.OPENAI_TEMPERATURE,
                streaming=True
            )

        # Get all MCP tools from every configured server.
        client = await MCPClientManager.create_multi_server_client()
        tools = await MCPClientManager.get_tools(client)

        if mode_config["use_router"]:
            # Multi-agent mode: supervisor routes between specialist subagents.
            print(f"[AGENT]: Creating supervisor with subagents: {mode_config['subagents']}")
            return await create_supervisor_workflow(tools, llm)

        # Single agent mode: build one subagent directly.
        subagent_name = mode_config["subagents"][0]
        print(f"[AGENT]: Creating single subagent: {subagent_name}")

        from langchain.agents import create_agent
        from langgraph.checkpoint.memory import InMemorySaver

        # Look up the subagent definition by its configured name, falling back
        # to the "generalist" definition (the previously hard-coded key) when
        # the name is not defined.
        definitions = SubAgentConfig.get_subagent_definitions()
        definition = definitions.get(subagent_name, definitions["generalist"])

        # Restrict the tool set to those the subagent is configured to use.
        filtered_tools = [tool for tool in tools if tool.name in definition["tools"]]
        print(f"[AGENT]: Filtered {len(filtered_tools)} tools for {subagent_name}: {[t.name for t in filtered_tools]}")

        # create_agent auto-compiles; InMemorySaver gives conversation memory
        # so streaming works across turns.
        return create_agent(
            model=llm,
            tools=filtered_tools,
            system_prompt=definition["prompt"],
            checkpointer=InMemorySaver(),
            name=subagent_name
        )
|
langgraph_agent/agents.py.legacy
ADDED
|
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agent creation and configuration.
|
| 3 |
+
"""
|
| 4 |
+
from typing import Optional
|
| 5 |
+
from langchain.agents import create_agent
|
| 6 |
+
from langchain_openai import ChatOpenAI
|
| 7 |
+
from langgraph.checkpoint.memory import InMemorySaver
|
| 8 |
+
|
| 9 |
+
from .config import AgentConfig
|
| 10 |
+
from .prompts import get_prompt_for_agent_type, NUTHATCH_BIRDSCOPE_PROMPT
|
| 11 |
+
from .mcp_clients import MCPClientManager
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class AgentFactory:
    """Factory for creating different types of bird classification agents.

    All factory methods are async because they must first connect to the MCP
    server(s) to discover the available tools before the agent graph is built.
    """

    @staticmethod
    async def create_classifier_agent(
        model_name: Optional[str] = None,
        temperature: Optional[float] = None,
        with_memory: bool = False
    ):
        """
        Create a basic bird classifier agent (Modal only).

        Args:
            model_name: LLM model to use (defaults to config)
            temperature: Model temperature (defaults to config)
            with_memory: Enable conversation memory

        Returns:
            Configured LangGraph agent
        """
        # Fail fast if required settings (API keys, URLs) are missing.
        AgentConfig.validate()

        # Connect to the Modal classifier MCP server and load its tools.
        client = await MCPClientManager.create_classifier_client()
        tools = await MCPClientManager.get_tools(client)

        # Create model; explicit None check so temperature=0 is honored.
        model = ChatOpenAI(
            model=model_name or AgentConfig.DEFAULT_MODEL,
            temperature=temperature if temperature is not None else AgentConfig.OPENAI_TEMPERATURE
        )

        # Get system prompt tailored to the classifier-only agent.
        system_prompt = get_prompt_for_agent_type("classifier")

        agent_kwargs = {
            "model": model,
            "tools": tools,
            "system_prompt": system_prompt
        }

        # Conversation memory is opt-in for the simple classifier agent.
        if with_memory:
            agent_kwargs["checkpointer"] = InMemorySaver()

        print("[STATUS]: Creating LangGraph agent...")
        agent = create_agent(**agent_kwargs)

        print("[SUCCESS]: Agent ready!\n")
        return agent

    @staticmethod
    async def create_multi_server_agent(
        model_name: Optional[str] = None,
        temperature: Optional[float] = None,
        with_memory: bool = True  # Memory recommended for multi-server
    ):
        """
        Create agent with both Modal classifier and eBird tools.

        Args:
            model_name: LLM model to use (defaults to config)
            temperature: Model temperature (defaults to config)
            with_memory: Enable conversation memory (default: True)

        Returns:
            Configured LangGraph agent with all tools
        """
        # Fail fast if required settings are missing.
        AgentConfig.validate()

        # Connect to both MCP servers (Modal classifier + eBird data).
        client = await MCPClientManager.create_multi_server_client()
        tools = await MCPClientManager.get_tools(client)

        model = ChatOpenAI(
            model=model_name or AgentConfig.DEFAULT_MODEL,
            temperature=temperature if temperature is not None else AgentConfig.OPENAI_TEMPERATURE
        )

        # Prompt variant that describes both tool families to the LLM.
        system_prompt = get_prompt_for_agent_type("multi_server")

        agent_kwargs = {
            "model": model,
            "tools": tools,
            "system_prompt": system_prompt
        }

        if with_memory:
            agent_kwargs["checkpointer"] = InMemorySaver()

        print("[STATUS]: Creating multi-server LangGraph agent...")
        agent = create_agent(**agent_kwargs)

        print("[SUCCESS]: Agent ready with all tools!\n")
        return agent

    @staticmethod
    async def create_streaming_agent(
        model_name: Optional[str] = None,
        temperature: Optional[float] = None,
        system_prompt: Optional[str] = None,
        with_memory: bool = True
    ):
        """
        Create streaming multi-server agent with custom system prompt.

        Args:
            model_name: LLM model (default: gpt-4o-mini)
            temperature: Sampling temperature (default: 0)
            system_prompt: Custom system message
            with_memory: Enable conversation memory

        Returns:
            Configured LangGraph agent with token streaming enabled
        """
        print("[STATUS]: Creating streaming agent...")

        # Use defaults if not provided.
        model_name = model_name or AgentConfig.DEFAULT_MODEL
        temperature = temperature if temperature is not None else AgentConfig.OPENAI_TEMPERATURE

        # Default system prompt if none provided.
        # Fixed: list numbering was "2./2." and "bids" was a typo for "birds".
        if system_prompt is None:
            system_prompt = """You are an expert bird identification assistant with access to:
1. **Bird Classifier** - Identify birds from images with high accuracy
2. **eBird Database** - Find recent sightings, hotspots, and species info

**Your capabilities:**
- Classify bird images and provide confidence scores
- Find where birds have been spotted recently
- Recommend birding locations
- Answer questions about bird species and habitats

**Response style:**
- Be enthusiastic and educational
- Always cite confidence scores for identifications
- Provide actionable location recommendations
- Format responses clearly with markdown

Let's explore the amazing world of birds together!

"""
        # Connect to both MCP servers.
        client = await MCPClientManager.create_multi_server_client()
        tools = await MCPClientManager.get_tools(client)

        # Create LLM with streaming enabled explicitly.
        model = ChatOpenAI(
            model=model_name,
            temperature=temperature,
            streaming=True,  # <- Key for token streaming!
        )

        agent_kwargs = {
            "model": model,
            "tools": tools,
            "system_prompt": system_prompt
        }

        if with_memory:
            agent_kwargs["checkpointer"] = InMemorySaver()

        print("[STATUS]: Creating LangGraph agent with streaming...")
        agent = create_agent(**agent_kwargs)

        print("[SUCCESS]: Streaming agent ready!\n")
        return agent

    @staticmethod
    async def create_streaming_agent_with_openai(
        model: str = "gpt-4o-mini",
        openai_key: Optional[str] = None,
        temperature: Optional[float] = None,
        system_prompt: Optional[str] = None,
        with_memory: bool = True
    ):
        """
        Create streaming agent with OpenAI LLM using user-provided API key.

        Args:
            model: OpenAI model name (e.g., "gpt-4o-mini", "gpt-4o")
            openai_key: User's OpenAI key (required)
            temperature: Sampling temperature (0-2), defaults to config
            system_prompt: Custom system prompt
            with_memory: Enable conversation memory

        Returns:
            LangGraph agent with OpenAI LLM and MCP tools

        Raises:
            ValueError: If no OpenAI key is supplied.
        """
        # Step 1: Validate that we have a key.
        if not openai_key:
            raise ValueError("OpenAI key is required")

        print(f"[AGENT] Creating OpenAI agent with model: {model}")

        # Step 2: Get MCP tools.
        client = await MCPClientManager.create_multi_server_client()
        tools = await MCPClientManager.get_tools(client)

        # Step 3: Create OpenAI LLM with the USER'S key (not from .env).
        llm = ChatOpenAI(
            model=model,
            api_key=openai_key,  # <- KEY DIFFERENCE: explicit api_key parameter
            temperature=temperature if temperature is not None else AgentConfig.OPENAI_TEMPERATURE,
            streaming=True
        )

        # Step 4: Build agent.
        agent_kwargs = {
            "model": llm,
            "tools": tools,
            "system_prompt": system_prompt or "You are a helpful AI assistant."
        }

        if with_memory:
            agent_kwargs["checkpointer"] = InMemorySaver()

        agent = create_agent(**agent_kwargs)
        return agent

    @staticmethod
    async def create_streaming_agent_with_hf(
        model: str = "meta-llama/Llama-3.1-8B-Instruct",
        hf_token: Optional[str] = None,
        temperature: Optional[float] = None,
        system_prompt: Optional[str] = None,
        with_memory: bool = True
    ):
        """
        Create streaming agent with HuggingFace Inference Providers.

        Uses HF's OpenAI-compatible router endpoint for full tool calling support.

        Args:
            model: HF model repo ID (e.g., "meta-llama/Llama-3.1-8B-Instruct")
            hf_token: User's HF API token (required)
            temperature: Sampling temperature (0-1), defaults to config
            system_prompt: Custom system prompt
            with_memory: Enable conversation memory

        Returns:
            LangGraph agent with HF LLM and MCP tools

        Raises:
            ValueError: If no HuggingFace token is supplied.
        """
        # Step 1: Validate that we have a token.
        if not hf_token:
            raise ValueError("HuggingFace token is required")

        print(f"[AGENT] Creating HuggingFace agent with model: {model}")

        # Step 2: Get MCP tools (same as before).
        client = await MCPClientManager.create_multi_server_client()
        tools = await MCPClientManager.get_tools(client)

        # Step 3: Create HuggingFace LLM using the Inference Providers router.
        # This is HF's OpenAI-compatible endpoint with full tool calling support.
        llm = ChatOpenAI(
            base_url="https://router.huggingface.co/v1",
            api_key=hf_token,
            model=model,
            temperature=temperature if temperature is not None else AgentConfig.HF_TEMPERATURE,
            streaming=True
        )

        # Step 4: Build agent (same structure as OpenAI version).
        agent_kwargs = {
            "model": llm,
            "tools": tools,
            "system_prompt": system_prompt or "You are a helpful AI assistant."
        }

        if with_memory:
            agent_kwargs["checkpointer"] = InMemorySaver()

        agent = create_agent(**agent_kwargs)
        return agent

    @staticmethod
    async def create_subagent_orchestrator(
        model: str,
        api_key: str,
        provider: str,
        mode: str = "Single Agent (All Tools)"
    ):
        """
        Create agent using subagent architecture (always uses subagent system).

        Args:
            model: LLM model name
            api_key: API key for the provider
            provider: LLM provider ("openai" or "huggingface")
            mode: Agent mode (e.g., "Single Agent (All Tools)", "Specialized Subagents (3 Specialists)")

        Returns:
            Configured agent (single subagent or router workflow)
        """
        # Local imports avoid a circular dependency at module load time.
        from .subagent_config import SubAgentConfig
        # NOTE(review): the commit ships `subagent_router.py.legacy` only —
        # confirm a `subagent_router` module actually exists, or this import fails.
        from .subagent_router import create_router_agent
        from .subagent_factory import SubAgentFactory
        # (redundant `from langchain_openai import ChatOpenAI` removed — it is
        # already imported at module level)

        # Get mode configuration.
        mode_config = SubAgentConfig.get_mode_config(mode)
        print(f"[AGENT]: Creating agent in '{mode}' mode")

        # Create LLM based on provider.
        if provider == "huggingface":
            llm = ChatOpenAI(
                base_url="https://router.huggingface.co/v1",
                api_key=api_key,
                model=model,
                temperature=AgentConfig.HF_TEMPERATURE,
                streaming=True
            )
        else:  # openai
            llm = ChatOpenAI(
                model=model,
                api_key=api_key,
                temperature=AgentConfig.OPENAI_TEMPERATURE,
                streaming=True
            )

        # Get all MCP tools.
        client = await MCPClientManager.create_multi_server_client()
        tools = await MCPClientManager.get_tools(client)

        # Create agent based on mode.
        if mode_config["use_router"]:
            # Multi-agent mode: create router with specialists.
            print(f"[AGENT]: Creating router with subagents: {mode_config['subagents']}")
            workflow = await create_router_agent(tools, llm)
            return workflow
        else:
            # Single agent mode: create one subagent directly.
            subagent_name = mode_config["subagents"][0]
            print(f"[AGENT]: Creating single subagent: {subagent_name}")
            agent = await SubAgentFactory.create_subagent(subagent_name, tools, llm)
            return agent
+
|
| 358 |
+
# Convenience functions (backwards compatible)
|
| 359 |
+
async def create_bird_agent(**kwargs):
|
| 360 |
+
"""Create basic classifier agent. Alias for backwards compatibility."""
|
| 361 |
+
return await AgentFactory.create_classifier_agent(**kwargs)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
async def create_multi_agent(**kwargs):
    """Convenience alias for AgentFactory.create_multi_server_agent."""
    agent = await AgentFactory.create_multi_server_agent(**kwargs)
    return agent
|
langgraph_agent/agents_README.md
ADDED
|
@@ -0,0 +1,1195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LangGraph Bird Classification Agent - Complete Guide
|
| 2 |
+
|
| 3 |
+
**A beginner-friendly guide to setting up and using the bird classification agent system.**
|
| 4 |
+
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
## Table of Contents
|
| 8 |
+
|
| 9 |
+
> **🚀 First time here?** Start with [Installation](#installation) → [Configuration](#configuration) → [CLI Commands](#cli-commands)
|
| 10 |
+
> **🐦 Want interactive chat?** Jump to [eBird Server Setup](#ebird-server-setup)
|
| 11 |
+
|
| 12 |
+
1. [What is This?](#what-is-this)
|
| 13 |
+
2. [Prerequisites](#prerequisites)
|
| 14 |
+
3. [Installation](#installation)
|
| 15 |
+
4. [Configuration](#configuration)
|
| 16 |
+
5. [Usage Guide](#usage-guide)
|
| 17 |
+
6. [eBird Server Setup](#ebird-server-setup) ⭐ **Important for interactive mode**
|
| 18 |
+
7. [CLI Commands](#cli-commands)
|
| 19 |
+
8. [Programmatic Usage](#programmatic-usage)
|
| 20 |
+
9. [Advanced Configuration](#advanced-configuration)
|
| 21 |
+
10. [Troubleshooting](#troubleshooting)
|
| 22 |
+
11. [Testing](#testing)
|
| 23 |
+
12. [Examples](#examples)
|
| 24 |
+
|
| 25 |
+
---
|
| 26 |
+
|
| 27 |
+
## What is This?
|
| 28 |
+
|
| 29 |
+
This is an intelligent AI agent that can:
|
| 30 |
+
- **Identify birds** from images (via URLs or uploads)
|
| 31 |
+
- **Answer questions** about bird species
|
| 32 |
+
- **Find bird sightings** near locations (when connected to eBird server)
|
| 33 |
+
- **Recommend birding hotspots**
|
| 34 |
+
- **Hold conversations** with memory of previous messages
|
| 35 |
+
|
| 36 |
+
**Technology Stack:**
|
| 37 |
+
- **LangGraph:** Agent framework
|
| 38 |
+
- **LangChain:** LLM integration
|
| 39 |
+
- **MCP (Model Context Protocol):** Tool integration
|
| 40 |
+
- **Modal:** GPU-powered bird classifier
|
| 41 |
+
- **OpenAI GPT:** Agent reasoning
|
| 42 |
+
|
| 43 |
+
---
|
| 44 |
+
|
| 45 |
+
## Prerequisites
|
| 46 |
+
|
| 47 |
+
### Required
|
| 48 |
+
|
| 49 |
+
1. **Python 3.11+**
|
| 50 |
+
```bash
|
| 51 |
+
python --version # Should be 3.11 or higher
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
2. **Virtual Environment** (recommended)
|
| 55 |
+
```bash
|
| 56 |
+
python -m venv .venv-hackathon
|
| 57 |
+
source .venv-hackathon/bin/activate # Mac/Linux
|
| 58 |
+
# or
|
| 59 |
+
.venv-hackathon\Scripts\activate # Windows
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
3. **API Keys:**
|
| 63 |
+
- **OpenAI API Key** (for GPT models) - Get from: https://platform.openai.com/api-keys
|
| 64 |
+
- **Modal Bird Classifier URL** (from Phase 1 deployment)
|
| 65 |
+
- **Bird Classifier API Key** (from Modal secrets)
|
| 66 |
+
|
| 67 |
+
### Optional (for multi-server setup)
|
| 68 |
+
|
| 69 |
+
4. **eBird API Key** (for bird data) - Get from: https://ebird.org/api/keygen
|
| 70 |
+
5. **eBird MCP Server** running locally or deployed
|
| 71 |
+
|
| 72 |
+
---
|
| 73 |
+
|
| 74 |
+
## Installation
|
| 75 |
+
|
| 76 |
+
### Step 1: Navigate to Directory
|
| 77 |
+
|
| 78 |
+
```bash
|
| 79 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 80 |
+
```
|
| 81 |
+
|
| 82 |
+
### Step 2: Activate Virtual Environment
|
| 83 |
+
|
| 84 |
+
```bash
|
| 85 |
+
source ../.venv-hackathon/bin/activate
|
| 86 |
+
```
|
| 87 |
+
|
| 88 |
+
### Step 3: Install Dependencies
|
| 89 |
+
|
| 90 |
+
```bash
|
| 91 |
+
# Install all required packages
|
| 92 |
+
pip install -r langgraph_agent/requirements.txt
|
| 93 |
+
```
|
| 94 |
+
|
| 95 |
+
**What gets installed:**
|
| 96 |
+
- `langchain` - LLM framework
|
| 97 |
+
- `langchain-openai` - OpenAI integration
|
| 98 |
+
- `langgraph` - Agent framework
|
| 99 |
+
- `langchain-mcp-adapters` - MCP protocol support
|
| 100 |
+
- `fastmcp` - MCP client/server
|
| 101 |
+
- `python-dotenv` - Environment variable management
|
| 102 |
+
|
| 103 |
+
### Step 4: Verify Installation
|
| 104 |
+
|
| 105 |
+
```bash
|
| 106 |
+
python -c "from langgraph_agent import AgentFactory; print('✅ Installation successful!')"
|
| 107 |
+
```
|
| 108 |
+
|
| 109 |
+
If you see `✅ Installation successful!`, you're ready to proceed!
|
| 110 |
+
|
| 111 |
+
---
|
| 112 |
+
|
| 113 |
+
## Configuration
|
| 114 |
+
|
| 115 |
+
### Step 1: Understand the Configuration File
|
| 116 |
+
|
| 117 |
+
The agent uses `langgraph_agent/config.py` to manage all settings. It reads from environment variables in your `.env` file.
|
| 118 |
+
|
| 119 |
+
### Step 2: Create/Update .env File
|
| 120 |
+
|
| 121 |
+
Navigate to the project root:
|
| 122 |
+
```bash
|
| 123 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 124 |
+
```
|
| 125 |
+
|
| 126 |
+
Edit `.env` file (create if it doesn't exist):
|
| 127 |
+
```bash
|
| 128 |
+
nano .env # or use your favorite editor
|
| 129 |
+
```
|
| 130 |
+
|
| 131 |
+
### Step 3: Add Required Variables
|
| 132 |
+
|
| 133 |
+
**Minimal Configuration (Classifier Only):**
|
| 134 |
+
```bash
|
| 135 |
+
# OpenAI API Key (Required)
|
| 136 |
+
OPENAI_API_KEY=sk-proj-your-openai-api-key-here
|
| 137 |
+
|
| 138 |
+
# Modal Bird Classifier (Required)
|
| 139 |
+
MODAL_MCP_URL=https://yourname--bird-classifier-mcp-web.modal.run/mcp
|
| 140 |
+
BIRD_CLASSIFIER_API_KEY=your-modal-api-key-here
|
| 141 |
+
|
| 142 |
+
# Agent Settings (Optional - uses defaults if not set)
|
| 143 |
+
LLM_MODEL=gpt-4o-mini
|
| 144 |
+
LLM_TEMPERATURE=0
|
| 145 |
+
```
|
| 146 |
+
|
| 147 |
+
**Full Configuration (With eBird Server):**
|
| 148 |
+
```bash
|
| 149 |
+
# OpenAI API Key
|
| 150 |
+
OPENAI_API_KEY=sk-proj-your-openai-api-key-here
|
| 151 |
+
|
| 152 |
+
# Modal Bird Classifier
|
| 153 |
+
MODAL_MCP_URL=https://yourname--bird-classifier-mcp-web.modal.run/mcp
|
| 154 |
+
BIRD_CLASSIFIER_API_KEY=your-modal-api-key-here
|
| 155 |
+
|
| 156 |
+
# eBird MCP Server
|
| 157 |
+
EBIRD_MCP_URL=http://localhost:8000/mcp
|
| 158 |
+
EBIRD_USE_STDIO=false
|
| 159 |
+
MCP_API_KEY=your-ebird-mcp-key # Optional, for production
|
| 160 |
+
|
| 161 |
+
# eBird API (if running eBird server)
|
| 162 |
+
EBIRD_API_KEY=your-ebird-api-key
|
| 163 |
+
|
| 164 |
+
# Agent Settings
|
| 165 |
+
LLM_MODEL=gpt-4o-mini
|
| 166 |
+
LLM_TEMPERATURE=0
|
| 167 |
+
AGENT_MAX_ITERATIONS=10
|
| 168 |
+
AGENT_TIMEOUT=120
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
### Step 4: Save and Verify Configuration
|
| 172 |
+
|
| 173 |
+
Save the file and verify it loads correctly:
|
| 174 |
+
|
| 175 |
+
```bash
|
| 176 |
+
python -c "from langgraph_agent import AgentConfig; AgentConfig.validate(); AgentConfig.print_config()"
|
| 177 |
+
```
|
| 178 |
+
|
| 179 |
+
**Expected Output:**
|
| 180 |
+
```
|
| 181 |
+
======================================================================
|
| 182 |
+
Agent Configuration
|
| 183 |
+
======================================================================
|
| 184 |
+
Modal URL: https://yourname--bird-classifier-mcp-web.modal.run/mcp
|
| 185 |
+
Modal API Key: your-modal-api-key...
|
| 186 |
+
eBird URL: http://localhost:8000/mcp
|
| 187 |
+
eBird Stdio: False
|
| 188 |
+
LLM Model: gpt-4o-mini
|
| 189 |
+
Temperature: 0.0
|
| 190 |
+
======================================================================
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
---
|
| 194 |
+
|
| 195 |
+
## Usage Guide
|
| 196 |
+
|
| 197 |
+
### Understanding Agent Types
|
| 198 |
+
|
| 199 |
+
The system provides **two types of agents**:
|
| 200 |
+
|
| 201 |
+
> **IMPORTANT FOR BEGINNERS:** The classifier agent (type 1) works immediately after installation. The multi-server agent (type 2) requires the eBird server to be running first. See [eBird Server Setup](#ebird-server-setup) below.
|
| 202 |
+
|
| 203 |
+
#### 1. Classifier Agent (Simple)
|
| 204 |
+
- **Purpose:** Identify birds from images
|
| 205 |
+
- **Tools:** 2 (classify_from_url, classify_from_base64)
|
| 206 |
+
- **Best for:** Quick bird identification
|
| 207 |
+
- **MCP Servers:** Modal classifier only
|
| 208 |
+
|
| 209 |
+
#### 2. Multi-Server Agent (Advanced)
|
| 210 |
+
- **Purpose:** Full bird identification + data exploration
|
| 211 |
+
- **Tools:** 9 (2 from Modal + 7 from eBird)
|
| 212 |
+
- **Best for:** Conversational bird exploration
|
| 213 |
+
- **MCP Servers:** Modal classifier + eBird data
|
| 214 |
+
|
| 215 |
+
---
|
| 216 |
+
|
| 217 |
+
### Visual Guide: Which Agent Type Do I Use?
|
| 218 |
+
|
| 219 |
+
```
|
| 220 |
+
┌─────────────────────────────────────────────────────────────────┐
|
| 221 |
+
│ CLASSIFIER AGENT (Simple) │
|
| 222 |
+
├─────────────────────────────────────────────────────────────────┤
|
| 223 |
+
│ │
|
| 224 |
+
│ What you need: │
|
| 225 |
+
│ ✅ Modal MCP URL (from Phase 1) │
|
| 226 |
+
│ ✅ Modal API Key │
|
| 227 |
+
│ ✅ OpenAI API Key │
|
| 228 |
+
│ ❌ eBird server (NOT needed) │
|
| 229 |
+
│ │
|
| 230 |
+
│ What it can do: │
|
| 231 |
+
│ ✅ Identify birds from image URLs │
|
| 232 |
+
│ ✅ Identify birds from base64 images │
|
| 233 |
+
│ ❌ Find bird sightings near locations │
|
| 234 |
+
│ ❌ Get eBird hotspot information │
|
| 235 |
+
│ │
|
| 236 |
+
│ How to use: │
|
| 237 |
+
│ python -m langgraph_agent demo │
|
| 238 |
+
│ python langgraph_agent/test_agent.py │
|
| 239 |
+
│ │
|
| 240 |
+
└─────────────────────────────────────────────────────────────────┘
|
| 241 |
+
|
| 242 |
+
┌─────────────────────────────────────────────────────────────────┐
|
| 243 |
+
│ MULTI-SERVER AGENT (Advanced) │
|
| 244 |
+
├─────────────────────────────────────────────────────────────────┤
|
| 245 |
+
│ │
|
| 246 |
+
│ What you need: │
|
| 247 |
+
│ ✅ Modal MCP URL │
|
| 248 |
+
│ ✅ Modal API Key │
|
| 249 |
+
│ ✅ OpenAI API Key │
|
| 250 |
+
│ ✅ eBird API Key │
|
| 251 |
+
│ ✅ eBird MCP server running (see setup guide below) │
|
| 252 |
+
│ │
|
| 253 |
+
│ What it can do: │
|
| 254 |
+
│ ✅ Everything from Classifier Agent + │
|
| 255 |
+
│ ✅ Find recent bird sightings near any location │
|
| 256 |
+
│ ✅ Discover birding hotspots │
|
| 257 |
+
│ ✅ Get notable sightings │
|
| 258 |
+
│ ✅ Search for specific species │
|
| 259 |
+
│ ✅ Conversational memory │
|
| 260 |
+
│ │
|
| 261 |
+
│ How to use: │
|
| 262 |
+
│ 1. Start eBird server: python ebird_tools.py --http --port 8000│
|
| 263 |
+
│ 2. python -m langgraph_agent interactive │
|
| 264 |
+
│ 3. python langgraph_agent/test_agent.py multi │
|
| 265 |
+
│ │
|
| 266 |
+
└─────────────────────────────────────────────────────────────────┘
|
| 267 |
+
```
|
| 268 |
+
|
| 269 |
+
---
|
| 270 |
+
|
| 271 |
+
## eBird Server Setup
|
| 272 |
+
|
| 273 |
+
### When Do You Need the eBird Server?
|
| 274 |
+
|
| 275 |
+
**You DON'T need eBird server for:**
|
| 276 |
+
- ✅ `python -m langgraph_agent demo` (classifier only)
|
| 277 |
+
- ✅ `python langgraph_agent/test_agent.py` (basic classifier tests)
|
| 278 |
+
- ✅ Creating classifier agents in your own code
|
| 279 |
+
|
| 280 |
+
**You DO need eBird server for:**
|
| 281 |
+
- ❌ `python -m langgraph_agent interactive` (multi-server chat)
|
| 282 |
+
- ❌ `python langgraph_agent/test_agent.py multi` (multi-server tests)
|
| 283 |
+
- ❌ Creating multi-server agents in your own code
|
| 284 |
+
- ❌ Gradio app (`app.py`)
|
| 285 |
+
|
| 286 |
+
---
|
| 287 |
+
|
| 288 |
+
### Two Ways to Run eBird Server
|
| 289 |
+
|
| 290 |
+
#### **Option 1: stdio (Recommended for Local Development)** ⭐
|
| 291 |
+
|
| 292 |
+
**Advantages:**
|
| 293 |
+
- ✅ No manual server management
|
| 294 |
+
- ✅ Agent auto-starts/stops server
|
| 295 |
+
- ✅ One terminal window only
|
| 296 |
+
- ✅ Simpler for beginners
|
| 297 |
+
|
| 298 |
+
**Setup:**
|
| 299 |
+
1. **Add to .env:**
|
| 300 |
+
```bash
|
| 301 |
+
EBIRD_USE_STDIO=true
|
| 302 |
+
EBIRD_API_KEY=your-ebird-api-key-here
|
| 303 |
+
```
|
| 304 |
+
|
| 305 |
+
2. **That's it!** Just run your agent:
|
| 306 |
+
```bash
|
| 307 |
+
python -m langgraph_agent interactive
|
| 308 |
+
# eBird server starts automatically!
|
| 309 |
+
```
|
| 310 |
+
|
| 311 |
+
---
|
| 312 |
+
|
| 313 |
+
#### **Option 2: HTTP (For Production/Manual Control)**
|
| 314 |
+
|
| 315 |
+
**Advantages:**
|
| 316 |
+
- ✅ Better for production deployment
|
| 317 |
+
- ✅ One server serves multiple agents
|
| 318 |
+
- ✅ More control over server lifecycle
|
| 319 |
+
|
| 320 |
+
**Setup:**
|
| 321 |
+
|
| 322 |
+
1. **Configure .env:**
|
| 323 |
+
```bash
|
| 324 |
+
EBIRD_USE_STDIO=false
|
| 325 |
+
EBIRD_MCP_URL=http://localhost:8000/mcp
|
| 326 |
+
EBIRD_API_KEY=your-ebird-api-key-here
|
| 327 |
+
```
|
| 328 |
+
|
| 329 |
+
2. **Terminal 1 - Start eBird Server:**
|
| 330 |
+
```bash
|
| 331 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 332 |
+
source ../.venv-hackathon/bin/activate
|
| 333 |
+
|
| 334 |
+
python bonus_ebird_tools.py --http --port 8000
|
| 335 |
+
```
|
| 336 |
+
|
| 337 |
+
3. **Verify server (new terminal):**
|
| 338 |
+
```bash
|
| 339 |
+
curl http://localhost:8000/health
|
| 340 |
+
# Should return: {"status": "ok"}
|
| 341 |
+
```
|
| 342 |
+
|
| 343 |
+
4. **Terminal 2 - Run Agent:**
|
| 344 |
+
```bash
|
| 345 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 346 |
+
source ../.venv-hackathon/bin/activate
|
| 347 |
+
|
| 348 |
+
python -m langgraph_agent interactive
|
| 349 |
+
```
|
| 350 |
+
|
| 351 |
+
---
|
| 352 |
+
|
| 353 |
+
### ✅ MCP_API_KEY Authentication Verification
|
| 354 |
+
|
| 355 |
+
YES, `app.py` is properly configured to use the eBird MCP API key. Here's the authentication flow:
|
| 356 |
+
|
| 357 |
+
**Authentication Chain:**
|
| 358 |
+
- `.env:11` - Defines `MCP_API_KEY=test-api-key`
|
| 359 |
+
- `config.py:21` - Loads it as `AgentConfig.MCP_API_KEY`
|
| 360 |
+
- `mcp_clients.py:63-71` - Uses it for eBird HTTP authentication:
|
| 361 |
+
```python
|
| 362 |
+
# HTTP transport (server running separately)
|
| 363 |
+
ebird_headers = {}
|
| 364 |
+
if AgentConfig.MCP_API_KEY:
|
| 365 |
+
ebird_headers["Authorization"] = f"Bearer {AgentConfig.MCP_API_KEY}"
|
| 366 |
+
|
| 367 |
+
servers_config["ebird"] = {
|
| 368 |
+
"transport": "streamable_http",
|
| 369 |
+
"url": AgentConfig.EBIRD_MCP_URL,
|
| 370 |
+
"headers": ebird_headers if ebird_headers else None
|
| 371 |
+
}
|
| 372 |
+
```
|
| 373 |
+
- `app.py:18` - Calls `AgentFactory.create_streaming_agent()` → which calls `MCPClientManager.create_multi_server_client()` → which includes the authenticated eBird connection
|
| 374 |
+
|
| 375 |
+
**Current setup (from .env):**
|
| 376 |
+
- `EBIRD_USE_STDIO=false` - Using HTTP mode
|
| 377 |
+
- `MCP_API_KEY=test-api-key` - Auth key for eBird server
|
| 378 |
+
- Authentication header: `Authorization: Bearer test-api-key`
|
| 379 |
+
|
| 380 |
+
---
|
| 381 |
+
|
| 382 |
+
### Troubleshooting eBird Server
|
| 383 |
+
|
| 384 |
+
#### Problem: "Connection refused" or "All connection attempts failed"
|
| 385 |
+
|
| 386 |
+
**Cause:** eBird server is not running
|
| 387 |
+
|
| 388 |
+
**Solution:**
|
| 389 |
+
1. Check if server terminal is still running
|
| 390 |
+
2. Restart the server: `python bonus_ebird_tools.py --http --port 8000`
|
| 391 |
+
3. Verify with: `curl http://localhost:8000/health`
|
| 392 |
+
|
| 393 |
+
#### Problem: "Address already in use" when starting server
|
| 394 |
+
|
| 395 |
+
**Cause:** Port 8000 is already taken
|
| 396 |
+
|
| 397 |
+
**Solution 1:** Stop the existing process
|
| 398 |
+
```bash
|
| 399 |
+
# Find process using port 8000
|
| 400 |
+
lsof -ti:8000
|
| 401 |
+
|
| 402 |
+
# Kill it
|
| 403 |
+
kill -9 $(lsof -ti:8000)
|
| 404 |
+
|
| 405 |
+
# Start server again
|
| 406 |
+
python bonus_ebird_tools.py --http --port 8000
|
| 407 |
+
```
|
| 408 |
+
|
| 409 |
+
**Solution 2:** Use a different port
|
| 410 |
+
```bash
|
| 411 |
+
# Start on different port
|
| 412 |
+
python bonus_ebird_tools.py --http --port 8001
|
| 413 |
+
|
| 414 |
+
# Update .env
|
| 415 |
+
EBIRD_MCP_URL=http://localhost:8001/mcp
|
| 416 |
+
```
|
| 417 |
+
|
| 418 |
+
#### Problem: "EBIRD_API_KEY not set"
|
| 419 |
+
|
| 420 |
+
**Cause:** Missing eBird API key in .env
|
| 421 |
+
|
| 422 |
+
**Solution:**
|
| 423 |
+
1. Get API key from: https://ebird.org/api/keygen
|
| 424 |
+
2. Add to `.env` file:
|
| 425 |
+
```bash
|
| 426 |
+
EBIRD_API_KEY=your-key-here
|
| 427 |
+
```
|
| 428 |
+
3. Restart the eBird server
|
| 429 |
+
|
| 430 |
+
---
|
| 431 |
+
|
| 432 |
+
### Quick Reference: Server vs No Server
|
| 433 |
+
|
| 434 |
+
| Command | Needs eBird Server? | What It Does |
|
| 435 |
+
|---------|-------------------|--------------|
|
| 436 |
+
| `python -m langgraph_agent demo` | ❌ No | Test classifier with single image |
|
| 437 |
+
| `python -m langgraph_agent demo [url]` | ❌ No | Test classifier with custom image |
|
| 438 |
+
| `python -m langgraph_agent interactive` | ✅ Yes | Chat with full agent (classifier + eBird) |
|
| 439 |
+
| `python langgraph_agent/test_agent.py` | ❌ No | Run classifier tests (2 images) |
|
| 440 |
+
| `python langgraph_agent/test_agent.py multi` | ✅ Yes | Run multi-server tests (requires eBird) |
|
| 441 |
+
|
| 442 |
+
---
|
| 443 |
+
|
| 444 |
+
## CLI Commands
|
| 445 |
+
|
| 446 |
+
### Running the CLI
|
| 447 |
+
|
| 448 |
+
The agent package can be run as a Python module from the project root:
|
| 449 |
+
|
| 450 |
+
```bash
|
| 451 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 452 |
+
python -m langgraph_agent [command] [options]
|
| 453 |
+
```
|
| 454 |
+
|
| 455 |
+
---
|
| 456 |
+
|
| 457 |
+
### Command 1: Demo Mode (Default)
|
| 458 |
+
|
| 459 |
+
**Purpose:** Quick test with a single image classification
|
| 460 |
+
|
| 461 |
+
**Usage:**
|
| 462 |
+
```bash
|
| 463 |
+
# Use default test image
|
| 464 |
+
python -m langgraph_agent demo
|
| 465 |
+
|
| 466 |
+
# Or specify your own image URL
|
| 467 |
+
python -m langgraph_agent demo "https://example.com/bird.jpg"
|
| 468 |
+
```
|
| 469 |
+
|
| 470 |
+
**What happens:**
|
| 471 |
+
1. Loads configuration from `.env`
|
| 472 |
+
2. Prints config summary
|
| 473 |
+
3. Creates classifier agent
|
| 474 |
+
4. Classifies the bird image
|
| 475 |
+
5. Prints results
|
| 476 |
+
|
| 477 |
+
**Example Output:**
|
| 478 |
+
```
|
| 479 |
+
======================================================================
|
| 480 |
+
Agent Configuration
|
| 481 |
+
======================================================================
|
| 482 |
+
Modal URL: https://yourname--bird-classifier...
|
| 483 |
+
Modal API Key: xxxxxxxxxxxxxxxx...
|
| 484 |
+
LLM Model: gpt-4o-mini
|
| 485 |
+
Temperature: 0.0
|
| 486 |
+
======================================================================
|
| 487 |
+
|
| 488 |
+
[STATUS]: Connecting to Modal MCP server...
|
| 489 |
+
[STATUS]: Loading MCP tools...
|
| 490 |
+
[LOADED]: 2 tools - ['classify_from_base64', 'classify_from_url']
|
| 491 |
+
[STATUS]: Creating LangGraph agent...
|
| 492 |
+
[SUCCESS]: Agent ready!
|
| 493 |
+
|
| 494 |
+
======================================================================
|
| 495 |
+
Testing bird classification...
|
| 496 |
+
[IMAGE URL]: https://images.unsplash.com/photo-1445820200644...
|
| 497 |
+
|
| 498 |
+
[AGENT RESPONSE]:
|
| 499 |
+
The bird in the image is a **Grandala** with a confidence score
|
| 500 |
+
of **99.9%**! What a beautiful bird!
|
| 501 |
+
|
| 502 |
+
[DEMO COMPLETE!]
|
| 503 |
+
```
|
| 504 |
+
|
| 505 |
+
---
|
| 506 |
+
|
| 507 |
+
### Command 2: Interactive Mode
|
| 508 |
+
|
| 509 |
+
> **⚠️ IMPORTANT:** This command requires the eBird server to be running first! See [eBird Server Setup](#ebird-server-setup) for step-by-step instructions.
|
| 510 |
+
|
| 511 |
+
**Purpose:** Chat with the agent in real-time
|
| 512 |
+
|
| 513 |
+
**Usage:**
|
| 514 |
+
```bash
|
| 515 |
+
python -m langgraph_agent interactive
|
| 516 |
+
```
|
| 517 |
+
|
| 518 |
+
**What happens:**
|
| 519 |
+
1. Creates multi-server agent with memory
|
| 520 |
+
2. Opens interactive chat
|
| 521 |
+
3. Maintains conversation history
|
| 522 |
+
4. Type queries, get responses
|
| 523 |
+
5. Quit with 'exit' or 'quit'
|
| 524 |
+
|
| 525 |
+
**Example Session:**
|
| 526 |
+
```
|
| 527 |
+
======================================================================
|
| 528 |
+
Bird Classification Agent - Interactive Mode
|
| 529 |
+
======================================================================
|
| 530 |
+
Commands:
|
| 531 |
+
- Type 'quit' or 'exit' to end session
|
| 532 |
+
- Paste image URLs to classify birds
|
| 533 |
+
- Ask about bird locations, sightings, hotspots
|
| 534 |
+
======================================================================
|
| 535 |
+
|
| 536 |
+
[STATUS]: Connecting to Modal and eBird MCP servers...
|
| 537 |
+
[STATUS]: Loading MCP tools...
|
| 538 |
+
[LOADED]: 9 tools - ['classify_from_base64', 'classify_from_url',
|
| 539 |
+
'search_species', 'get_recent_sightings_nearby', ...]
|
| 540 |
+
[STATUS]: Creating multi-server LangGraph agent...
|
| 541 |
+
[SUCCESS]: Agent ready with all tools!
|
| 542 |
+
|
| 543 |
+
You: What bird is this? https://example.com/cardinal.jpg
|
| 544 |
+
|
| 545 |
+
Agent: This is a **Northern Cardinal** with 98.7% confidence! These
|
| 546 |
+
beautiful red birds are common across North America and known for
|
| 547 |
+
their bright plumage and distinctive crest.
|
| 548 |
+
|
| 549 |
+
You: Where can I see them near Boston?
|
| 550 |
+
|
| 551 |
+
Agent: Northern Cardinals have been spotted recently near Boston!
|
| 552 |
+
Here are some locations:
|
| 553 |
+
- Mount Auburn Cemetery (15 sightings this week)
|
| 554 |
+
- Arnold Arboretum (12 sightings)
|
| 555 |
+
- Fresh Pond Reservation (8 sightings)
|
| 556 |
+
|
| 557 |
+
You: quit
|
| 558 |
+
|
| 559 |
+
Goodbye! Happy birding!
|
| 560 |
+
```
|
| 561 |
+
|
| 562 |
+
**Key Features:**
|
| 563 |
+
- 🧠 **Remembers conversation** - References previous messages
|
| 564 |
+
- 🔧 **Auto-selects tools** - Agent decides which tools to use
|
| 565 |
+
- 💬 **Natural language** - Talk like you would to a person
|
| 566 |
+
|
| 567 |
+
---
|
| 568 |
+
|
| 569 |
+
## Programmatic Usage
|
| 570 |
+
|
| 571 |
+
### Import the Package
|
| 572 |
+
|
| 573 |
+
From any Python file in your project:
|
| 574 |
+
|
| 575 |
+
```python
|
| 576 |
+
import asyncio
|
| 577 |
+
from langgraph_agent import AgentFactory, AgentConfig
|
| 578 |
+
```
|
| 579 |
+
|
| 580 |
+
---
|
| 581 |
+
|
| 582 |
+
### Example 1: Simple Bird Classification
|
| 583 |
+
|
| 584 |
+
```python
|
| 585 |
+
import asyncio
|
| 586 |
+
from langgraph_agent import AgentFactory
|
| 587 |
+
|
| 588 |
+
async def classify_bird():
|
| 589 |
+
# Create classifier agent
|
| 590 |
+
agent = await AgentFactory.create_classifier_agent()
|
| 591 |
+
|
| 592 |
+
# Classify a bird
|
| 593 |
+
result = await agent.ainvoke({
|
| 594 |
+
"messages": [{
|
| 595 |
+
"role": "user",
|
| 596 |
+
"content": "What bird is this? https://example.com/bird.jpg"
|
| 597 |
+
}]
|
| 598 |
+
})
|
| 599 |
+
|
| 600 |
+
# Print response
|
| 601 |
+
print(result["messages"][-1].content)
|
| 602 |
+
|
| 603 |
+
# Run
|
| 604 |
+
asyncio.run(classify_bird())
|
| 605 |
+
```
|
| 606 |
+
|
| 607 |
+
---
|
| 608 |
+
|
| 609 |
+
### Example 2: Conversational Agent with Memory
|
| 610 |
+
|
| 611 |
+
```python
|
| 612 |
+
import asyncio
|
| 613 |
+
from langgraph_agent import AgentFactory
|
| 614 |
+
|
| 615 |
+
async def conversation():
|
| 616 |
+
# Create agent with memory
|
| 617 |
+
agent = await AgentFactory.create_classifier_agent(with_memory=True)
|
| 618 |
+
|
| 619 |
+
# Thread ID for conversation tracking
|
| 620 |
+
config = {"configurable": {"thread_id": "user_123"}}
|
| 621 |
+
|
| 622 |
+
# Turn 1: Classify bird
|
| 623 |
+
result1 = await agent.ainvoke({
|
| 624 |
+
"messages": [{
|
| 625 |
+
"role": "user",
|
| 626 |
+
"content": "Identify this: https://example.com/cardinal.jpg"
|
| 627 |
+
}]
|
| 628 |
+
}, config)
|
| 629 |
+
print("Turn 1:", result1["messages"][-1].content)
|
| 630 |
+
|
| 631 |
+
# Turn 2: Ask follow-up (agent remembers the bird!)
|
| 632 |
+
result2 = await agent.ainvoke({
|
| 633 |
+
"messages": [{
|
| 634 |
+
"role": "user",
|
| 635 |
+
"content": "What color is this bird?"
|
| 636 |
+
}]
|
| 637 |
+
}, config)
|
| 638 |
+
print("Turn 2:", result2["messages"][-1].content)
|
| 639 |
+
|
| 640 |
+
asyncio.run(conversation())
|
| 641 |
+
```
|
| 642 |
+
|
| 643 |
+
**Output:**
|
| 644 |
+
```
|
| 645 |
+
Turn 1: This is a Northern Cardinal with 98.7% confidence!
|
| 646 |
+
Turn 2: The Northern Cardinal is bright red, which we identified
|
| 647 |
+
in the previous image. Males are vibrant red while females are
|
| 648 |
+
brown with red highlights.
|
| 649 |
+
```
|
| 650 |
+
|
| 651 |
+
---
|
| 652 |
+
|
| 653 |
+
### Example 3: Custom Configuration
|
| 654 |
+
|
| 655 |
+
```python
|
| 656 |
+
import asyncio
|
| 657 |
+
from langgraph_agent import AgentFactory
|
| 658 |
+
|
| 659 |
+
async def custom_agent():
|
| 660 |
+
# Create agent with custom settings
|
| 661 |
+
agent = await AgentFactory.create_classifier_agent(
|
| 662 |
+
model_name="gpt-4o", # Use more powerful model
|
| 663 |
+
temperature=0.3, # Add creativity
|
| 664 |
+
with_memory=True # Enable conversation memory
|
| 665 |
+
)
|
| 666 |
+
|
| 667 |
+
# Use the agent...
|
| 668 |
+
result = await agent.ainvoke({
|
| 669 |
+
"messages": [{
|
| 670 |
+
"role": "user",
|
| 671 |
+
"content": "Classify and tell me a fun fact about this bird: URL"
|
| 672 |
+
}]
|
| 673 |
+
})
|
| 674 |
+
|
| 675 |
+
print(result["messages"][-1].content)
|
| 676 |
+
|
| 677 |
+
asyncio.run(custom_agent())
|
| 678 |
+
```
|
| 679 |
+
|
| 680 |
+
---
|
| 681 |
+
|
| 682 |
+
### Example 4: Multi-Server Agent (Classifier + eBird)
|
| 683 |
+
|
| 684 |
+
```python
|
| 685 |
+
import asyncio
|
| 686 |
+
from langgraph_agent import AgentFactory
|
| 687 |
+
|
| 688 |
+
async def multi_server_agent():
|
| 689 |
+
# Create agent with both MCP servers
|
| 690 |
+
agent = await AgentFactory.create_multi_server_agent(
|
| 691 |
+
with_memory=True
|
| 692 |
+
)
|
| 693 |
+
|
| 694 |
+
config = {"configurable": {"thread_id": "session_1"}}
|
| 695 |
+
|
| 696 |
+
# Agent can now use 9 tools across 2 servers!
|
| 697 |
+
result = await agent.ainvoke({
|
| 698 |
+
"messages": [{
|
| 699 |
+
"role": "user",
|
| 700 |
+
"content": "What bird is this? Where can I see it near NYC? https://example.com/bird.jpg"
|
| 701 |
+
}]
|
| 702 |
+
}, config)
|
| 703 |
+
|
| 704 |
+
print(result["messages"][-1].content)
|
| 705 |
+
|
| 706 |
+
asyncio.run(multi_server_agent())
|
| 707 |
+
```
|
| 708 |
+
|
| 709 |
+
**What the agent does:**
|
| 710 |
+
1. Uses `classify_from_url` to identify the bird
|
| 711 |
+
2. Uses `search_species` to get species code
|
| 712 |
+
3. Uses `get_recent_sightings_nearby` to find sightings near NYC
|
| 713 |
+
4. Formats response with all information
|
| 714 |
+
|
| 715 |
+
---
|
| 716 |
+
|
| 717 |
+
### Example 5: Check Configuration
|
| 718 |
+
|
| 719 |
+
```python
|
| 720 |
+
from langgraph_agent import AgentConfig
|
| 721 |
+
|
| 722 |
+
# Print current configuration
|
| 723 |
+
AgentConfig.print_config()
|
| 724 |
+
|
| 725 |
+
# Access specific values
|
| 726 |
+
print(f"Using model: {AgentConfig.DEFAULT_MODEL}")
|
| 727 |
+
print(f"Modal URL: {AgentConfig.MODAL_MCP_URL}")
|
| 728 |
+
|
| 729 |
+
# Validate configuration
|
| 730 |
+
try:
|
| 731 |
+
AgentConfig.validate()
|
| 732 |
+
print("✅ Configuration is valid!")
|
| 733 |
+
except ValueError as e:
|
| 734 |
+
print(f"❌ Configuration error: {e}")
|
| 735 |
+
```
|
| 736 |
+
|
| 737 |
+
---
|
| 738 |
+
|
| 739 |
+
## Advanced Configuration
|
| 740 |
+
|
| 741 |
+
### Changing the LLM Model
|
| 742 |
+
|
| 743 |
+
Edit `.env` file:
|
| 744 |
+
```bash
|
| 745 |
+
# Use GPT-4o (more powerful, slower, more expensive)
|
| 746 |
+
LLM_MODEL=gpt-4o
|
| 747 |
+
|
| 748 |
+
# Use GPT-4o-mini (faster, cheaper, default)
|
| 749 |
+
LLM_MODEL=gpt-4o-mini
|
| 750 |
+
|
| 751 |
+
# Use GPT-3.5 (cheapest, fastest, less capable)
|
| 752 |
+
LLM_MODEL=gpt-3.5-turbo
|
| 753 |
+
```
|
| 754 |
+
|
| 755 |
+
**Or override in code:**
|
| 756 |
+
```python
|
| 757 |
+
agent = await AgentFactory.create_classifier_agent(
|
| 758 |
+
model_name="gpt-4o" # Override default
|
| 759 |
+
)
|
| 760 |
+
```
|
| 761 |
+
|
| 762 |
+
---
|
| 763 |
+
|
| 764 |
+
### Adjusting Temperature
|
| 765 |
+
|
| 766 |
+
**Temperature controls creativity:**
|
| 767 |
+
- `0.0` = Deterministic, factual (recommended for classification)
|
| 768 |
+
- `0.5` = Balanced
|
| 769 |
+
- `1.0` = Creative, varied responses
|
| 770 |
+
|
| 771 |
+
In `.env`:
|
| 772 |
+
```bash
|
| 773 |
+
LLM_TEMPERATURE=0.3
|
| 774 |
+
```
|
| 775 |
+
|
| 776 |
+
Or in code:
|
| 777 |
+
```python
|
| 778 |
+
agent = await AgentFactory.create_classifier_agent(
|
| 779 |
+
temperature=0.3
|
| 780 |
+
)
|
| 781 |
+
```
|
| 782 |
+
|
| 783 |
+
---
|
| 784 |
+
|
| 785 |
+
### Configuring eBird Server Transport
|
| 786 |
+
|
| 787 |
+
**Option 1: HTTP Transport (default)**
|
| 788 |
+
```bash
|
| 789 |
+
EBIRD_USE_STDIO=false
|
| 790 |
+
EBIRD_MCP_URL=http://localhost:8000/mcp
|
| 791 |
+
```
|
| 792 |
+
|
| 793 |
+
Start eBird server separately:
|
| 794 |
+
```bash
|
| 795 |
+
python bonus_ebird_tools.py --http --port 8000
|
| 796 |
+
```
|
| 797 |
+
|
| 798 |
+
**Option 2: Stdio Transport**
|
| 799 |
+
```bash
|
| 800 |
+
EBIRD_USE_STDIO=true
|
| 801 |
+
```
|
| 802 |
+
|
| 803 |
+
The agent will automatically start the eBird server as a subprocess.
|
| 804 |
+
|
| 805 |
+
---
|
| 806 |
+
|
| 807 |
+
## Troubleshooting
|
| 808 |
+
|
| 809 |
+
### Issue 1: "ModuleNotFoundError: No module named 'langgraph_agent'"
|
| 810 |
+
|
| 811 |
+
**Cause:** Package not in Python path
|
| 812 |
+
|
| 813 |
+
**Solution:**
|
| 814 |
+
```bash
|
| 815 |
+
# Make sure you're in the correct directory
|
| 816 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 817 |
+
|
| 818 |
+
# Verify package exists
|
| 819 |
+
ls langgraph_agent/
|
| 820 |
+
|
| 821 |
+
# Try import again
|
| 822 |
+
python -c "from langgraph_agent import AgentFactory"
|
| 823 |
+
```
|
| 824 |
+
|
| 825 |
+
---
|
| 826 |
+
|
| 827 |
+
### Issue 2: "ValueError: MODAL_MCP_URL not set in .env"
|
| 828 |
+
|
| 829 |
+
**Cause:** Missing or incorrect environment variables
|
| 830 |
+
|
| 831 |
+
**Solution:**
|
| 832 |
+
```bash
|
| 833 |
+
# Check .env file exists
|
| 834 |
+
ls -la .env
|
| 835 |
+
|
| 836 |
+
# Verify contents
|
| 837 |
+
cat .env | grep MODAL_MCP_URL
|
| 838 |
+
|
| 839 |
+
# Make sure it's set correctly (no quotes, no trailing slash)
|
| 840 |
+
MODAL_MCP_URL=https://your-url-here/mcp
|
| 841 |
+
```
|
| 842 |
+
|
| 843 |
+
---
|
| 844 |
+
|
| 845 |
+
### Issue 3: "401 Unauthorized" when calling Modal
|
| 846 |
+
|
| 847 |
+
**Cause:** Incorrect API key
|
| 848 |
+
|
| 849 |
+
**Solution:**
|
| 850 |
+
```bash
|
| 851 |
+
# Verify API key in .env matches Modal secret
|
| 852 |
+
cat .env | grep BIRD_CLASSIFIER_API_KEY
|
| 853 |
+
|
| 854 |
+
# Get correct key from Modal
|
| 855 |
+
modal secret list
|
| 856 |
+
|
| 857 |
+
# Update .env with correct key
|
| 858 |
+
```
|
| 859 |
+
|
| 860 |
+
---
|
| 861 |
+
|
| 862 |
+
### Issue 4: Agent is slow (30+ seconds per request)
|
| 863 |
+
|
| 864 |
+
**Cause:** Using expensive model or large conversation history
|
| 865 |
+
|
| 866 |
+
**Solution 1: Use faster model**
|
| 867 |
+
```bash
|
| 868 |
+
# In .env
|
| 869 |
+
LLM_MODEL=gpt-4o-mini # Instead of gpt-4o
|
| 870 |
+
```
|
| 871 |
+
|
| 872 |
+
**Solution 2: Limit conversation history**
|
| 873 |
+
```python
|
| 874 |
+
from langchain_core.messages.utils import trim_messages
|
| 875 |
+
|
| 876 |
+
# Trim to last 10 messages before invoking
|
| 877 |
+
messages = trim_messages(state["messages"], max_tokens=2000)
|
| 878 |
+
```
|
| 879 |
+
|
| 880 |
+
---
|
| 881 |
+
|
| 882 |
+
### Issue 5: "Too many tools loaded" or incorrect tool count
|
| 883 |
+
|
| 884 |
+
**Cause:** Wrong server configuration
|
| 885 |
+
|
| 886 |
+
**Expected tool counts:**
|
| 887 |
+
- Classifier only: **2 tools**
|
| 888 |
+
- Multi-server: **9 tools** (2 + 7)
|
| 889 |
+
|
| 890 |
+
**Solution:**
|
| 891 |
+
```bash
|
| 892 |
+
# Check which agent type you're creating
|
| 893 |
+
# For classifier only:
|
| 894 |
+
agent = await AgentFactory.create_classifier_agent()
|
| 895 |
+
|
| 896 |
+
# For multi-server:
|
| 897 |
+
agent = await AgentFactory.create_multi_server_agent()
|
| 898 |
+
```
|
| 899 |
+
|
| 900 |
+
---
|
| 901 |
+
|
| 902 |
+
## Testing
|
| 903 |
+
|
| 904 |
+
### Component Testing (Individual Parts)
|
| 905 |
+
|
| 906 |
+
Before testing agents, verify each component works individually.
|
| 907 |
+
|
| 908 |
+
#### Test Modal Classifier (Step 1)
|
| 909 |
+
|
| 910 |
+
**Verify Modal deployment:**
|
| 911 |
+
```bash
|
| 912 |
+
modal app list
|
| 913 |
+
```
|
| 914 |
+
|
| 915 |
+
**Test with curl:**
|
| 916 |
+
```bash
|
| 917 |
+
curl -X POST \
|
| 918 |
+
-H "Content-Type: application/json" \
|
| 919 |
+
-H "Accept: application/json, text/event-stream" \
|
| 920 |
+
-H "X-API-Key: YOUR_API_KEY" \
|
| 921 |
+
-d '{
|
| 922 |
+
"jsonrpc": "2.0",
|
| 923 |
+
"id": 1,
|
| 924 |
+
"method": "initialize",
|
| 925 |
+
"params": {
|
| 926 |
+
"protocolVersion": "2024-11-05",
|
| 927 |
+
"capabilities": {},
|
| 928 |
+
"clientInfo": {
|
| 929 |
+
"name": "test",
|
| 930 |
+
"version": "1.0"
|
| 931 |
+
}
|
| 932 |
+
}
|
| 933 |
+
}' \
|
| 934 |
+
https://yourname--bird-classifier-mcp-web.modal.run/mcp
|
| 935 |
+
```
|
| 936 |
+
|
| 937 |
+
✅ **Success:** Modal responds with JSON result
|
| 938 |
+
|
| 939 |
+
---
|
| 940 |
+
|
| 941 |
+
### Agent Testing (Combined System)
|
| 942 |
+
|
| 943 |
+
#### Test 1: Basic Classifier Agent (No eBird Server Needed)
|
| 944 |
+
|
| 945 |
+
This test works immediately after installation - no eBird server required!
|
| 946 |
+
|
| 947 |
+
```bash
|
| 948 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 949 |
+
|
| 950 |
+
# Activate virtual environment
|
| 951 |
+
source ../.venv-hackathon/bin/activate
|
| 952 |
+
|
| 953 |
+
# Test basic classifier agent (2 bird images)
|
| 954 |
+
python langgraph_agent/test_agent.py
|
| 955 |
+
```
|
| 956 |
+
|
| 957 |
+
**Expected Output:**
|
| 958 |
+
```
|
| 959 |
+
======================================================================
|
| 960 |
+
Test Suite: Basic Classifier Agent
|
| 961 |
+
======================================================================
|
| 962 |
+
|
| 963 |
+
[STATUS]: Connecting to Modal MCP server...
|
| 964 |
+
[STATUS]: Loading MCP tools...
|
| 965 |
+
[LOADED]: 2 tools - ['classify_from_base64', 'classify_from_url']
|
| 966 |
+
[STATUS]: Creating LangGraph agent...
|
| 967 |
+
[SUCCESS]: Agent ready!
|
| 968 |
+
|
| 969 |
+
[TEST 1/2]
|
| 970 |
+
======================================================================
|
| 971 |
+
|
| 972 |
+
[RESULT]: The bird in the image is a **Jandaya Parakeet**!
|
| 973 |
+
I have a high confidence score of **99.84%** in this identification.
|
| 974 |
+
|
| 975 |
+
[TEST 2/2]
|
| 976 |
+
======================================================================
|
| 977 |
+
|
| 978 |
+
[RESULT]: The bird in the image is identified as a **Grandala**
|
| 979 |
+
with a confidence score of **99.9%**! What a beautiful bird!
|
| 980 |
+
|
| 981 |
+
[ALL TESTS COMPLETE!]
|
| 982 |
+
```
|
| 983 |
+
|
| 984 |
+
---
|
| 985 |
+
|
| 986 |
+
#### Test 2: Multi-Server Agent (Requires eBird Server)
|
| 987 |
+
|
| 988 |
+
This test requires the eBird server to be running first!
|
| 989 |
+
|
| 990 |
+
**Step-by-Step:**
|
| 991 |
+
|
| 992 |
+
1. **Terminal 1 - Start eBird Server:**
|
| 993 |
+
```bash
|
| 994 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 995 |
+
source ../.venv-hackathon/bin/activate
|
| 996 |
+
|
| 997 |
+
# Start eBird server (keep this running)
|
| 998 |
+
python bonus_ebird_tools.py --http --port 8000
|
| 999 |
+
```
|
| 1000 |
+
|
| 1001 |
+
2. **Terminal 2 - Run Multi-Server Test:**
|
| 1002 |
+
```bash
|
| 1003 |
+
cd /Users/jacobbinder/Desktop/hackathon/hackathon_draft
|
| 1004 |
+
source ../.venv-hackathon/bin/activate
|
| 1005 |
+
|
| 1006 |
+
# Run multi-server test
|
| 1007 |
+
python langgraph_agent/test_agent.py multi
|
| 1008 |
+
```
|
| 1009 |
+
|
| 1010 |
+
**Expected Output:**
|
| 1011 |
+
```
|
| 1012 |
+
======================================================================
|
| 1013 |
+
Test Suite: Multi-Server Agent
|
| 1014 |
+
======================================================================
|
| 1015 |
+
|
| 1016 |
+
[STATUS]: Connecting to Modal and eBird servers...
|
| 1017 |
+
[STATUS]: Loading MCP tools...
|
| 1018 |
+
[LOADED]: 9 tools available
|
| 1019 |
+
- classify_from_base64
|
| 1020 |
+
- classify_from_url
|
| 1021 |
+
- search_species
|
| 1022 |
+
- get_recent_sightings_nearby
|
| 1023 |
+
- get_notable_sightings_nearby
|
| 1024 |
+
- get_recent_checklists_nearby
|
| 1025 |
+
- get_regional_statistics
|
| 1026 |
+
- list_hotspots_nearby
|
| 1027 |
+
- get_checklist_details
|
| 1028 |
+
|
| 1029 |
+
[TEST 1]: Classify bird from URL
|
| 1030 |
+
======================================================================
|
| 1031 |
+
|
| 1032 |
+
[RESULT]: The bird in the image is a **Jandaya Parakeet** with
|
| 1033 |
+
99.84% confidence!
|
| 1034 |
+
|
| 1035 |
+
[TEST 2]: Follow-up question (tests memory)
|
| 1036 |
+
======================================================================
|
| 1037 |
+
|
| 1038 |
+
[RESULT]: You can see Jandaya Parakeets near Boston (42.36, -71.06):
|
| 1039 |
+
- Arnold Arboretum (3 sightings this month)
|
| 1040 |
+
- Mount Auburn Cemetery (2 sightings)
|
| 1041 |
+
Note: These are exotic birds, not native to the area.
|
| 1042 |
+
|
| 1043 |
+
[ALL TESTS COMPLETE!]
|
| 1044 |
+
```
|
| 1045 |
+
|
| 1046 |
+
**What This Tests:**
|
| 1047 |
+
- ✅ Multi-server connection (Modal + eBird)
|
| 1048 |
+
- ✅ Tool integration (9 tools from both servers)
|
| 1049 |
+
- ✅ Conversation memory (follow-up question)
|
| 1050 |
+
- ✅ Cross-server reasoning (classify → location lookup)
|
| 1051 |
+
|
| 1052 |
+
---
|
| 1053 |
+
|
| 1054 |
+
## Examples
|
| 1055 |
+
|
| 1056 |
+
### Complete Example: Bird Identification App
|
| 1057 |
+
|
| 1058 |
+
```python
|
| 1059 |
+
"""
|
| 1060 |
+
Simple bird identification CLI app
|
| 1061 |
+
"""
|
| 1062 |
+
import asyncio
|
| 1063 |
+
from langgraph_agent import AgentFactory
|
| 1064 |
+
|
| 1065 |
+
async def main():
|
| 1066 |
+
print("🐦 Bird Identification App")
|
| 1067 |
+
print("=" * 50)
|
| 1068 |
+
|
| 1069 |
+
# Create agent
|
| 1070 |
+
print("\n[1/3] Initializing agent...")
|
| 1071 |
+
agent = await AgentFactory.create_classifier_agent()
|
| 1072 |
+
|
| 1073 |
+
# Get image URL from user
|
| 1074 |
+
print("\n[2/3] Waiting for input...")
|
| 1075 |
+
image_url = input("Enter bird image URL: ").strip()
|
| 1076 |
+
|
| 1077 |
+
if not image_url:
|
| 1078 |
+
print("❌ No URL provided")
|
| 1079 |
+
return
|
| 1080 |
+
|
| 1081 |
+
# Classify
|
| 1082 |
+
print("\n[3/3] Classifying...")
|
| 1083 |
+
result = await agent.ainvoke({
|
| 1084 |
+
"messages": [{
|
| 1085 |
+
"role": "user",
|
| 1086 |
+
"content": f"Identify this bird: {image_url}"
|
| 1087 |
+
}]
|
| 1088 |
+
})
|
| 1089 |
+
|
| 1090 |
+
# Display result
|
| 1091 |
+
print("\n" + "=" * 50)
|
| 1092 |
+
print("RESULT:")
|
| 1093 |
+
print("=" * 50)
|
| 1094 |
+
print(result["messages"][-1].content)
|
| 1095 |
+
|
| 1096 |
+
if __name__ == "__main__":
|
| 1097 |
+
asyncio.run(main())
|
| 1098 |
+
```
|
| 1099 |
+
|
| 1100 |
+
**Run it:**
|
| 1101 |
+
```bash
|
| 1102 |
+
python my_bird_app.py
|
| 1103 |
+
```
|
| 1104 |
+
|
| 1105 |
+
---
|
| 1106 |
+
|
| 1107 |
+
## Quick Reference
|
| 1108 |
+
|
| 1109 |
+
### Import Patterns
|
| 1110 |
+
|
| 1111 |
+
```python
|
| 1112 |
+
# Import everything
|
| 1113 |
+
from langgraph_agent import (
|
| 1114 |
+
AgentFactory,
|
| 1115 |
+
AgentConfig,
|
| 1116 |
+
create_bird_agent,
|
| 1117 |
+
create_multi_agent,
|
| 1118 |
+
MCPClientManager,
|
| 1119 |
+
get_prompt_for_agent_type
|
| 1120 |
+
)
|
| 1121 |
+
|
| 1122 |
+
# Or import as needed
|
| 1123 |
+
from langgraph_agent import AgentFactory
|
| 1124 |
+
```
|
| 1125 |
+
|
| 1126 |
+
### Create Agent (Simple)
|
| 1127 |
+
|
| 1128 |
+
```python
|
| 1129 |
+
# Minimal
|
| 1130 |
+
agent = await AgentFactory.create_classifier_agent()
|
| 1131 |
+
|
| 1132 |
+
# With memory
|
| 1133 |
+
agent = await AgentFactory.create_classifier_agent(with_memory=True)
|
| 1134 |
+
|
| 1135 |
+
# Custom model
|
| 1136 |
+
agent = await AgentFactory.create_classifier_agent(
|
| 1137 |
+
model_name="gpt-4o",
|
| 1138 |
+
temperature=0.3
|
| 1139 |
+
)
|
| 1140 |
+
```
|
| 1141 |
+
|
| 1142 |
+
### Create Agent (Multi-Server)
|
| 1143 |
+
|
| 1144 |
+
```python
|
| 1145 |
+
# Default (with memory)
|
| 1146 |
+
agent = await AgentFactory.create_multi_server_agent()
|
| 1147 |
+
|
| 1148 |
+
# Custom settings
|
| 1149 |
+
agent = await AgentFactory.create_multi_server_agent(
|
| 1150 |
+
model_name="gpt-4o",
|
| 1151 |
+
temperature=0.5,
|
| 1152 |
+
with_memory=True
|
| 1153 |
+
)
|
| 1154 |
+
```
|
| 1155 |
+
|
| 1156 |
+
### Invoke Agent
|
| 1157 |
+
|
| 1158 |
+
```python
|
| 1159 |
+
# Single turn
|
| 1160 |
+
result = await agent.ainvoke({
|
| 1161 |
+
"messages": [{"role": "user", "content": "Your message"}]
|
| 1162 |
+
})
|
| 1163 |
+
|
| 1164 |
+
# With memory (multi-turn)
|
| 1165 |
+
config = {"configurable": {"thread_id": "session_1"}}
|
| 1166 |
+
result = await agent.ainvoke({
|
| 1167 |
+
"messages": [{"role": "user", "content": "Your message"}]
|
| 1168 |
+
}, config)
|
| 1169 |
+
```
|
| 1170 |
+
|
| 1171 |
+
---
|
| 1172 |
+
|
| 1173 |
+
## Summary
|
| 1174 |
+
|
| 1175 |
+
✅ **Installation:** Install dependencies, set up `.env`
|
| 1176 |
+
✅ **Configuration:** Edit `.env` with API keys and URLs
|
| 1177 |
+
✅ **Usage:** CLI commands or Python imports
|
| 1178 |
+
✅ **CLI:** `demo` for testing (no eBird), `interactive` for chat (needs eBird)
|
| 1179 |
+
✅ **Programmatic:** Import `AgentFactory`, create agents, invoke
|
| 1180 |
+
✅ **Testing:** Run `test_agent.py` to verify setup
|
| 1181 |
+
|
| 1182 |
+
**Ready to build?** Start with these commands (no eBird server needed):
|
| 1183 |
+
```bash
|
| 1184 |
+
# Test the classifier
|
| 1185 |
+
python -m langgraph_agent demo
|
| 1186 |
+
|
| 1187 |
+
# Run basic tests
|
| 1188 |
+
python langgraph_agent/test_agent.py
|
| 1189 |
+
```
|
| 1190 |
+
|
| 1191 |
+
**Want multi-server features?** See [eBird Server Setup](#ebird-server-setup) above.
|
| 1192 |
+
|
| 1193 |
+
**Questions?** Check the [HuggingFace Deployment Guide](../../project_docs/implementation/hf_deploy_planning.md) or [Phase 3 Documentation](../../project_docs/implementation/phase_3.md).
|
| 1194 |
+
|
| 1195 |
+
Happy birding! 🐦
|
langgraph_agent/config.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Configuration for LangGraph agents.
|
| 3 |
+
Loads environment variables and manages settings.
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
from dotenv import load_dotenv
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
load_dotenv()
|
| 10 |
+
|
| 11 |
+
class AgentConfig:
    """Environment-driven configuration for bird classification agents.

    All values are read from environment variables at import time (after the
    module-level load_dotenv() call), falling back to the defaults below.
    """

    # Modal Bird classifier (Phase 1)
    MODAL_MCP_URL: str = os.getenv("MODAL_MCP_URL", "")
    BIRD_CLASSIFIER_API_KEY: str = os.getenv("BIRD_CLASSIFIER_API_KEY", "")

    # eBird Server (Phase 2 - deprecated, replaced by Nuthatch)
    EBIRD_API_KEY: str = os.getenv("EBIRD_API_KEY", "")  # For Cornell eBird API calls
    EBIRD_BASE_URL: str = os.getenv("EBIRD_BASE_URL", "https://api.ebird.org/v2")  # Cornell eBird API endpoint
    EBIRD_MCP_URL: str = os.getenv("EBIRD_MCP_URL", "http://localhost:8000/mcp")  # MCP server endpoint
    EBIRD_USE_STDIO: bool = os.getenv("EBIRD_USE_STDIO", "false").lower() == "true"
    EBIRD_MCP_AUTH_KEY: Optional[str] = os.getenv("EBIRD_MCP_AUTH_KEY")  # For HTTP transport auth (legacy: MCP_API_KEY)

    # Nuthatch Server (Phase 2.5 - species reference database)
    NUTHATCH_API_KEY: str = os.getenv("NUTHATCH_API_KEY", "")  # For Nuthatch API calls
    NUTHATCH_BASE_URL: str = os.getenv("NUTHATCH_BASE_URL", "https://nuthatch.lastelm.software/v2")  # Nuthatch API endpoint
    NUTHATCH_MCP_URL: str = os.getenv("NUTHATCH_MCP_URL", "http://localhost:8001/mcp")  # MCP server endpoint
    NUTHATCH_USE_STDIO: bool = os.getenv("NUTHATCH_USE_STDIO", "true").lower() == "true"
    NUTHATCH_MCP_AUTH_KEY: Optional[str] = os.getenv("NUTHATCH_MCP_AUTH_KEY")  # For HTTP transport

    # LLM Settings
    OPENAI_API_KEY: str = os.getenv("OPENAI_API_KEY", "")  # For dev/testing with backend-provided key
    DEFAULT_OPENAI_MODEL: str = os.getenv("DEFAULT_OPENAI_MODEL", "gpt-4o-mini")
    DEFAULT_HF_MODEL: str = os.getenv("DEFAULT_HF_MODEL", "Qwen/Qwen2.5-Coder-32B-Instruct")

    # Anthropic Settings
    ANTHROPIC_API_KEY: str = os.getenv("ANTHROPIC_API_KEY", "")
    DEFAULT_ANTHROPIC_MODEL: str = os.getenv("DEFAULT_ANTHROPIC_MODEL", "claude-sonnet-4-5-20250929")
    ANTHROPIC_TEMPERATURE: float = float(os.getenv("ANTHROPIC_TEMPERATURE", "0.0"))

    # Provider-specific temperature settings
    OPENAI_TEMPERATURE: float = float(os.getenv("OPENAI_TEMPERATURE", "0.0"))
    HF_TEMPERATURE: float = float(os.getenv("HF_TEMPERATURE", "0.1"))

    # Agent Settings
    MAX_ITERATIONS: int = int(os.getenv("AGENT_MAX_ITERATIONS", "10"))
    TIMEOUT: int = int(os.getenv("AGENT_TIMEOUT", "120"))

    @classmethod
    def print_config(cls) -> None:
        """Print current configuration with all secrets fully masked.

        Fix: the Modal API key was previously echoed with its first 20
        characters visible, which leaks most of the secret into logs.
        It is now reported only as set/not-set, consistent with how the
        Nuthatch key is reported.
        """
        print("\n" + "="*70)
        print("Agent Configuration")
        print("="*70)
        print(f"[MODAL URL]: {cls.MODAL_MCP_URL}")
        print(f"[MODAL API KEY]: {'✅ Set' if cls.BIRD_CLASSIFIER_API_KEY else '❌ Not set'}")
        print(f"[NUTHATCH STDIO]: {cls.NUTHATCH_USE_STDIO}")
        print(f"[NUTHATCH URL]: {cls.NUTHATCH_MCP_URL}")
        print(f"[NUTHATCH API KEY]: {'✅ Set' if cls.NUTHATCH_API_KEY else '❌ Not set'}")
        print(f"[OPENAI MODEL]: {cls.DEFAULT_OPENAI_MODEL}")
        print(f"[OPENAI TEMP]: {cls.OPENAI_TEMPERATURE}")
        print(f"[HF MODEL]: {cls.DEFAULT_HF_MODEL}")
        print(f"[HF TEMP]: {cls.HF_TEMPERATURE}")
        print(f"[ANTHROPIC_MODEL]: {cls.DEFAULT_ANTHROPIC_MODEL}")
        print(f"[ANTHROPIC_TEMP]: {cls.ANTHROPIC_TEMPERATURE}")
        print("="*70 + "\n")
|
langgraph_agent/main.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Main entry point for LangGraph bird classification agents.
|
| 3 |
+
"""
|
| 4 |
+
import asyncio
|
| 5 |
+
import sys
|
| 6 |
+
from typing import Optional
|
| 7 |
+
|
| 8 |
+
from .config import AgentConfig
|
| 9 |
+
from .agents import AgentFactory
|
| 10 |
+
|
| 11 |
+
async def run_classifier_demo(image_url: Optional[str] = None):
    """Run basic classifier demo"""

    AgentConfig.print_config()

    # Create agent
    agent = await AgentFactory.create_classifier_agent()

    # Fall back to a stock Unsplash bird photo when no URL is supplied
    target_url = image_url or "https://images.unsplash.com/photo-1445820200644-69f87d946277?w=400"

    print("="*70)
    print("Testing bird classification...")
    print(f"[IMAGE URL]: {target_url}\n")

    # Single-turn invocation: one user message asking for an identification
    request = {"role": "user", "content": f"What bird species is this? {target_url}"}
    result = await agent.ainvoke({"messages": [request]})

    # The last message in the returned transcript is the agent's final answer
    print("\n[AGENT RESPONSE]:")
    print(result["messages"][-1].content)
    print("\n[DEMO COMPLETE!]\n")
|
| 39 |
+
|
| 40 |
+
async def run_interactive_chat():
    """Run an interactive REPL-style chat session with the multi-server agent.

    Creates an agent with conversation memory and loops on stdin until the
    user quits ('quit'/'exit'/'q' or Ctrl-C). Errors from the agent are
    reported and the loop continues.
    """

    print("\n"+"="*70)
    print("Bird Classification Agent - Interactive Mode")
    print("="*70)
    print("Commands:")
    # Fix: the menu bullets were inconsistent ('. - ' vs ' - '); use one style
    print(" - Type 'quit' or 'exit' to end session")
    print(" - Paste image URLs to classify birds")
    print(" - Ask about bird locations, sightings, hotspots")
    print("="*70+"\n")

    # Create multi-server agent with memory
    agent = await AgentFactory.create_multi_server_agent(with_memory=True)

    # Single thread ID so the checkpointer keeps one continuous conversation
    config = {"configurable": {"thread_id": "interactive_session"}}

    while True:
        try:
            user_input = input("\nYou: ").strip()

            if user_input.lower() in ['quit', 'exit', 'q']:
                print("\nGoodbye! Happy birding!\n")
                break

            if not user_input:
                continue

            # Invoke agent with the memory config so history is threaded in
            result = await agent.ainvoke(
                {"messages": [{"role": "user", "content": user_input}]},
                config
            )

            # Print response (last message in the transcript)
            print(f"\nAgent: {result['messages'][-1].content}")

        except KeyboardInterrupt:
            print("\nGoodbye! Happy birding!\n")
            break
        except Exception as e:
            # Keep the REPL alive on tool/LLM failures; report and re-prompt
            print(f"\n[ERROR]: {e}")
|
| 83 |
+
|
| 84 |
+
def main():
    """CLI entry point: dispatch on the first positional argument.

    Usage:
        python -m langgraph_agent demo [url]   -> one-shot classifier demo
        python -m langgraph_agent interactive  -> chat REPL
        python -m langgraph_agent              -> defaults to the demo
    """

    args = sys.argv[1:]

    if not args:
        # Default: run demo
        asyncio.run(run_classifier_demo())
        return

    command = args[0]
    if command == "interactive":
        asyncio.run(run_interactive_chat())
    elif command == "demo":
        demo_url = args[1] if len(args) > 1 else None
        asyncio.run(run_classifier_demo(demo_url))
    else:
        print(f"Unknown command: {command}")
        print("Usage:")
        print(" python -m langgraph_agent demo [url]")
        print(" python -m langgraph_agent interactive")

if __name__ == "__main__":
    main()
|
langgraph_agent/mcp_clients.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
MCP client configuration and setup for bird classification agents.
|
| 3 |
+
"""
|
| 4 |
+
import os
|
| 5 |
+
from typing import List, Dict, Any
|
| 6 |
+
from langchain_mcp_adapters.client import MultiServerMCPClient
|
| 7 |
+
from .config import AgentConfig
|
| 8 |
+
|
| 9 |
+
class MCPClientManager:
    """Manages MCP client connections to various servers"""

    @staticmethod
    async def create_classifier_client() -> MultiServerMCPClient:
        """
        Create MCP client for Modal bird classifier only.

        Returns:
            MultiServerMCPClient configured for Modal server
        """
        print("[STATUS]: Connecting to Modal MCP server...")

        modal_server = {
            "transport": "streamable_http",
            "url": AgentConfig.MODAL_MCP_URL,
            "headers": {"X-API-Key": AgentConfig.BIRD_CLASSIFIER_API_KEY},
        }
        return MultiServerMCPClient({"bird_classifier": modal_server})

    @staticmethod
    async def create_multi_server_client() -> MultiServerMCPClient:
        """
        Create MCP client for both Modal classifier and Nuthatch species database.

        Returns:
            MultiServerMCPClient configured for both servers
        """
        print("[STATUS]: Connecting to Modal and Nuthatch servers...")

        servers_config = {
            "bird_classifier": {
                "transport": "streamable_http",
                "url": AgentConfig.MODAL_MCP_URL,
                "headers": {"X-API-Key": AgentConfig.BIRD_CLASSIFIER_API_KEY},
            }
        }

        # Nuthatch server (Phase 2.5 - species reference database).
        # Supports both STDIO (default) and HTTP transport.
        if AgentConfig.NUTHATCH_USE_STDIO:
            # STDIO mode: spawn nuthatch_tools.py as a subprocess.
            # HuggingFace Spaces Secrets don't auto-inherit to subprocesses,
            # so the critical env vars are forwarded explicitly.
            servers_config["nuthatch"] = {
                "transport": "stdio",
                "command": "python",
                "args": ["nuthatch_tools.py"],  # Same directory as where app runs
                "env": {
                    "NUTHATCH_API_KEY": AgentConfig.NUTHATCH_API_KEY,
                    "NUTHATCH_BASE_URL": AgentConfig.NUTHATCH_BASE_URL,
                },
            }
        else:
            # HTTP mode - external server (requires nuthatch_tools.py running separately)
            nuthatch_config = {
                "transport": "streamable_http",
                "url": AgentConfig.NUTHATCH_MCP_URL,
            }
            # Add auth header if configured (DebugTokenVerifier expects Bearer token)
            auth_key = AgentConfig.NUTHATCH_MCP_AUTH_KEY
            if auth_key:
                nuthatch_config["headers"] = {"Authorization": f"Bearer {auth_key}"}
            servers_config["nuthatch"] = nuthatch_config

        return MultiServerMCPClient(servers_config)

    @staticmethod
    async def get_tools(client: MultiServerMCPClient) -> List[Any]:
        """
        Get tools from MCP client and print summary.

        Args:
            client: MultiServerMCPClient instance

        Returns:
            List of tools
        """
        print("[STATUS]: Loading MCP tools...")
        tools = await client.get_tools()

        print(f"[LOADED]: {len(tools)} tools available")
        for tool in tools:
            print(f" - {tool.name}")

        return tools
|
langgraph_agent/mcp_clients.py.ebird
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
MCP client configuration and setup for bird classification agents.
|
| 3 |
+
"""
|
| 4 |
+
import os
|
| 5 |
+
from typing import List, Dict, Any
|
| 6 |
+
from langchain_mcp_adapters.client import MultiServerMCPClient
|
| 7 |
+
from .config import AgentConfig
|
| 8 |
+
|
| 9 |
+
class MCPClientManager:
    """Manages MCP client connections to various servers.

    NOTE(review): this is the legacy eBird variant (kept on disk as
    mcp_clients.py.ebird); the active module replaces eBird with the
    Nuthatch species database. Preserved unmodified for reference.
    """

    @staticmethod
    async def create_classifier_client() -> MultiServerMCPClient:
        """
        Create MCP client for Modal bird classifier only.

        Returns:
            MultiServerMCPClient configured for Modal server
        """
        print("[STATUS]: Connecting to Modal MCP server...")

        client = MultiServerMCPClient({
            "bird_classifier": {
                "transport": "streamable_http",
                "url": AgentConfig.MODAL_MCP_URL,
                "headers": {
                    "X-API-Key": AgentConfig.BIRD_CLASSIFIER_API_KEY
                }
            }
        })

        return client

    @staticmethod
    async def create_multi_server_client() -> MultiServerMCPClient:
        """
        Create MCP client for both Modal classifier and eBird server.

        Returns:
            MultiServerMCPClient configured for both servers
        """
        print("[STATUS]: Connecting to Modal and eBird servers...")

        servers_config = {
            "bird_classifier": {
                "transport": "streamable_http",
                "url": AgentConfig.MODAL_MCP_URL,
                "headers": {
                    "X-API-Key": AgentConfig.BIRD_CLASSIFIER_API_KEY
                }
            }
        }

        # Add eBird server (Phase 2)
        if AgentConfig.EBIRD_USE_STDIO:
            # Stdio transport (run server as subprocess)
            # IMPORTANT: Explicitly pass environment variables to subprocess
            # HuggingFace Spaces Secrets don't auto-inherit to subprocesses
            servers_config["ebird"] = {
                "transport": "stdio",
                "command": "python",
                "args": ["ebird_tools.py"],  # Same directory as where app runs
                "env": {
                    # Pass through critical env vars from parent process
                    # HuggingFace Spaces Secrets don't auto-inherit to subprocesses
                    "EBIRD_API_KEY": AgentConfig.EBIRD_API_KEY,
                    "EBIRD_BASE_URL": AgentConfig.EBIRD_BASE_URL,
                    "ENVIRONMENT": os.getenv("ENVIRONMENT", "development"),
                    # Empty string (not None) keeps the subprocess env str-typed
                    "MCP_API_KEY": AgentConfig.EBIRD_MCP_AUTH_KEY or "",
                }
            }
        else:
            # HTTP transport (server running separately)
            ebird_config = {
                "transport": "streamable_http",
                "url": AgentConfig.EBIRD_MCP_URL
            }
            # Add auth header if configured (DebugTokenVerifier expects Bearer token)
            if AgentConfig.EBIRD_MCP_AUTH_KEY:
                ebird_config["headers"] = {
                    "Authorization": f"Bearer {AgentConfig.EBIRD_MCP_AUTH_KEY}"
                }
            servers_config["ebird"] = ebird_config

        client = MultiServerMCPClient(servers_config)
        return client

    @staticmethod
    async def get_tools(client: MultiServerMCPClient) -> List[Any]:
        """
        Get tools from MCP client and print summary.

        Args:
            client: MultiServerMCPClient instance

        Returns:
            List of tools
        """
        print("[STATUS]: Loading MCP tools...")
        tools = await client.get_tools()

        print(f"[LOADED]: {len(tools)} tools available")
        for tool in tools:
            print(f" - {tool.name}")

        return tools
|
langgraph_agent/prompts.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
System prompts and templates for bird classification agents.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Basic classifier agent prompt
|
| 6 |
+
# Basic classifier agent prompt.
# Fix: corrected typos in the final sentence of the runtime prompt string
# ("explain our role and abiltiies" -> "explain your role and abilities").
CLASSIFIER_AGENT_PROMPT = """
You are a helpful bird identification assistant.

When a user provides an image URL, use the classify_from_url tool to identify the bird species.
When a user provides a base64 image, use the classify_from_base64 tool.

Always report:
- Species name
- Confidence score (as percentage)
- Any interesting facts about the bird (if you know them)

If information is unavailable or you are not sure do not make it up but instead just state that you don't know.

If a user's query is not related to birds or bird identification then politely explain your role and abilities and that you can only answer bird-related queries.
"""
|
| 21 |
+
|
| 22 |
+
# Multi-server agent prompt (classifier + eBird)
|
| 23 |
+
MULTI_SERVER_AGENT_PROMPT = """
|
| 24 |
+
You are an expert bird identification and exploration assistant with access to two powerful systems:
|
| 25 |
+
|
| 26 |
+
1. **Bird Classifier** (Modal GPU)
|
| 27 |
+
- classify_from_url: Identify birds from image URLs
|
| 28 |
+
- classify_from_base64: Identify birds from base64 images
|
| 29 |
+
|
| 30 |
+
2. **eBird Database** (Global bird sighting data)
|
| 31 |
+
- search_species: Find species codes and info
|
| 32 |
+
- get_recent_sightings_nearby: Find recent sightings near a location
|
| 33 |
+
- find_hotspots_nearby: Discover popular birding locations
|
| 34 |
+
- get_location_birds: See all birds at a location
|
| 35 |
+
- get_notable_sightings: Find rare birds in a region
|
| 36 |
+
- analyze_location: Comprehensive location analysis
|
| 37 |
+
|
| 38 |
+
**Your workflow:**
|
| 39 |
+
1. When given an image: Classify it first, then optionally provide context using eBird tools
|
| 40 |
+
2. When asked about locations: Use eBird tools to find sightings and hotspots
|
| 41 |
+
3. When asked about species: Use search_species to get the species code, then other tools for details
|
| 42 |
+
|
| 43 |
+
Always be:
|
| 44 |
+
- Friendly and enthusiastic about birds
|
| 45 |
+
- Accurate with species identification
|
| 46 |
+
- Helpful with location recommendations
|
| 47 |
+
- Clear about confidence levels
|
| 48 |
+
|
| 49 |
+
If you're unsure, say so. Birding is about learning and discovery!
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
NUTHATCH_BIRDSCOPE_PROMPT = """You are BirdScope AI - an enthusiastic bird identification and education assistant!
|
| 53 |
+
|
| 54 |
+
**Your Mission:**
|
| 55 |
+
Help users identify birds from photos and provide rich educational content about bird species,
|
| 56 |
+
including reference images, audio recordings, conservation status, and taxonomic information.
|
| 57 |
+
|
| 58 |
+
**Available Tools (9 total):**
|
| 59 |
+
|
| 60 |
+
🔍 **Bird Classifier (Modal GPU):**
|
| 61 |
+
- classify_from_url: Identify birds from image URLs
|
| 62 |
+
- classify_from_base64: Identify birds from base64-encoded images
|
| 63 |
+
|
| 64 |
+
📚 **Species Database (Nuthatch API - 1000+ species):**
|
| 65 |
+
- search_birds: Multi-filter search (name, family, region, conservation status)
|
| 66 |
+
- get_bird_info: Complete species profile (taxonomy, size, status, images, audio count)
|
| 67 |
+
- get_bird_images: High-quality reference photos from Unsplash
|
| 68 |
+
- get_bird_audio: Sound recordings from xeno-canto.org
|
| 69 |
+
- search_by_family: Find all species in a taxonomic family
|
| 70 |
+
- filter_by_status: Search by conservation status (Endangered, Low Concern, etc.)
|
| 71 |
+
- get_all_families: List all bird families in the database
|
| 72 |
+
|
| 73 |
+
**Your Workflow:**
|
| 74 |
+
1. **Image Identification**: When users upload a bird photo, the classifier will identify it automatically
|
| 75 |
+
2. **Rich Context**: After identification, use Nuthatch tools to provide:
|
| 76 |
+
- Reference images (show users what the species looks like from different angles)
|
| 77 |
+
- Audio recordings (let them hear the bird's call/song)
|
| 78 |
+
- Conservation status and interesting facts
|
| 79 |
+
- Related species in the same family
|
| 80 |
+
3. **Educational Queries**: When users ask about specific birds or families, use search and info tools
|
| 81 |
+
4. **Visual Learning**: Always offer to show images and audio when discussing a species
|
| 82 |
+
|
| 83 |
+
**Response Style:**
|
| 84 |
+
- Be enthusiastic and educational
|
| 85 |
+
- Cite confidence scores for image classifications
|
| 86 |
+
- Describe what makes each species unique (visual features, sounds, behavior)
|
| 87 |
+
- Format image and audio URLs as clickable markdown links
|
| 88 |
+
- Mention conservation status to raise awareness
|
| 89 |
+
- Suggest related species they might be interested in
|
| 90 |
+
|
| 91 |
+
**Important Notes:**
|
| 92 |
+
- This is a SPECIES REFERENCE system, not a location finder (no real-time sightings or hotspots)
|
| 93 |
+
- Focus on "What is this bird?" rather than "Where can I see it?"
|
| 94 |
+
- Images are from Unsplash and curator collections (always high quality)
|
| 95 |
+
- Audio is from xeno-canto.org (community-contributed bird recordings)
|
| 96 |
+
- Database covers 1000+ species (primarily North America and Western Europe)
|
| 97 |
+
|
| 98 |
+
Let's explore the amazing world of birds together! 🦅
|
| 99 |
+
"""
|
| 100 |
+
|
| 101 |
+
# Conversational agent with memory
|
| 102 |
+
CONVERSATIONAL_AGENT_PROMPT = """
|
| 103 |
+
You are a knowledgeable and friendly bird expert assistant. You have access to:
|
| 104 |
+
- Bird image classification tools
|
| 105 |
+
- eBird global bird sightings database
|
| 106 |
+
- Birding hotspot information
|
| 107 |
+
|
| 108 |
+
You remember our conversation history, so:
|
| 109 |
+
- Reference birds we've discussed previously
|
| 110 |
+
- Build on location context from earlier in the conversation
|
| 111 |
+
- Suggest related species or locations based on user interests
|
| 112 |
+
|
| 113 |
+
Keep responses concise but informative. Don't overwhelm the user with data, but if a user shows interest in a particular species or location, offer to provide more details.
|
| 114 |
+
"""
|
| 115 |
+
|
| 116 |
+
# BirdScope AI Web Interface Prompt (used in Gradio app)
|
| 117 |
+
BIRDSCOPE_AI_PROMPT = """You are BirdScope AI - an enthusiastic bird expert!
|
| 118 |
+
|
| 119 |
+
Help users identify birds and find amazing bird locations.
|
| 120 |
+
Always be educational and cite your sources.
|
| 121 |
+
|
| 122 |
+
**Your capabilities:**
|
| 123 |
+
- Identify birds from uploaded images using state-of-the-art classification
|
| 124 |
+
- Find recent bird sightings in any location using eBird data
|
| 125 |
+
- Recommend birding hotspots and popular locations
|
| 126 |
+
- Provide species information and interesting facts
|
| 127 |
+
|
| 128 |
+
**Response style:**
|
| 129 |
+
- Be friendly and encouraging
|
| 130 |
+
- Cite confidence scores for identifications
|
| 131 |
+
- Provide actionable location recommendations when asked
|
| 132 |
+
- Format responses clearly
|
| 133 |
+
|
| 134 |
+
Let's explore the amazing world of birds together!"""
|
| 135 |
+
|
| 136 |
+
AUDIO_FINDER_PROMPT = """You are BirdScope Audio Finder, a specialized agent for finding and retrieving bird audio recordings.
|
| 137 |
+
|
| 138 |
+
**Your Mission:**
|
| 139 |
+
Help us discover bird songs and calls by finding species with available audio recordings.
|
| 140 |
+
|
| 141 |
+
**Your Tools:**
|
| 142 |
+
1. **search_birds(name, family, region, status, page_size)**
|
| 143 |
+
- Multi-filter search for bird species
|
| 144 |
+
- IMPORTANT: At least ONE filter is required (name, family, region, or status)
|
| 145 |
+
- ⚠️ CRITICAL: DO NOT use `has_audio` as a parameter - it will cause an error!
|
| 146 |
+
- The API returns a `has_audio` field in RESULTS that you check after the call
|
| 147 |
+
- Available parameters ONLY: name, family, region, status, page_size
|
| 148 |
+
|
| 149 |
+
2. **get_bird_info(name)**
|
| 150 |
+
- Get complete species information
|
| 151 |
+
- Check `audio_count` field to verify recordings exist
|
| 152 |
+
|
| 153 |
+
3. **get_bird_audio(name, max_recordings)**
|
| 154 |
+
- Fetch actual audio recordings from xeno-canto.org
|
| 155 |
+
- Returns recording metadata and download URLs
|
| 156 |
+
- Only call this for birds that have `has_audio=true` in their search results
|
| 157 |
+
|
| 158 |
+
**CRITICAL WORKFLOW for "find audio for any bird":**
|
| 159 |
+
The API has NO `has_audio` filter parameter. You MUST use this two-step process:
|
| 160 |
+
|
| 161 |
+
1. **First**: Call `search_birds()` with ONE of these parameters:
|
| 162 |
+
- For "any bird": `region="North America"` (most coverage)
|
| 163 |
+
- For specific bird: `name="Bird Name"`
|
| 164 |
+
- For family search: `family="Family Name"`
|
| 165 |
+
- ⚠️ DO NOT include `has_audio` in the function call - it's not a valid parameter!
|
| 166 |
+
|
| 167 |
+
2. **Then**: Look at the RESULTS and find birds where `has_audio=true`
|
| 168 |
+
|
| 169 |
+
3. **Finally**: Call `get_bird_audio(name)` on a bird that has audio
|
| 170 |
+
|
| 171 |
+
**Known Birds With Audio:**
|
| 172 |
+
- Snow Goose, Common Goldeneye, Gadwall, Tundra Swan, Ross's Goose
|
| 173 |
+
- Use these as fallbacks if search yields no audio
|
| 174 |
+
|
| 175 |
+
**Response Style:**
|
| 176 |
+
- Always confirm which bird you're fetching audio for
|
| 177 |
+
- Mention the number of recordings available
|
| 178 |
+
- Provide recording details (location, date, type, recordist)
|
| 179 |
+
- **CRITICAL**: Include the FULL file_url from the API response in your text
|
| 180 |
+
Example: "Recording: https://xeno-canto.org/143610/download"
|
| 181 |
+
This ensures URLs are formatted as clickable links for the user
|
| 182 |
+
|
| 183 |
+
**Example Interaction:**
|
| 184 |
+
User: "Find audio recordings for any bird"
|
| 185 |
+
You:
|
| 186 |
+
1. Call search_birds(region="North America", page_size=20)
|
| 187 |
+
2. Filter results for has_audio=true
|
| 188 |
+
3. Pick first bird with audio (e.g., "Black-bellied Whistling-Duck")
|
| 189 |
+
4. Call get_bird_audio("Black-bellied Whistling-Duck", max_recordings=1)
|
| 190 |
+
5. Present: "Found 10 recordings for Black-bellied Whistling-Duck. Here's a sample:
|
| 191 |
+
|
| 192 |
+
Recording from Orlando Wetlands, Florida (July 18, 2013)
|
| 193 |
+
Recordist: Paul Marvin
|
| 194 |
+
Quality: A
|
| 195 |
+
https://xeno-canto.org/143610/download"
|
| 196 |
+
|
| 197 |
+
**Error Handling:**
|
| 198 |
+
- If search returns no birds: suggest broadening filters
|
| 199 |
+
- If no birds have audio: inform user and suggest known birds with audio
|
| 200 |
+
- If get_bird_audio fails: the bird may not have recordings despite database indicating otherwise
|
| 201 |
+
"""
|
| 202 |
+
|
| 203 |
+
def get_prompt_for_agent_type(agent_type: str) -> str:
    """Get the appropriate system prompt for the agent type.

    Args:
        agent_type: One of "classifier", "multi_server", "conversational",
            "birdscope_ai", "nuthatch_birdscope", or "audio_finder".

    Returns:
        The matching prompt string; unknown types fall back to
        CLASSIFIER_AGENT_PROMPT.
    """
    prompts = {
        "classifier": CLASSIFIER_AGENT_PROMPT,
        "multi_server": MULTI_SERVER_AGENT_PROMPT,
        "conversational": CONVERSATIONAL_AGENT_PROMPT,
        "birdscope_ai": BIRDSCOPE_AI_PROMPT,
        "nuthatch_birdscope": NUTHATCH_BIRDSCOPE_PROMPT,
        # Fix: AUDIO_FINDER_PROMPT is defined in this module but was
        # unreachable through this lookup; backward-compatible addition.
        "audio_finder": AUDIO_FINDER_PROMPT,
    }
    return prompts.get(agent_type, CLASSIFIER_AGENT_PROMPT)
|
| 213 |
+
|
langgraph_agent/simple_demo.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Simple LangGraph agent demo connecting to Modal bird classifier MCP
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import asyncio
|
| 6 |
+
import os
|
| 7 |
+
from dotenv import load_dotenv
|
| 8 |
+
from langchain_mcp_adapters.client import MultiServerMCPClient
|
| 9 |
+
from langchain.agents import create_agent
|
| 10 |
+
from langchain_openai import ChatOpenAI
|
| 11 |
+
|
| 12 |
+
load_dotenv()
|
| 13 |
+
|
| 14 |
+
async def create_bird_agent():
    """Create and return a bird classification agent.

    Connects to the Modal bird-classifier MCP server, loads its tools,
    and wraps them in a LangGraph agent backed by gpt-4o-mini.

    Returns:
        A LangGraph agent ready for ``ainvoke``.

    Raises:
        RuntimeError: if the required environment variables are missing,
            so misconfiguration fails fast instead of surfacing as an
            opaque connection error inside the MCP client.
    """
    # Fail fast on missing configuration: MultiServerMCPClient would
    # otherwise be handed url=None and fail with a confusing error later.
    mcp_url = os.getenv("MODAL_MCP_URL")
    api_key = os.getenv("BIRD_CLASSIFIER_API_KEY")
    if not mcp_url or not api_key:
        raise RuntimeError(
            "MODAL_MCP_URL and BIRD_CLASSIFIER_API_KEY must be set "
            "(see .env.example)"
        )

    # 1. Configure MCP client for Modal bird classifier
    print("[STATUS]: Connecting to Modal MCP server...")

    client = MultiServerMCPClient({
        "bird_classifier": {
            "transport": "streamable_http",
            "url": mcp_url,
            "headers": {
                "X-API-Key": api_key
            }
        }
    })

    # 2. Get tools from MCP server
    print("[STATUS]: Loading MCP tools...")
    tools = await client.get_tools()
    print(f"[LOADED]: {len(tools)} tools - {[t.name for t in tools]}")

    # 3. Create agent with model and tools
    print("[STATUS]: Creating LangGraph agent...")
    agent = create_agent(
        model=ChatOpenAI(
            model="gpt-4o-mini",
            temperature=0  # deterministic output for identification tasks
        ),
        tools=tools,
        system_prompt="""
You are a helpful bird identification assistant.
When a user provides an image URL, use the classify_from_url tool to identify the bird species.
Always report the species name and confidence score in a friendly manner."""
    )

    return agent
|
| 50 |
+
|
| 51 |
+
async def main():
    """Run simple bird classification agent demo"""
    demo_agent = await create_bird_agent()

    # 4. Test with a bird image URL — print a banner around the run.
    separator = "=" * 70
    print("\n" + separator)
    print("[STATUS]: Testing bird classification...")
    print(separator + "\n")

    test_url = "https://images.unsplash.com/photo-1445820200644-69f87d946277?w=400&auto=format&fit=crop&q=60&ixlib=rb-4.1.0&ixid=M3wxMjA3fDB8MHxzZWFyY2h8MTV8fGJpcmR8ZW58MHx8MHx8fDA%3D"

    user_message = {
        "role": "user",
        "content": f"What bird species is this? {test_url}"
    }
    result = await demo_agent.ainvoke({"messages": [user_message]})

    # 5. Print results
    print("\n[AGENT RESPONSE]:")
    print(result["messages"][-1].content)

    print("\n[DEMO COMPLETE!]")
|
| 76 |
+
|
| 77 |
+
# Script entry point: run the async demo inside a fresh asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
|
langgraph_agent/structured_output.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Structured output parsing using LlamaIndex Pydantic Programs.
|
| 3 |
+
Ensures consistent image formatting in agent responses.
|
| 4 |
+
|
| 5 |
+
HACKATHON OPTIMIZED: Uses regex extraction instead of LLM calls for speed.
|
| 6 |
+
"""
|
| 7 |
+
from typing import List, Optional
|
| 8 |
+
import re
|
| 9 |
+
from pydantic import BaseModel, Field
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class BirdIdentificationResponse(BaseModel):
    """Structured response for bird identification.

    NOTE(review): despite the module's LlamaIndex framing, this is a plain
    pydantic model (imported from ``pydantic``) — confirm whether a
    LlamaIndex Pydantic Program still consumes it anywhere.
    """

    # Main response text with bird identification, facts, or information.
    summary: str = Field(
        description="Main response text with bird identification, facts, or information"
    )
    # Common name; None when no species could be extracted from the text.
    species_name: Optional[str] = Field(
        default=None,
        description="Common name of the bird species (e.g., 'Northern Cardinal')"
    )
    # Image URLs harvested from the raw agent response.
    image_urls: List[str] = Field(
        default_factory=list,
        description="List of image URLs to display for this bird"
    )
    # Audio URLs (direct files or xeno-canto download links).
    audio_urls: List[str] = Field(
        default_factory=list,
        description="List of audio URLs (bird calls/songs)"
    )
    # Classifier confidence; currently never populated by the parser.
    confidence_score: Optional[float] = Field(
        default=None,
        description="Confidence score from classifier (0.0-1.0)"
    )
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def extract_urls_from_text(text: str) -> tuple[List[str], List[str]]:
    """
    Extract image and audio URLs from text using regex.

    Matching is case-insensitive. Results are de-duplicated while
    preserving first-occurrence order, so output is deterministic —
    the previous ``list(set(...))`` approach produced an arbitrary
    order that varied between runs (string-hash randomization).

    Args:
        text: Free-form agent output that may embed URLs.

    Returns:
        tuple: (image_urls, audio_urls)
    """
    # Pattern for image URLs (jpg, jpeg, png, gif, webp, svg),
    # optionally followed by a query string.
    image_pattern = r'https?://[^\s<>"{}|\\^`\[\]]+\.(?:jpg|jpeg|png|gif|webp|svg)(?:\?[^\s]*)?'

    # Audio URLs: direct audio files (.mp3, .wav, .ogg, .m4a)
    # OR xeno-canto.org URLs ending in /download.
    audio_pattern_files = r'https?://[^\s<>"{}|\\^`\[\]]+\.(?:mp3|wav|ogg|m4a)(?:\?[^\s]*)?'
    audio_pattern_xenocanto = r'https?://xeno-canto\.org/\d+/download'

    # dict.fromkeys de-duplicates while keeping first-seen order.
    image_urls = list(dict.fromkeys(re.findall(image_pattern, text, re.IGNORECASE)))
    audio_urls_files = list(dict.fromkeys(re.findall(audio_pattern_files, text, re.IGNORECASE)))
    audio_urls_xenocanto = list(dict.fromkeys(re.findall(audio_pattern_xenocanto, text, re.IGNORECASE)))

    # Direct audio files first, then xeno-canto links.
    return image_urls, audio_urls_files + audio_urls_xenocanto
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def extract_species_name(text: str) -> Optional[str]:
    """
    Try to extract a species name from common phrasings in a response.

    Patterns are tried in order ("identified as X", "species: X",
    "This is a X"); the first match wins. Returns None when no
    pattern matches.
    """
    candidate_patterns = (
        r'identified as[:\s]+([A-Z][a-z]+(?:\s+[A-Z][a-z]+){0,3})',
        r'species[:\s]+([A-Z][a-z]+(?:\s+[A-Z][a-z]+){0,3})',
        r'This is (?:a |an )?([A-Z][a-z]+(?:\s+[A-Z][a-z]+){0,3})',
    )

    # First pattern that hits yields its captured name; otherwise None.
    return next(
        (m.group(1) for p in candidate_patterns if (m := re.search(p, text))),
        None,
    )
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
async def parse_agent_response(
    raw_response: str,
    provider: str,
    api_key: str,
    model: str
) -> str:
    """
    Parse agent response into structured format and reformat with guaranteed markdown.

    OPTIMIZED FOR HACKATHON: uses regex extraction instead of an LLM call,
    while still funnelling the data through the pydantic model.

    Args:
        raw_response: The agent's raw text response
        provider: LLM provider ("openai", "anthropic", "huggingface") —
            kept for interface compatibility; unused in this version.
        api_key: API key (unused in optimized version)
        model: Model name (unused in optimized version)

    Returns:
        Formatted markdown response with guaranteed image syntax; the
        original response is returned unchanged when no media is found
        or when parsing fails.
    """
    try:
        print("[STRUCTURED OUTPUT] Starting parsing...")

        # Extract URLs using regex (fast, no API call)
        image_urls, audio_urls = extract_urls_from_text(raw_response)

        print(f"[STRUCTURED OUTPUT] Found {len(image_urls)} images, {len(audio_urls)} audio files")

        # Extract species name if possible
        species_name = extract_species_name(raw_response)

        # Create structured response via the pydantic model
        structured = BirdIdentificationResponse(
            summary=raw_response,  # keep full response as summary
            species_name=species_name,
            image_urls=image_urls,
            audio_urls=audio_urls,
            confidence_score=None  # could extract with regex if needed
        )

        # Nothing to reformat — hand back the original text untouched.
        if not structured.image_urls and not structured.audio_urls:
            print("[STRUCTURED OUTPUT] No images or audio found, returning original")
            return raw_response

        # Reformat into markdown with guaranteed images
        formatted_parts = []

        # Strip already-formatted media references from the summary so the
        # sections appended below do not duplicate them.
        clean_summary = raw_response
        for url in image_urls:
            # Remove existing markdown images referencing this URL
            clean_summary = re.sub(rf'!\[([^\]]*)\]\({re.escape(url)}\)', '', clean_summary)
            # Remove plain URLs
            clean_summary = clean_summary.replace(url, '')

        for url in audio_urls:
            clean_summary = clean_summary.replace(url, '')

        formatted_parts.append(clean_summary.strip())

        # Add images with markdown syntax
        if structured.image_urls:
            formatted_parts.append("\n### Images\n")
            for idx, url in enumerate(structured.image_urls, 1):
                # Use species name if available, otherwise generic
                alt_text = structured.species_name or f"Bird {idx}"
                # BUG FIX: the previous version appended an empty f-string
                # here, so images were never actually rendered despite the
                # "guaranteed image syntax" contract. Emit real markdown.
                formatted_parts.append(f"![{alt_text}]({url})")

        # Add audio links if present
        if structured.audio_urls:
            formatted_parts.append("\n### Audio Recordings\n")
            for idx, url in enumerate(structured.audio_urls, 1):
                # Strip /download from xeno-canto URLs for browser-friendly links
                display_url = url.replace("/download", "") if "xeno-canto.org" in url else url
                formatted_parts.append(f"🔊 [Listen to recording {idx}]({display_url})")

        result = "\n\n".join(formatted_parts)
        print("[STRUCTURED OUTPUT] ✅ Successfully formatted response")
        return result

    except Exception as e:
        # Fallback: never let formatting break the user-visible response.
        print(f"[STRUCTURED OUTPUT] ❌ Parsing failed: {e}")
        return raw_response
|
langgraph_agent/subagent_config.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Specialized Subagent Configuration
|
| 3 |
+
|
| 4 |
+
Defines specialized agents with focused tool subsets for better performance.
|
| 5 |
+
Uses SubAgentMiddleware pattern from LangGraph deep agents.
|
| 6 |
+
"""
|
| 7 |
+
from typing import Dict, List
|
| 8 |
+
from .config import AgentConfig
|
| 9 |
+
from .prompts import NUTHATCH_BIRDSCOPE_PROMPT, AUDIO_FINDER_PROMPT
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class SubAgentConfig:
    """Configuration for specialized subagents.

    Pure data: every method returns static dicts/strings. Kept free of
    behavior so prompts and tool subsets can be audited in one place.
    """

    @staticmethod
    def get_mode_definitions() -> Dict[str, Dict]:
        """
        Define agent modes (how subagents are composed).

        Each mode names the subagents it uses (keys into
        get_subagent_definitions) and whether a router/supervisor
        orchestrates them.

        Returns:
            Dict mapping mode names to their configurations
        """
        return {
            "Specialized Subagents (3 Specialists)": {
                "description": "Router orchestrates 3 specialized agents",
                "subagents": ["image_identifier", "species_explorer", "taxonomy_specialist"],
                "use_router": True
            },
            "Audio Finder Agent": {
                "description": "Specialized agent for finding birds with audio recordings",
                "subagents": ["generalist"],
                "use_router": False
            }
        }

    @staticmethod
    def get_subagent_definitions() -> Dict[str, Dict]:
        """
        Define specialized subagents with their tool subsets and prompts.

        NOTE(review): the markdown image examples inside the prompt strings
        (``![Species Name](image_url)``) were reconstructed from a lossy
        rendering of this file — verify against the original source.

        Returns:
            Dict mapping subagent names to their configurations
        """
        return {
            "generalist": {
                "name": "BirdScope AI Generalist",
                "description": "All-in-one bird identification expert with access to all tools",
                "tools": [
                    "search_birds",  # Required to find any birds
                    "get_bird_info",  # Get details including audio count
                    "get_bird_audio"  # Fetch actual audio recordings
                ],
                "prompt": AUDIO_FINDER_PROMPT,  # We'll create this next
                "temperature": AgentConfig.OPENAI_TEMPERATURE,
            },
            "image_identifier": {
                "name": "Image Identification Specialist",
                "description": "Expert at identifying birds from images and providing initial species information",
                "tools": [
                    "classify_from_url",
                    "classify_from_base64",
                    "get_bird_info",
                    "get_bird_images"
                ],
                "prompt": """You are an Image Identification Specialist focused on bird recognition.
**Your Role:**
1. Use classification tools to identify birds from uploaded images
2. Provide accurate species identification with confidence scores
3. Fetch basic species information (taxonomy, size, status)
4. Show reference images to help users verify identification

**Response Style:**
- Lead with the bird's common name and scientific name
- Always cite confidence scores from classifier
- Describe key identifying features visible in the image
- Show reference images using markdown image syntax: ![Species Name](image_url)
- Mention if confidence is low and suggest why
- Keep responses focused and concise

**When to defer:**
- For audio recordings -> species_explorer
- For family/taxonomy queries -> taxonomy_specialist
- For conservation status searches -> taxonomy_specialist
""",
                "temperature": AgentConfig.OPENAI_TEMPERATURE,
            },
            "species_explorer": {
                "name": "Species Exploration Specialist",
                "description": "Expert at finding birds by name, exploring families, and providing multimedia content",
                "tools": [
                    "search_birds",
                    "get_bird_info",
                    "get_bird_images",
                    "get_bird_audio",
                    "search_by_family"
                ],
                "prompt": """You are a Species Exploration specialist who helps users learn about birds.

**Your Role:**
1. Search for birds by common name or partial matches
2. Provide comprehensive species profiles with images and audio
3. Show related species in the same family
4. Help users discover new birds based on their interests

**Search Strategy (IMPORTANT):**
- If a search returns no results, try progressively simpler queries:
  * "Rock Dove" → try "Dove"
  * "Northern Cardinal" → try "Cardinal"
  * "Red-tailed Hawk" → try "Hawk"
- Return the closest relevant match and explain what you found
- If still no results, suggest similar species the user might be interested in

**Response Style:**
- Be enthusiastic and educational
- Always provide images when available using markdown image syntax: ![Species Name](image_url)
- Offer audio recordings to help users learn bird calls (if available)
- Suggest related species users might enjoy
- Describe what makes each bird unique
- If you had to search multiple times, mention it briefly: "I found information on Dove (the database uses this simplified name)"

**When to defer:**
- For image identification -> image_identifier
- For conservation status filtering -> taxonomy_specialist
- For broad taxonomy questions -> taxonomy_specialist
""",
                "temperature": 0.1,  # slightly creative for educational content
            },
            "taxonomy_specialist": {
                "name": "Taxonomy & Conservation Specialist",
                "description": "Expert at bird families, taxonomic classification, and conservation status",
                "tools": [
                    "filter_by_status",
                    "search_by_family",
                    "get_all_families",
                    "get_bird_info"
                ],
                "prompt": """You are a Taxonomy & Conservation Specialist with deep knowledge of bird classification.

**Your Role:**
1. Explain bird family relationships and taxonomic structure
2. Find birds by conservation status
3. Provide comprehensive family overviews
4. Educate users about bird conservation

**Search Strategy (IMPORTANT):**
- When searching by family name, if no results:
  * Try variations: "Cardinalidae" → "Cardinal"
  * Try broader terms: specific family → general group
- Return the closest match and explain any differences
- If user asks about a species but you only have family tools, get basic info with get_bird_info

**Response Style:**
- Use proper taxonomic terminology but explain it clearly
- Emphasize conservation status and threats
- Show how species relate within families
- Provide context about family characteristics
- Be educational but accessible
- If you had to adjust the search, explain briefly: "The database lists this family as..."

**When to defer:**
- For image identification -> image_identifier
- For audio or species discovery -> species_explorer
- For specific species details -> species_explorer
""",
                "temperature": AgentConfig.OPENAI_TEMPERATURE,
            }
        }

    @staticmethod
    def get_router_prompt() -> str:
        """
        Prompt for the supervisor agent that routes to subagents.

        Returns:
            Supervisor agent system prompt
        """
        return """You are BirdScope AI Supervisor - an intelligent orchestrator for bird identification.

**Your Team:**
- **image_identifier**: Identifies birds from photos using ML classification
- **species_explorer**: Searches species by name, provides multimedia (images/audio)
- **taxonomy_specialist**: Conservation status, taxonomic families, classification

**Your Role:**
Analyze each user request and route it to the MOST appropriate specialist.

**Routing Guidelines:**
1. **Image uploads/URLs** → image_identifier (has classification tools)
2. **"Show me"/"Find"/"Search" + species name** → species_explorer (has search tools)
3. **"Audio"/"sound"/"call"/"song"** → species_explorer (has audio tools)
4. **"Family"/"families" + broad questions** → taxonomy_specialist (has family tools)
5. **"Conservation"/"endangered"/"threatened"** → taxonomy_specialist (has status filters)
6. **"Related species"/"similar birds"** → species_explorer (explores connections)

**Decision-making:**
- Consider the user's INTENT, not just keywords
- Route to ONE specialist at a time
- Trust your specialists' expertise
- After specialist responds, you can route follow-ups to different specialists

**Important:**
- Be decisive - route quickly
- Don't duplicate specialist work - let them handle their domain
- Synthesize multi-turn conversations if needed
"""

    @staticmethod
    def get_mode_config(mode_name: str) -> Dict:
        """
        Get configuration for a specific mode.

        Args:
            mode_name: Name of the mode (e.g., "Single Agent (All Tools)")

        Returns:
            Mode configuration dict

        Raises:
            ValueError: if *mode_name* is not a key of get_mode_definitions(),
                listing the valid options in the message.
        """
        modes = SubAgentConfig.get_mode_definitions()
        if mode_name not in modes:
            raise ValueError(f"Unknown mode: {mode_name}. Available: {list(modes.keys())}")
        return modes[mode_name]
|
langgraph_agent/subagent_factory.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Subagent Factory
|
| 3 |
+
|
| 4 |
+
Creates specialized agents with filtered tool subsets.
|
| 5 |
+
"""
|
| 6 |
+
from typing import List, Dict, Any
|
| 7 |
+
from langchain_core.language_models import BaseChatModel
|
| 8 |
+
from langchain.agents import create_agent
|
| 9 |
+
from langgraph.checkpoint.memory import InMemorySaver
|
| 10 |
+
from .subagent_config import SubAgentConfig
|
| 11 |
+
from .config import AgentConfig
|
| 12 |
+
|
| 13 |
+
class SubAgentFactory:
    """Factory for creating specialized subagents.

    Each subagent is a LangGraph agent built from the full MCP tool list,
    narrowed to the tool names declared in SubAgentConfig.
    """

    @staticmethod
    async def create_subagent(
        subagent_name: str,
        all_tools: List[Any],
        llm: BaseChatModel
    ):
        """
        Create a specialized subagent with filtered tools.

        Args:
            subagent_name: Name of the subagent (e.g., "image_identifier")
            all_tools: Full list of available tools
            llm: Language model instance

        Returns:
            LangGraph agent configured for the subagent

        Raises:
            ValueError: if *subagent_name* is not defined in SubAgentConfig.
        """
        # Get subagent configuration
        definitions = SubAgentConfig.get_subagent_definitions()

        if subagent_name not in definitions:
            raise ValueError(f"Unknown subagent: {subagent_name}")

        config = definitions[subagent_name]

        # Filter tools for this subagent.
        # NOTE(review): tool names listed in the config but absent from
        # all_tools are silently dropped — consider logging a warning when
        # the filtered set is smaller than config["tools"].
        allowed_tool_names = set(config["tools"])
        subagent_tools = [
            tool for tool in all_tools
            if tool.name in allowed_tool_names
        ]

        print(f"[SUBAGENT]: Creating {config['name']}")
        print(f"  • Tools: {', '.join([t.name for t in subagent_tools])}")

        # Create specialized agent with filtered tools and name
        # Note: create_agent auto-compiles, so we pass name directly
        agent = create_agent(
            model=llm,
            tools=subagent_tools,
            system_prompt=config["prompt"],
            name=subagent_name
        )

        return agent

    @staticmethod
    async def create_all_subagents(
        all_tools: List[Any],
        llm: BaseChatModel
    ) -> Dict[str, Any]:
        """
        Create all specialized subagents.

        Args:
            all_tools: Full list of available tools
            llm: Language model instance

        Returns:
            Dict mapping subagent names to agent instances
        """
        definitions = SubAgentConfig.get_subagent_definitions()
        subagents = {}

        # Build each specialist sequentially so its log output stays grouped.
        for name in definitions.keys():
            subagents[name] = await SubAgentFactory.create_subagent(
                name, all_tools, llm
            )

        return subagents
|
langgraph_agent/subagent_router.py.legacy
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Subagent Router
|
| 3 |
+
|
| 4 |
+
Orchestrates routing between specialized subagents using LangGraph's
|
| 5 |
+
delegation pattern.
|
| 6 |
+
"""
|
| 7 |
+
from typing import Dict, Any, List, Literal
|
| 8 |
+
from langchain_core.language_models import BaseChatModel
|
| 9 |
+
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
|
| 10 |
+
from langgraph.graph import StateGraph, MessagesState, START, END
|
| 11 |
+
from langgraph.prebuilt import ToolNode
|
| 12 |
+
from langgraph.checkpoint.memory import InMemorySaver
|
| 13 |
+
from .subagent_config import SubAgentConfig
|
| 14 |
+
from .subagent_factory import SubAgentFactory
|
| 15 |
+
|
| 16 |
+
async def create_router_agent(all_tools: List[Any], llm: BaseChatModel):
    """
    Create a router agent that orchestrates specialized subagents.

    LEGACY (per the .legacy file suffix): superseded by the supervisor-based
    workflow in subagent_supervisor.py. Kept for reference.

    Args:
        all_tools: Full list of available MCP tools
        llm: Language model for the router

    Returns:
        Compiled LangGraph workflow
    """

    async def router_node(state: MessagesState):
        """Main router that delegates to subagents."""
        # Get routing instructions
        router_prompt = SubAgentConfig.get_router_prompt()

        # Add system message with routing instructions
        messages = [SystemMessage(content=router_prompt)] + state["messages"]

        # Router decides which subagent to use
        response = await llm.ainvoke(messages)

        # Extract subagent name from response (you could make this more sophisticated)
        # For now, the router will use tools to delegate
        return {"messages": [response]}

    async def create_subagent_node(subagent_name: str):
        """Create a node for a specific subagent."""
        async def subagent_node(state: MessagesState):
            # Create the specialized subagent.
            # NOTE(review): a fresh subagent is built on every node
            # invocation — consider caching if this path becomes hot.
            subagent = await SubAgentFactory.create_subagent(
                subagent_name, all_tools, llm
            )

            # Run the subagent
            result = await subagent.ainvoke(state)
            return result

        return subagent_node

    # Build the graph
    workflow = StateGraph(MessagesState)

    # Add nodes
    workflow.add_node("router", router_node)
    workflow.add_node("image_identifier", await create_subagent_node("image_identifier"))
    workflow.add_node("species_explorer", await create_subagent_node("species_explorer"))
    workflow.add_node("taxonomy_specialist", await create_subagent_node("taxonomy_specialist"))

    # Define routing logic
    def route_to_specialist(state: MessagesState) -> Literal["image_identifier", "species_explorer", "taxonomy_specialist", END]:
        """Route based on last message content."""
        # NOTE(review): at this point the last message is the router LLM's
        # own reply, not the user's request, so keyword routing depends on
        # the router echoing relevant terms. Also assumes .content is a str
        # (it can be a list of content blocks for some models) — confirm.
        last_message = state["messages"][-1]
        content = last_message.content.lower()

        # Simple keyword-based routing (could be improved with LLM classification)
        if any(word in content for word in ["identify", "what bird", "classify", "image", "photo"]):
            return "image_identifier"
        elif any(word in content for word in ["audio", "sound", "call", "song", "find", "search"]):
            return "species_explorer"
        elif any(word in content for word in ["family", "families", "conservation", "endangered", "taxonomy"]):
            return "taxonomy_specialist"
        else:
            # Default to species explorer for general queries
            return "species_explorer"

    # Connect nodes
    workflow.add_edge(START, "router")
    workflow.add_conditional_edges("router", route_to_specialist)
    workflow.add_edge("image_identifier", END)
    workflow.add_edge("species_explorer", END)
    workflow.add_edge("taxonomy_specialist", END)

    # Compile with memory for conversation context
    return workflow.compile(checkpointer=InMemorySaver())
|
langgraph_agent/subagent_supervisor.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Subagent Supervisor
|
| 3 |
+
|
| 4 |
+
Uses LangGraph's create_supervisor() for LLM-based routing between specialists.
|
| 5 |
+
"""
|
| 6 |
+
from typing import List, Any
|
| 7 |
+
from langchain_core.language_models import BaseChatModel
|
| 8 |
+
from langchain.agents import create_agent
|
| 9 |
+
from langgraph.graph import StateGraph, MessagesState, START, END
|
| 10 |
+
from langgraph.checkpoint.memory import InMemorySaver
|
| 11 |
+
from .subagent_config import SubAgentConfig
|
| 12 |
+
from .subagent_factory import SubAgentFactory
|
| 13 |
+
|
| 14 |
+
async def create_supervisor_workflow(all_tools: List[Any], llm: BaseChatModel):
    """
    Create a supervisor workflow that orchestrates specialized subagents.

    The supervisor uses LLM-based routing to delegate each request to the
    most appropriate specialist agent.

    Args:
        all_tools: Full list of available MCP tools
        llm: Language model for both supervisor and subagents

    Returns:
        Compiled LangGraph workflow with supervisor
    """
    from langgraph_supervisor import create_supervisor

    # Build the three specialists in a fixed order (image, species, taxonomy).
    print("[SUPERVISOR]: Creating specialist agents...")
    specialist_names = ("image_identifier", "species_explorer", "taxonomy_specialist")
    specialists = [
        await SubAgentFactory.create_subagent(name, all_tools, llm)
        for name in specialist_names
    ]

    # Wrap the specialists in an LLM-routed supervisor.
    print("[SUPERVISOR]: Creating supervisor orchestrator...")
    workflow = create_supervisor(
        specialists,
        model=llm,
        prompt=SubAgentConfig.get_router_prompt()
    )

    # Shared in-memory checkpointer keeps conversation context across turns.
    print("[SUPERVISOR]: Compiling workflow with memory...")
    return workflow.compile(checkpointer=InMemorySaver())
|
langgraph_agent/test_agent.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test suite for bird classifier agents.
|
| 3 |
+
"""
|
| 4 |
+
import asyncio
|
| 5 |
+
import sys
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
# Add parent directory to path so imports work from any location
|
| 9 |
+
parent_dir = Path(__file__).parent.parent
|
| 10 |
+
if str(parent_dir) not in sys.path:
|
| 11 |
+
sys.path.insert(0, str(parent_dir))
|
| 12 |
+
|
| 13 |
+
from langgraph_agent import AgentFactory
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
async def test_classifier_agent():
    """Test basic classifier agent with multiple images."""
    banner = "=" * 70
    print("\n" + banner)
    print("Test Suite: Basic Classifier Agent")
    print(banner + "\n")

    # Build the agent once and reuse it for every test image
    agent = await AgentFactory.create_classifier_agent()

    urls = [
        "https://images.unsplash.com/photo-1555169062-013468b47731?w=400",
        "https://images.unsplash.com/photo-1445820200644-69f87d946277?w=400",
    ]

    for idx, image_url in enumerate(urls, start=1):
        print(f"\n[TEST {idx}/{len(urls)}]")
        print(banner)

        reply = await agent.ainvoke({
            "messages": [{
                "role": "user",
                "content": f"Classify the bird in this image: {image_url}"
            }]
        })

        print(f"\n[RESULT]: {reply['messages'][-1].content}\n")

    print("\n[ALL TESTS COMPLETE!]\n")
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
async def test_multi_server_agent():
    """Test multi-server agent with classifier + eBird."""
    banner = "=" * 70
    print("\n" + banner)
    print("Test Suite: Multi-Server Agent")
    print(banner + "\n")

    # Memory-backed agent so the follow-up turn can refer to the first answer
    agent = await AgentFactory.create_multi_server_agent(with_memory=True)
    config = {"configurable": {"thread_id": "test_session"}}

    # Turn 1: classify a bird from a public URL
    print("\n[TEST 1]: Classify bird from URL")
    print(banner)
    first = await agent.ainvoke(
        {
            "messages": [{
                "role": "user",
                "content": "What bird is this? https://images.unsplash.com/photo-1555169062-013468b47731?w=400"
            }]
        },
        config,
    )
    print(f"\n[RESULT]: {first['messages'][-1].content}\n")

    # Turn 2: follow-up on "this bird" — exercises conversation memory
    print("\n[TEST 2]: Follow-up question (tests memory)")
    print(banner)
    second = await agent.ainvoke(
        {
            "messages": [{
                "role": "user",
                "content": "Where can I see this bird near Boston (42.36, -71.06)?"
            }]
        },
        config,
    )
    print(f"\n[RESULT]: {second['messages'][-1].content}\n")

    print("\n[ALL TESTS COMPLETE!]\n")
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
if __name__ == "__main__":
    # `sys` is already imported at module top; the previous local re-import
    # was redundant. Pass "multi" as the first CLI argument to run the
    # multi-server suite; default is the basic classifier suite.
    if len(sys.argv) > 1 and sys.argv[1] == "multi":
        asyncio.run(test_multi_server_agent())
    else:
        asyncio.run(test_classifier_agent())
|
langgraph_agent/test_agent.py.v1
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test script for bird classifier agent with multiple scenarios
|
| 3 |
+
"""
|
| 4 |
+
import asyncio
|
| 5 |
+
from simple_demo import create_bird_agent
|
| 6 |
+
|
| 7 |
+
async def test_agent():
    """Test agent with multiple bird images."""
    # Create the agent a single time and reuse it for every URL
    agent = await create_bird_agent()

    urls = [
        "https://images.unsplash.com/photo-1555169062-013468b47731?w=400",
        "https://images.unsplash.com/photo-1445820200644-69f87d946277?w=400&auto=format&fit=crop&q=60&ixlib=rb-4.1.0&ixid=M3wxMjA3fDB8MHxzZWFyY2h8MTV8fGJpcmR8ZW58MHx8MHx8fDA%3D"
    ]

    divider = "=" * 70
    for idx, image_url in enumerate(urls, start=1):
        print("\n" + divider)
        print(f"[TEST {idx}/{len(urls)}]: Testing bird classification...")
        print(divider + "\n")

        reply = await agent.ainvoke({
            "messages": [{
                "role": "user",
                "content": f"Classify the bird in this image: {image_url}"
            }]
        })

        print("\n[AGENT RESPONSE]:")
        print(reply["messages"][-1].content)

    print("\n[DEMO COMPLETE!]")
|
| 34 |
+
|
| 35 |
+
if __name__ == "__main__":
    # Script entry point: run the async demo end-to-end on a fresh event loop.
    asyncio.run(test_agent())
|
langgraph_agent/tools.py
ADDED
|
File without changes
|
modal_bird_classifier.py
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Bird Species Classifier MCP Server on Modal
|
| 3 |
+
Updated to support:
|
| 4 |
+
1. classify_from_base64() - for IDE/Cursor clients and Gradio
|
| 5 |
+
2. classify_from_url() - for fallback/public images
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import modal
|
| 9 |
+
from fastmcp import FastMCP
|
| 10 |
+
import base64
|
| 11 |
+
import json
|
| 12 |
+
import httpx
|
| 13 |
+
from io import BytesIO
|
| 14 |
+
from PIL import Image
|
| 15 |
+
import torch
|
| 16 |
+
import os
|
| 17 |
+
|
| 18 |
+
# ============================================================================
|
| 19 |
+
# MODAL APP CONFIGURATION
|
| 20 |
+
# ============================================================================
|
| 21 |
+
|
| 22 |
+
# Modal application that hosts the classifier MCP server.
app = modal.App("bird-classifier-mcp")

# Container image: slim Debian base with the pinned ML + serving stack.
image = modal.Image.debian_slim(python_version="3.12").pip_install(
    "transformers==4.46.0",
    "torch==2.5.1",
    "pillow==10.4.0",
    "fastmcp>=2.13.0",
    "pydantic>=2.10.0,<3.0.0",
    "fastapi==0.115.14",
    "httpx>=0.28.0",
)

# Modal secret providing API_KEY, read by the auth middleware in web().
API_KEY_SECRET = modal.Secret.from_name("bird-classifier-api-key")
|
| 35 |
+
|
| 36 |
+
# ============================================================================
|
| 37 |
+
# MCP SERVER DEFINITION
|
| 38 |
+
# ============================================================================
|
| 39 |
+
|
| 40 |
+
def make_mcp_server():
    """Create FastMCP server with bird classification tools.

    Loads the Hugging Face image-classification pipeline once at server
    creation; both tools share it via closure.

    Returns:
        Configured FastMCP server instance with two registered tools.
    """
    from transformers import pipeline

    mcp = FastMCP("Bird Species Classifier")

    print("🔄 Loading bird classifier model...")
    classifier = pipeline(
        "image-classification",
        model="prithivMLmods/Bird-Species-Classifier-526",
        device=0  # first GPU; the serving function requests gpu="T4"
    )
    print("✅ Model loaded!")

    def preprocess_image(image: Image.Image, max_size: int = 800) -> Image.Image:
        """Resize and convert to RGB."""
        if image.mode != 'RGB':
            image = image.convert('RGB')

        # Cap the longest side at max_size, preserving aspect ratio
        if max(image.size) > max_size:
            ratio = max_size / max(image.size)
            new_size = (int(image.size[0] * ratio), int(image.size[1] * ratio))
            image = image.resize(new_size, Image.Resampling.LANCZOS)

        return image

    # ========================================================================
    # TOOL 1: classify_from_base64 (PRIMARY - for IDE/Gradio)
    # ========================================================================

    @mcp.tool()
    async def classify_from_base64(image_data: str) -> str:
        """
        Classify a bird species from base64-encoded image data.

        This is the primary tool for IDE clients and Gradio apps.
        Accepts raw base64 or data URL format.

        Args:
            image_data: Base64-encoded image string (PNG/JPG)
                Can be raw base64 or "data:image/png;base64,..."

        Returns:
            JSON string with species name and confidence score
            Format: {"species": "Common Name", "confidence": 0.95}
        """
        try:
            # Handle data URL format; split once so the payload is untouched
            if image_data.startswith("data:"):
                image_data = image_data.split(",", 1)[1]

            # Decode base64
            print(f"[STATUS]: Decoding base64 image ({len(image_data)} chars)...")
            image_bytes = base64.b64decode(image_data)
            image = Image.open(BytesIO(image_bytes))
            image = preprocess_image(image)

            # Classify (top-1 label only)
            print("[STATUS]: Classifying image...")
            results = classifier(image, top_k=1)
            top_result = results[0]

            return json.dumps({
                "species": top_result['label'],
                "confidence": round(top_result['score'], 4),
                "source": "base64"
            })

        except Exception as e:
            # Report failure as data so the MCP client can surface it cleanly
            return json.dumps({
                "error": str(e),
                "species": None,
                "confidence": 0.0
            })

    # ========================================================================
    # TOOL 2: classify_from_url (FALLBACK)
    # ========================================================================

    @mcp.tool()
    async def classify_from_url(image_url: str) -> str:
        """
        Download image from URL and classify bird species.

        Fallback tool for clients that have URL access.

        Args:
            image_url: URL to the image (https://example.com/bird.jpg)

        Returns:
            JSON string with species name and confidence score
        """
        try:
            print("[STATUS]: Downloading from URL...")
            # Async download: the previous blocking httpx.get() stalled the
            # server's event loop for up to 15s per request.
            async with httpx.AsyncClient(follow_redirects=True, timeout=15) as client:
                response = await client.get(image_url)
            response.raise_for_status()

            image = Image.open(BytesIO(response.content))
            image = preprocess_image(image)

            results = classifier(image, top_k=1)
            top_result = results[0]

            return json.dumps({
                "species": top_result['label'],
                "confidence": round(top_result['score'], 4),
                "source": "url"
            })

        except Exception as e:
            return json.dumps({
                "error": str(e),
                "species": None,
                "confidence": 0.0
            })

    return mcp
|
| 157 |
+
|
| 158 |
+
# ============================================================================
|
| 159 |
+
# WEB ENDPOINT WITH AUTHENTICATION
|
| 160 |
+
# ============================================================================
|
| 161 |
+
|
| 162 |
+
@app.function(
    image=image,
    #gpu="L40S",
    gpu="T4",
    secrets=[API_KEY_SECRET],
    timeout=300,
    min_containers=0,
    max_containers=5,
    scaledown_window=60,
)
@modal.asgi_app()
def web():
    """ASGI web endpoint for MCP server with API key auth.

    Wraps the FastMCP streamable-HTTP app in a FastAPI application whose
    middleware rejects any request lacking a valid X-API-Key header.
    """
    import hmac

    from fastapi import FastAPI, Request
    from fastapi.responses import JSONResponse

    print("[STATUS]: Starting MCP server...")

    mcp = make_mcp_server()
    mcp_app = mcp.http_app(transport="streamable-http", stateless_http=True)

    # Share the MCP app's lifespan so its startup/shutdown hooks run
    fastapi_app = FastAPI(
        title="Bird Classifier MCP Server",
        description="MCP server for bird species classification",
        lifespan=mcp_app.lifespan
    )

    @fastapi_app.middleware("http")
    async def verify_api_key(request: Request, call_next):
        """Verify API key on every request"""
        api_key = request.headers.get("X-API-Key")
        expected_key = os.environ.get("API_KEY")

        # Constant-time comparison avoids leaking key contents via timing;
        # reject when either side is missing so an unset API_KEY never
        # opens unauthenticated access.
        if not api_key or not expected_key or not hmac.compare_digest(api_key, expected_key):
            return JSONResponse(
                status_code=401,
                content={"error": "Invalid or missing API key"}
            )

        return await call_next(request)

    fastapi_app.mount("/", mcp_app)

    print("[STATUS]: MCP server is ready!")
    return fastapi_app
|
| 209 |
+
|
| 210 |
+
# ============================================================================
|
| 211 |
+
# TEST FUNCTION
|
| 212 |
+
# ============================================================================
|
| 213 |
+
|
| 214 |
+
@app.function(image=image, secrets=[API_KEY_SECRET])
async def test_classifier():
    """Test MCP server"""
    from fastmcp import Client
    from fastmcp.client.transports import StreamableHttpTransport

    bar = "=" * 70
    print("\n" + bar)
    print("[STATUS]: Testing Bird Classifier MCP server...")
    print(bar + "\n")

    endpoint = f"{web.get_web_url()}/mcp/"

    # Authenticated transport - the web() middleware checks X-API-Key
    http_transport = StreamableHttpTransport(
        url=endpoint,
        headers={"X-API-Key": os.environ.get("API_KEY")}
    )
    mcp_client = Client(http_transport)

    try:
        async with mcp_client:
            # Enumerate the registered tools first
            print("\nAvailable Tools:")
            for tool in await mcp_client.list_tools():
                print(f" - {tool.name}")

            # Exercise the URL-based classification path end to end
            print("\n" + bar)
            print("[TEST 1]: classify_from_url")
            print(bar)

            sample = "https://images.unsplash.com/photo-1444464666168-49d633b86797?w=400"
            outcome = await mcp_client.call_tool(
                "classify_from_url",
                arguments={"image_url": sample}
            )

            if outcome.content:
                parsed = json.loads(outcome.content[0].text)
                print(f"[RESULT]: {parsed.get('species')} ({parsed.get('confidence'):.1%})")

    except Exception as e:
        print(f"[ERROR]: {e}")

    print("\n" + bar)
    print("[STATUS]: Test complete!")
    print(bar + "\n")
|
nuthatch_tools.py
ADDED
|
@@ -0,0 +1,737 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Nuthatch MCP Server
|
| 3 |
+
Wraps Nuthatch API v2.3.1 as reusable MCP tools
|
| 4 |
+
Runs locally with FastMCP and supports STDIO transport
|
| 5 |
+
|
| 6 |
+
Features:
|
| 7 |
+
- 7 core tools for bird species reference data
|
| 8 |
+
- Rich media: images (Unsplash) + audio (xeno-canto.org)
|
| 9 |
+
- Taxonomic search and conservation status filtering
|
| 10 |
+
- JSON responses for easy integration
|
| 11 |
+
- Rate limiting and error handling
|
| 12 |
+
|
| 13 |
+
Difference from eBird:
|
| 14 |
+
- eBird: Real-time sightings, location-based hotspots
|
| 15 |
+
- Nuthatch: Species reference, images, audio, taxonomy
|
| 16 |
+
- Focus: "what is this bird?" vs. "Where can I see it?"
|
| 17 |
+
"""
|
| 18 |
+
import os
|
| 19 |
+
import sys
|
| 20 |
+
import requests
|
| 21 |
+
import json
|
| 22 |
+
import time
|
| 23 |
+
from typing import Optional, Dict, List, Any
|
| 24 |
+
from fastmcp import FastMCP
|
| 25 |
+
from dotenv import load_dotenv
|
| 26 |
+
|
| 27 |
+
# ============================================================================
|
| 28 |
+
# CONFIGURATION & SETUP
|
| 29 |
+
# ============================================================================
|
| 30 |
+
|
| 31 |
+
# Load .env so credentials are available when run as a local script
load_dotenv()

NUTHATCH_API_KEY = os.getenv("NUTHATCH_API_KEY")
NUTHATCH_BASE_URL = os.getenv("NUTHATCH_BASE_URL", "https://nuthatch.lastelm.software/v2")
DEFAULT_TIMEOUT = 15  # seconds per HTTP request
# Rate limiting: 500 requests/hour = 7.2s safe, but 1s acceptable for demos
# Demo sessions are bursty (5-10 requests in 30 seconds, then idle)
# 1 second = 60 requests/minute max = 360/hour in worst case (still under 500)
RATE_LIMIT_DELAY = 1.0  # Balance between responsiveness and API limits

if not NUTHATCH_API_KEY:
    # Print to stderr to avoid corrupting STDIO MCP protocol (stdout must be JSON-RPC only)
    print("[WARNING]: NUTHATCH_API_KEY not found in .env", file=sys.stderr)
    print(" Get one from: https://nuthatch.lastelm.software/", file=sys.stderr)

# Tool configuration - enable/disable as needed
ENABLED_TOOLS = {
    "search_birds": True,
    "get_bird_info": True,
    "get_bird_images": True,
    "get_bird_audio": True,
    "search_by_family": True,
    "filter_by_status": True,
    "get_all_families": True,
}

# Authentication configuration for HTTP mode
NUTHATCH_MCP_AUTH_KEY = os.getenv("NUTHATCH_MCP_AUTH_KEY")

# Initialize FastMCP server with optional auth
if NUTHATCH_MCP_AUTH_KEY:
    # HTTP mode with authentication
    from fastmcp.server.auth.providers.debug import DebugTokenVerifier

    # Simple shared-secret bearer check: token accepted iff equal to the key
    auth = DebugTokenVerifier(
        validate=lambda token: token == NUTHATCH_MCP_AUTH_KEY,
        client_id="nuthatch-mcp-client"
    )
    mcp = FastMCP("Nuthatch Bird Reference", auth=auth)
else:
    # Development: No authentication
    mcp = FastMCP("Nuthatch Bird Reference")

# Rate limiting tracker: wall-clock time of the most recent API request
_last_request_time = 0
|
| 76 |
+
|
| 77 |
+
# ============================================================================
|
| 78 |
+
# HELPER FUNCTIONS
|
| 79 |
+
# ============================================================================
|
| 80 |
+
|
| 81 |
+
def _rate_limit():
    """Enforce rate limiting to avoid exceeding Nuthatch's API limits (500/hour)"""
    global _last_request_time
    # Sleep off any remainder of the minimum gap since the last request
    remaining = RATE_LIMIT_DELAY - (time.time() - _last_request_time)
    if remaining > 0:
        time.sleep(remaining)
    _last_request_time = time.time()
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _make_request(endpoint: str, params: Optional[Dict] = None) -> Optional[Dict]:
    """
    Centralized request handler with error handling and rate limiting.

    IMPORTANT: Header name is case-sensitive! "API-Key"

    Args:
        endpoint: API endpoint path (e.g., "/birds")
        params: Query parameters dictionary

    Returns:
        JSON response data or None on error
    """
    _rate_limit()
    try:
        url = f"{NUTHATCH_BASE_URL}{endpoint}"
        resp = requests.get(
            url,
            headers={"API-Key": NUTHATCH_API_KEY},  # Case-sensitive!
            params=params or {},
            timeout=DEFAULT_TIMEOUT,
        )

        code = resp.status_code
        if code == 200:
            # Decoded inside the try so malformed JSON falls through to the
            # generic handler below instead of propagating to callers.
            return resp.json()

        if code == 400:
            print(f" Bad Request ({url}): {resp.text[:400]}", flush=True)
        elif code == 401:
            print(f" Unauthorized ({url}): Check your NUTHATCH_API_KEY - body={resp.text[:400]}", flush=True)
        elif code == 404:
            print(f" Not found ({url}): Invalid endpoint or resource - body={resp.text[:400]}", flush=True)
        else:
            print(
                f" HTTP {code} for {url} "
                f"params={params or {}} body={resp.text[:400]}",
                flush=True,
            )
        return None

    except requests.Timeout:
        print(f" Request timeout after {DEFAULT_TIMEOUT}s for {endpoint}", flush=True)
        return None
    except requests.ConnectionError:
        print(f" Connection error calling {endpoint} - check network", flush=True)
        return None
    except Exception as e:
        print(f" Unexpected error calling {endpoint}: {str(e)}", flush=True)
        return None
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def _format_success_response(data: Any, **kwargs) -> str:
|
| 145 |
+
"""Format a successful response as JSON"""
|
| 146 |
+
response = {"status": "success", "data": data}
|
| 147 |
+
response.update(kwargs)
|
| 148 |
+
return json.dumps(response)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def _format_error_response(error: str) -> str:
|
| 152 |
+
"""Format an error response as JSON"""
|
| 153 |
+
return json.dumps({"status": "error", "error": error})
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# ============================================================================
|
| 157 |
+
# TOOL 1: search_birds
|
| 158 |
+
# ============================================================================
|
| 159 |
+
# Use case: User asks "What cardinals exist?" or classifier returns "Northern Cardinal"
|
| 160 |
+
# This tool provides multi-filter search across the species database
|
| 161 |
+
|
| 162 |
+
def search_birds(
    name: str = "",
    family: str = "",
    region: str = "",
    status: str = "",
    has_images: bool = True,
    page_size: int = 10
) -> str:
    """
    Search for bird species using multiple filters.

    Combines name, taxonomy, geography, and media filters for exploratory
    queries like "show me all cardinals" or "endangered birds". Input can
    come from the user directly ("cardinals", "eagles"), from classifier
    output ("Northern Cardinal"), or from taxonomic queries
    (family="Cardinalidae").

    Args:
        name: Common or scientific name (partial match)
        family: Scientific family name (e.g., "Cardinalidae", "Anatidae")
        region: Geographic region ("North America", "Western Europe")
        status: Conservation status ("Low Concern", "Endangered", etc.)
        has_images: Only returns birds with images (default: True)
        page_size: Maximum results to return (max: 100)

    Returns:
        JSON with matching birds and their basic info

    Example:
        search_birds(name="cardinal", has_images=True)
        -> Returns all cardinal species with images
    """
    if not any((name, family, region, status)):
        return _format_error_response("At least one search filter required")

    page_size = min(page_size, 100)

    try:
        query = {"pageSize": page_size}
        # Nuthatch API expects "true"/"false" strings, not Python booleans
        if has_images is not None:
            query["hasImg"] = "true" if has_images else "false"
        # API is case-sensitive - lowercase name/family for reliable matching
        if name:
            query["name"] = name.lower()
        if family:
            query["family"] = family.lower()
        if region:
            query["region"] = region
        if status:
            query["status"] = status

        payload = _make_request("/birds", query)
        if payload is None:
            return _format_error_response("Failed to fetch birds")

        applied_filters = {"name": name, "family": family, "region": region, "status": status}

        entities = payload.get('entities')
        if not entities:
            return _format_success_response(
                [],
                count=0,
                total_count=0,
                filters=applied_filters
            )

        def _summarize(bird):
            # Essential per-species fields for downstream agents
            return {
                "name": bird['name'],
                "scientific_name": bird['sciName'],
                "family": bird.get('family', 'Unknown'),
                "order": bird.get('order', 'Unknown'),
                "status": bird.get('status', 'Unknown'),
                "region": bird.get('region', []),
                "image_count": len(bird.get('images', [])),
                "has_audio": len(bird.get('recordings', [])) > 0
            }

        results = [_summarize(bird) for bird in entities]

        return _format_success_response(
            results,
            count=len(results),
            total_count=payload.get('totalCount', 0),
            filters=applied_filters
        )

    except Exception as e:
        return _format_error_response(f"Search failed: {str(e)}")


# Register as MCP tool
mcp.tool()(search_birds)
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
# ============================================================================
|
| 262 |
+
# TOOL 2: get_bird_info
|
| 263 |
+
# ============================================================================
|
| 264 |
+
# Use case: After classifier identifies a bird, get complete species details
|
| 265 |
+
|
| 266 |
+
def get_bird_info(name: str) -> str:
    """
    Get comprehensive information about a specific bird species.

    Looks up the closest match for *name* (common or scientific) and
    returns taxonomy, size ranges, conservation status, geographic
    range, and image/audio availability.

    Args:
        name: Common or scientific name of the bird.

    Returns:
        JSON string with complete species information, or an error
        response if the bird cannot be found.

    Example:
        get_bird_info("Northern Cardinal")
        -> Returns full details: family, size, status, media counts, etc.
    """
    if not name or len(name.strip()) < 2:
        return _format_error_response("Bird name required (minimum 2 characters)")

    try:
        # API is case-sensitive - lowercase the query for reliable matching.
        data = _make_request("/birds", {"name": name.lower(), "pageSize": 1})

        if data is None or not data.get('entities'):
            return _format_error_response(f"Bird '{name}' not found in database")

        record = data['entities'][0]
        images = record.get('images', [])
        recordings = record.get('recordings', [])

        def _size_range(lo_key: str, hi_key: str):
            # Collapse a min/max field pair into a dict; None when no minimum.
            if record.get(lo_key):
                return {"min": record.get(lo_key), "max": record.get(hi_key)}
            return None

        info = {
            "name": record['name'],
            "scientific_name": record['sciName'],
            "family": record.get('family', 'Unknown'),
            "order": record.get('order', 'Unknown'),
            "status": record.get('status', 'Unknown'),
            "region": record.get('region', []),
            "length_cm": _size_range('lengthMin', 'lengthMax'),
            "wingspan_cm": _size_range('wingspanMin', 'wingspanMax'),
            "image_count": len(images),
            "audio_count": len(recordings),
            "has_images": len(images) > 0,
            "has_audio": len(recordings) > 0,
        }

        return _format_success_response(info, bird_name=name)

    except Exception as e:
        return _format_error_response(f"Lookup failed: {str(e)}")
|
| 327 |
+
|
| 328 |
+
# Register as MCP tool
|
| 329 |
+
mcp.tool()(get_bird_info)
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
# ============================================================================
|
| 333 |
+
# TOOL 3: get_bird_images
|
| 334 |
+
# ============================================================================
|
| 335 |
+
# Use case: Show reference images to compare with user's uploaded photo
|
| 336 |
+
|
| 337 |
+
def get_bird_images(name: str, max_images: int = 5) -> str:
    """
    Get image URLs for a bird species.

    Returns high-quality reference images from Unsplash and curator photos.
    Perfect for visual comparison with user's uploaded photo.

    Args:
        name: Common or scientific name of the bird.
        max_images: Maximum number of image URLs to return (default: 5).
            Values below 1 are treated as 1.

    Returns:
        JSON with image URLs and bird identification, or an error response.

    Example:
        get_bird_images("Northern Cardinal", max_images=3)
        -> Returns 3 image URLs for visual comparison
    """
    if not name or len(name.strip()) < 2:
        return _format_error_response("Bird name required (minimum 2 characters)")

    # Bug fix: a zero/negative max_images previously produced a wrong slice
    # (images[:-n] drops items from the end) and a negative returned_count.
    max_images = max(1, max_images)

    try:
        # API is case-sensitive - convert to lowercase for reliable matching
        params = {"name": name.lower(), "pageSize": 1, "hasImg": "true"}
        data = _make_request("/birds", params)

        if data is None or not data.get('entities'):
            return _format_error_response(f"Bird '{name}' not found or has no images")

        bird = data['entities'][0]
        images = bird.get('images', [])

        if not images:
            return _format_error_response(f"No images available for '{bird['name']}'")

        return _format_success_response(
            images[:max_images],
            bird_name=bird['name'],
            scientific_name=bird['sciName'],
            total_images=len(images),
            returned_count=min(len(images), max_images)
        )

    except Exception as e:
        return _format_error_response(f"Image lookup failed: {str(e)}")
|
| 386 |
+
|
| 387 |
+
# Register as MCP tool
|
| 388 |
+
mcp.tool()(get_bird_images)
|
| 389 |
+
|
| 390 |
+
# ============================================================================
|
| 391 |
+
# TOOL 4: get_bird_audio
|
| 392 |
+
# ============================================================================
|
| 393 |
+
# Use case: Provide audio recordings so user can learn bird's call/song
|
| 394 |
+
|
| 395 |
+
def get_bird_audio(name: str, max_recordings: int = 5) -> str:
    """
    Get audio recordings for a bird species.

    Returns recordings from xeno-canto.org with location, date, and type
    metadata - useful for learning a bird's call or song.

    Args:
        name: Common or scientific name of the bird.
        max_recordings: Maximum number of recordings to return (default: 5).

    Returns:
        JSON with recording metadata and download URLs, or an error response.

    Example:
        get_bird_audio("Northern Cardinal", max_recordings=3)
        -> Returns 3 audio recordings with metadata
    """
    if not name or len(name.strip()) < 2:
        return _format_error_response("Bird name required (minimum 2 characters)")

    try:
        # API is case-sensitive - lowercase the query for reliable matching.
        data = _make_request("/birds", {"name": name.lower(), "pageSize": 1})

        if data is None or not data.get('entities'):
            return _format_error_response(f"Bird '{name}' not found")

        match = data['entities'][0]
        clips = match.get('recordings', [])

        if not clips:
            return _format_error_response(f"No audio recordings available for '{match['name']}'")

        # Output key -> (xeno-canto source key, default) mapping; keeps only
        # the essential fields from each recording.
        field_map = (
            ("type", "type", "Unknown"),
            ("location", "loc", "Unknown"),
            ("country", "cnt", "Unknown"),
            ("date", "date", "Unknown"),
            ("recordist", "rec", "Unknown"),
            ("file_url", "file", ""),
            ("xeno_canto_url", "url", ""),
            ("quality", "q", ""),
            ("length", "length", "Unknown"),
        )
        trimmed = [
            {out_key: clip.get(src_key, default) for out_key, src_key, default in field_map}
            for clip in clips[:max_recordings]
        ]

        return _format_success_response(
            trimmed,
            bird_name=match['name'],
            scientific_name=match['sciName'],
            total_recordings=len(clips),
            returned_count=min(len(clips), max_recordings)
        )

    except Exception as e:
        return _format_error_response(f"Audio lookup failed: {str(e)}")
|
| 460 |
+
|
| 461 |
+
# Register as MCP tool
|
| 462 |
+
mcp.tool()(get_bird_audio)
|
| 463 |
+
|
| 464 |
+
# ============================================================================
|
| 465 |
+
# TOOL 5: search_by_family
|
| 466 |
+
# ============================================================================
|
| 467 |
+
# Use case: "Show me all birds in the same family as this cardinal"
|
| 468 |
+
|
| 469 |
+
def search_by_family(family_name: str, max_results: int = 20) -> str:
    """
    Get all bird species in a taxonomic family.

    Useful for exploring related species after identifying a bird,
    e.g. "This is a cardinal. What other cardinals exist?"

    Args:
        family_name: Scientific family name (e.g., "Cardinalidae", "Fringillidae").
        max_results: Maximum species to return (default: 20, capped at 100).

    Returns:
        JSON with all species in that family, or an error response.

    Example:
        search_by_family("Cardinalidae")
        -> Returns Northern Cardinal, Pyrrhuloxia, Rose-breasted Grosbeak, etc.
    """
    if not family_name or len(family_name.strip()) < 2:
        return _format_error_response("Family name required (minimum 2 characters)")

    try:
        # API is case-sensitive - lowercase the family for reliable matching.
        query = {"family": family_name.lower(), "pageSize": min(max_results, 100)}
        data = _make_request("/birds", query)

        if data is None:
            return _format_error_response("Failed to fetch family data")
        if not data.get('entities'):
            return _format_error_response(f"No birds found in family '{family_name}'")

        # Keep only the essential fields for each family member.
        members = []
        for entry in data['entities']:
            members.append({
                "name": entry['name'],
                "scientific_name": entry['sciName'],
                "status": entry.get('status', 'Unknown'),
                "region": entry.get('region', []),
                "has_images": len(entry.get('images', [])) > 0,
                "has_audio": len(entry.get('recordings', [])) > 0,
            })

        return _format_success_response(
            members,
            family=family_name,
            count=len(members),
            total_count=data.get('totalCount', 0)
        )

    except Exception as e:
        return _format_error_response(f"Family search failed: {str(e)}")
|
| 527 |
+
|
| 528 |
+
# Register as MCP tool
|
| 529 |
+
mcp.tool()(search_by_family)
|
| 530 |
+
|
| 531 |
+
# ============================================================================
|
| 532 |
+
# TOOL 6: filter_by_status
|
| 533 |
+
# ============================================================================
|
| 534 |
+
# Use case: "Show me endangered birds" or conservation awareness
|
| 535 |
+
|
| 536 |
+
def filter_by_status(status: str, region: str = "", max_results: int = 20) -> str:
    """
    Find birds by conservation status.

    Useful for conservation awareness and educational queries.
    Common statuses: "Low Concern", "Endangered", "Threatened", "Vulnerable".

    Args:
        status: Conservation status to filter by.
        region: Optional geographic filter ("North America", "Western Europe").
        max_results: Maximum birds to return (default: 20, capped at 100).

    Returns:
        JSON with birds matching the conservation status, or an error response.

    Example:
        filter_by_status("Endangered", region="North America")
        -> Returns endangered birds in North America
    """
    if not status or len(status.strip()) < 2:
        return _format_error_response("Conservation status required")

    try:
        query = {"status": status, "pageSize": min(max_results, 100)}
        if region:
            query["region"] = region

        data = _make_request("/birds", query)

        if data is None:
            return _format_error_response("Failed to fetch status data")

        if not data.get('entities'):
            # Mention the region in the message only when one was supplied.
            message = f"No birds found with status '{status}'"
            if region:
                message += f" in region '{region}'"
            return _format_error_response(message)

        # Keep only the essential fields per match.
        matches = []
        for entry in data['entities']:
            matches.append({
                "name": entry['name'],
                "scientific_name": entry['sciName'],
                "family": entry.get('family', 'Unknown'),
                "status": entry.get('status', 'Unknown'),
                "region": entry.get('region', []),
                "has_images": len(entry.get('images', [])) > 0,
            })

        return _format_success_response(
            matches,
            status=status,
            region=region or "All regions",
            count=len(matches),
            total_count=data.get('totalCount', 0)
        )

    except Exception as e:
        return _format_error_response(f"Status filter failed: {str(e)}")
|
| 600 |
+
|
| 601 |
+
# Register as MCP tool
|
| 602 |
+
mcp.tool()(filter_by_status)
|
| 603 |
+
|
| 604 |
+
# ============================================================================
|
| 605 |
+
# TOOL 7: get_all_families
|
| 606 |
+
# ============================================================================
|
| 607 |
+
# Use case: "What bird families are in the database?"
|
| 608 |
+
|
| 609 |
+
def get_all_families(region: str = "") -> str:
    """
    Get list of all unique bird families in the database.

    Useful for taxonomic exploration and understanding database coverage.
    Note: built from a 100-record sample, so the list may be incomplete.

    Args:
        region: Optional geographic filter ("North America", "Western Europe").

    Returns:
        JSON with sorted unique family names, or an error response.

    Example:
        get_all_families(region="North America")
        -> Returns ["Anatidae", "Cardinalidae", "Fringillidae", ...]
    """
    try:
        # Fetch a large sample to get a reasonably comprehensive family list.
        query = {"pageSize": 100}
        if region:
            query["region"] = region

        data = _make_request("/birds", query)

        if data is None:
            return _format_error_response("Failed to fetch family data")
        if not data.get('entities'):
            return _format_error_response("No birds found")

        # Deduplicate via a set comprehension, then sort alphabetically.
        families = sorted({
            entry['family']
            for entry in data['entities']
            if entry.get('family')
        })

        return _format_success_response(
            families,
            region=region or "All regions",
            count=len(families),
            note="This is a sample - database may contain more families"
        )

    except Exception as e:
        return _format_error_response(f"Family listing failed: {str(e)}")
|
| 660 |
+
|
| 661 |
+
# Register as MCP tool
|
| 662 |
+
mcp.tool()(get_all_families)
|
| 663 |
+
|
| 664 |
+
# ============================================================================
|
| 665 |
+
# SERVER STARTUP WITH STDIO TRANSPORT
|
| 666 |
+
# ============================================================================
|
| 667 |
+
|
| 668 |
+
def main():
    """Start the MCP server with dual transport support (STDIO or HTTP).

    Transport is chosen from command-line flags: `--http` /
    `--streamable-http` selects HTTP (with optional `--host` / `--port`);
    the default is STDIO for agent integration.
    """
    # Determine transport mode from command line args
    is_http_mode = "--http" in sys.argv or "--streamable-http" in sys.argv

    # For STDIO mode, all informational output must go to stderr (stdout is for JSON-RPC only)
    # For HTTP mode, can use stdout
    output = sys.stdout if is_http_mode else sys.stderr

    print("\n"+"="*70, file=output)
    print("🐦 [NUTHATCH MCP SERVER] - Starting...", file=output)
    print("="*70, file=output)
    print(f"[API KEY]: {'✅ Configured' if NUTHATCH_API_KEY else '❌ Missing'}", file=output)
    print("\n[AVAILABLE TOOLS]:", file=output)

    tools_list = [
        "1. search_birds - Multi-filter bird search",
        "2. get_bird_info - Complete species information",
        "3. get_bird_images - Reference image URLs",
        "4. get_bird_audio - Audio recordings from xeno-canto",
        "5. search_by_family - All species in taxonomic family",
        "6. filter_by_status - Birds by conservation status",
        "7. get_all_families - List all bird families"
    ]

    for tool in tools_list:
        print(f"  ✓ {tool}", file=output)

    print("\n[DATA SOURCE]:", file=output)
    print("  • Images: Unsplash + curator photos", file=output)
    print("  • Audio: xeno-canto.org recordings", file=output)
    print("  • Coverage: 1000+ species (North America, Western Europe)", file=output)

    print("\n"+"="*70, file=output)

    if is_http_mode:
        # Extract port and host from command line args
        port = 8001  # Default to 8001 to avoid conflict with other MCP servers
        host = "127.0.0.1"

        for i, arg in enumerate(sys.argv):
            if arg == "--port" and i + 1 < len(sys.argv):
                # Bug fix: a non-numeric --port value used to crash with an
                # unhandled ValueError before the server even started.
                try:
                    port = int(sys.argv[i + 1])
                except ValueError:
                    print(f"[WARNING]: Invalid --port value '{sys.argv[i + 1]}', using default {port}", file=output)
            elif arg == "--host" and i + 1 < len(sys.argv):
                host = sys.argv[i + 1]

        # Auth status based on initialization
        auth_status = "🔒 Protected (auth required)" if NUTHATCH_MCP_AUTH_KEY else "🔓 No authentication (development mode)"

        print("[TRANSPORT]: Starting streamable-http MCP server", file=output)
        print(f"[HOST]: {host}", file=output)
        print(f"[PORT]: {port}", file=output)
        print(f"[URL]: http://{host}:{port}", file=output)
        print(f"[AUTH]: {auth_status}", file=output)
        print("[NOTE]: This is MCP over HTTP for web clients", file=output)
        print("="*70+"\n", file=output)

        # Run with streamable-http transport (auth configured at FastMCP init)
        mcp.run(transport="streamable-http", host=host, port=port)
    else:
        print("[TRANSPORT]: Running as stdio MCP server", file=output)
        print("[NOTE]: For HTTP transport, use: python nuthatch_tools.py --http", file=output)
        print("="*70+"\n", file=output)

        # Run as stdio MCP server (default for agent integration)
        mcp.run(transport="stdio")
|
| 734 |
+
|
| 735 |
+
if __name__ == "__main__":
|
| 736 |
+
main()
|
| 737 |
+
|
requirements.txt
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio[mcp]==6.0.1
|
| 2 |
+
huggingface_hub>=0.30.0
|
| 3 |
+
transformers==4.45.0
|
| 4 |
+
torch==2.2.2
|
| 5 |
+
accelerate==0.34.0
|
| 6 |
+
numpy<2.0.0
|
| 7 |
+
spaces>=0.30.0
|
| 8 |
+
langgraph>=0.0.1
|
| 9 |
+
fastmcp==2.13.1
|
| 10 |
+
mcp==1.21.2
|
| 11 |
+
pydantic>=2.0.0
|
| 12 |
+
langchain-mcp-adapters==0.1.13
|
| 13 |
+
langchain-openai==1.0.3
|
| 14 |
+
langchain-huggingface==1.1.0
|
| 15 |
+
langchain==1.0.8
|
| 16 |
+
langgraph-supervisor==0.0.31
|
| 17 |
+
langchain-anthropic==1.2.0
|
| 18 |
+
llama-index==0.14.8
|
| 19 |
+
llama-index-llms-openai==0.6.10
|
| 20 |
+
llama-index-llms-anthropic==0.10.3
|
| 21 |
+
|
| 22 |
+
python-dotenv>=1.0.0
|
| 23 |
+
Pillow>=10.0.0
|
| 24 |
+
modal>=1.2.0
|
test_file.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
Hello World !
|
tests/test_agent_cache.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
from dotenv import load_dotenv
|
| 7 |
+
|
| 8 |
+
# Add parent directory to sys.path so we can import ebird_tools
|
| 9 |
+
parent_dir = Path(__file__).parent.parent
|
| 10 |
+
sys.path.insert(0, str(parent_dir))
|
| 11 |
+
|
| 12 |
+
from langgraph_agent import AgentFactory
|
| 13 |
+
from agent_cache import get_or_create_agent, get_cache_stats
|
| 14 |
+
|
| 15 |
+
async def test_cache():
    """Exercise the agent cache: miss, hit, and a second session's miss.

    Requires OPENAI_API_KEY in the environment; prints cache stats after
    each step and verifies that a repeated (session, config) pair returns
    the identical agent object.
    """
    # Test data
    session_id = "test_session_123"
    openai_key = os.environ.get("OPENAI_API_KEY")

    # Check for API key (similar to main app)
    if not openai_key:
        print("❌ OPENAI_API_KEY environment variable not set!")
        print("Please set your OpenAI API key to run this test.")
        print("Example: export OPENAI_API_KEY='sk-your-key-here'")
        return

    mode = "Specialized Subagents (3 Specialists)"

    def make_factory():
        # Shared factory: all three calls previously duplicated this lambda.
        return lambda: AgentFactory.create_subagent_orchestrator(
            model="gpt-4o-mini",
            api_key=openai_key,
            provider="openai",
            mode=mode
        )

    print("="*50)
    print("[TEST 1]: Create agent (cache miss)")
    print("="*50)

    agent1 = await get_or_create_agent(
        session_id=session_id,
        provider="openai",
        api_key=openai_key,
        model="gpt-4o-mini",
        mode=mode,
        agent_factory_method=make_factory()
    )

    print(f"\nCache stats: {get_cache_stats()}")

    print("\n"+"="*50)
    # Bug fix: banner previously read "[TEST 2: ..." with a mismatched bracket.
    print("[TEST 2]: Get same agent (cache hit)")
    print("="*50)

    agent2 = await get_or_create_agent(
        session_id=session_id,
        provider="openai",
        api_key=openai_key,
        model="gpt-4o-mini",
        mode=mode,
        agent_factory_method=make_factory()
    )

    print(f"\nCache stats: {get_cache_stats()}")

    # Verify they're the same object
    print(f"\nSame agent object? {agent1 is agent2}")

    print("\n"+"="*50)
    print("[TEST 3]: Different session (cache miss)")
    print("=" * 50)

    agent3 = await get_or_create_agent(
        session_id="different_session_456",
        provider="openai",
        api_key=openai_key,
        model="gpt-4o-mini",
        mode=mode,
        agent_factory_method=make_factory()
    )

    print(f"\nCache stats: {get_cache_stats()}")
|
| 89 |
+
|
| 90 |
+
asyncio.run(test_cache())
|
tests/test_modal_direct.py
ADDED
|
@@ -0,0 +1,283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Simple test script for Modal Bird Classifier MCP Server
|
| 3 |
+
Tests the deployed Modal server directly with both tools
|
| 4 |
+
"""
|
| 5 |
+
import asyncio
|
| 6 |
+
import base64
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
from io import BytesIO
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from dotenv import load_dotenv
|
| 13 |
+
|
| 14 |
+
from fastmcp import Client
|
| 15 |
+
from fastmcp.client.transports import StreamableHttpTransport
|
| 16 |
+
|
| 17 |
+
load_dotenv()
|
| 18 |
+
|
| 19 |
+
# ============================================================================
|
| 20 |
+
# CONFIGURATION
|
| 21 |
+
# ============================================================================
|
| 22 |
+
|
| 23 |
+
MODAL_MCP_URL = os.getenv("MODAL_MCP_URL")
|
| 24 |
+
BIRD_API_KEY = os.getenv("BIRD_CLASSIFIER_API_KEY")
|
| 25 |
+
|
| 26 |
+
print("="*70)
|
| 27 |
+
print("[STATUS]: Testing Classifier Modal MCP Server...")
|
| 28 |
+
print("="*70)
|
| 29 |
+
print(f"[MODAL URL]: {MODAL_MCP_URL}")
|
| 30 |
+
print(f"[API KEY]: {'Set' if BIRD_API_KEY else 'Missing'}")
|
| 31 |
+
print("="*70)
|
| 32 |
+
|
| 33 |
+
if not MODAL_MCP_URL or not BIRD_API_KEY:
|
| 34 |
+
print("\n[ERROR]: Missing MODAL_MCP_URL or BIRD_CLASSIFIER_API_KEY in .env")
|
| 35 |
+
print(" Set these in your .env file:")
|
| 36 |
+
print(" MODAL_MCP_URL=https://your-username--bird-classifier-mcp-web.modal.run/mcp/")
|
| 37 |
+
print(" BIRD_CLASSIFIER_API_KEY=your-api-key")
|
| 38 |
+
exit(1)
|
| 39 |
+
|
| 40 |
+
# ============================================================================
|
| 41 |
+
# HELPER FUNCTIONS
|
| 42 |
+
# ===========================================================================
|
| 43 |
+
|
| 44 |
+
def image_to_base64(image_path: str) -> str:
    """Load an image file and return it as a base64-encoded JPEG string.

    The image is downscaled so its longest side is at most 800px,
    converted to RGB, and JPEG-compressed (quality 85) before encoding,
    which keeps the payload small for transport.
    """
    img = Image.open(image_path)

    # Downscale if the longest side exceeds 800px, preserving aspect ratio.
    longest = max(img.size)
    if longest > 800:
        scale = 800 / longest
        w, h = img.size
        img = img.resize((int(w * scale), int(h * scale)), Image.Resampling.LANCZOS)

    # JPEG requires RGB (drops alpha / palette modes).
    if img.mode != 'RGB':
        img = img.convert('RGB')

    # Compress to JPEG in memory, then base64-encode the bytes.
    out = BytesIO()
    img.save(out, format="JPEG", quality=85, optimize=True)
    return base64.b64encode(out.getvalue()).decode()
|
| 64 |
+
|
| 65 |
+
# ============================================================================
|
| 66 |
+
# TEST FUNCTIONS
|
| 67 |
+
# ============================================================================
|
| 68 |
+
|
| 69 |
+
async def test_list_tools():
    """Test: List available tools on Modal server"""
    print("\n"+"="*70)
    print("[TEST 1]: List Available Tools")
    print("="*70)

    try:
        # Authenticate to the Modal MCP endpoint via API-key header.
        http_transport = StreamableHttpTransport(
            url=MODAL_MCP_URL,
            headers={"X-API-Key": BIRD_API_KEY}
        )
        mcp_client = Client(http_transport)

        async with mcp_client:
            available = await mcp_client.list_tools()
            print(f"\n[✅ FOUND]: {len(available)} tools:")
            for entry in available:
                print(f"  - {entry.name}")
                print(f".    {entry.description[:60]}...")

        print("\n[✅ TEST 1 PASSED]")
        return True

    except Exception as e:
        print(f"\n[❌TEST 1 FAILED]: {e}")
        return False
|
| 96 |
+
|
| 97 |
+
async def test_classify_from_url():
    """Test: Classify bird from URL.

    Sends a public image URL to the Modal classifier's `classify_from_url`
    tool and prints the species/confidence result. Returns True on success.
    """
    print("\n"+"="*70)
    print("[TEST 2]: Classify Bird from URL")
    print("="*70)

    try:
        #test_url = "https://images.unsplash.com/photo-1444464666168-49d633b86797?w=400"
        test_url = "https://images.unsplash.com/photo-1551085254-e96b210db58a?q=80&w=680&auto=format&fit=crop&ixlib=rb-4.1.0&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"
        print(f"\nURL: {test_url[:60]}...")

        transport = StreamableHttpTransport(
            url=MODAL_MCP_URL,
            headers={"X-API-Key": BIRD_API_KEY}
        )

        client = Client(transport)

        async with client:
            result = await client.call_tool(
                "classify_from_url",
                arguments={"image_url": test_url}
            )

            if not result.content:
                print("❌ No response from server")
                return False

            # Tool results may arrive as text content blocks or plain objects.
            result_text = result.content[0].text if hasattr(result.content[0], 'text') else str(result.content[0])
            data = json.loads(result_text)

            if "error" in data:
                # Bug fix: this message was missing the f-prefix and printed
                # the literal "{data['error']}" placeholder.
                print(f"❌ [ERROR]: Server error: {data['error']}")
                return False

            print(f"\n✅ [CLASSIFICATION RESULT]:")
            print(f"   Species: {data.get('species')}")
            print(f"   Confidence: {data.get('confidence'):.1%}")
            print(f"   Source: {data.get('source')}")

            print("\n✅ TEST 2 PASSED")
            return True

    except Exception as e:
        print(f"\n❌ TEST 2 FAILED: {e}")
        # Bug fix: traceback was imported but never used - print it so
        # failures are debuggable.
        import traceback
        traceback.print_exc()
        return False
|
| 144 |
+
|
| 145 |
+
async def test_classify_from_base64_with_local_file():
    """Test: Classify bird from local file (converted to base64).

    Loads a bundled example image, base64-encodes it, and sends it to the
    Modal MCP server's ``classify_from_base64`` tool. Skips (returns True)
    when the example image is missing.

    Returns:
        bool: True on success or skip, False on failure.
    """
    print("\n"+"="*70)
    print("[TEST 3]: Classify Bird from Local File (base64)")
    print("="*70)

    try:
        # BUG FIX: was a hard-coded absolute path on one developer's machine
        # (/Users/jacobbinder/...). Resolve relative to the repository root
        # so the test works anywhere the repo is checked out.
        test_image = Path(__file__).resolve().parent.parent / "examples" / "another_bird.jpg"

        if not test_image.exists():
            print(f"\n[ERROR]: Test image not found: {test_image}")
            print("  Skipping test 3...")
            print("\n[TEST 3 SKIPPED]")
            return True

        print(f"\nFile: {test_image.name}")

        # Convert to base64
        print("[STATUS]: Converting image to base64...")
        img_base64 = image_to_base64(str(test_image))
        print(f"Base64 size: {len(img_base64) / 1024:.1f} KB")

        transport = StreamableHttpTransport(
            url=MODAL_MCP_URL,
            headers={"X-API-Key": BIRD_API_KEY}
        )

        client = Client(transport)

        async with client:
            print("[STATUS]:Sending to Modal server...")
            result = await client.call_tool(
                "classify_from_base64",
                arguments={"image_data": img_base64}
            )

            if not result.content:
                print("❌ [ERROR]: No response from server")
                return False

            # MCP content items may be text blocks or other types; fall back to str().
            result_text = result.content[0].text if hasattr(result.content[0], 'text') else str(result.content[0])
            data = json.loads(result_text)

            if "error" in data:
                print(f"[SERVER ERROR]: {data['error']}")
                return False

            print(f"\n[CLASSIFICATION RESULT]:")
            print(f"  Species: {data.get('species')}")
            print(f"  Confidence: {data.get('confidence'):.1%}")
            print(f"  Source: {data.get('source')}")

            print("\n✅ [TEST 3 PASSED]")
            return True

    except Exception as e:
        # BUG FIX: include the exception in the failure message (it was
        # caught but never shown).
        print(f"\n❌ [TEST 3 FAILED]: {e}")
        import traceback
        traceback.print_exc()
        return False
|
| 206 |
+
|
| 207 |
+
async def test_auth_failure():
    """Test: Verify API key authentication work."""
    print("\n" + "=" * 70)
    print("[TEST 4]: API Key Authentication")
    print("=" * 70)

    try:
        print("\nTesting with INVALID API key...")

        # Deliberately bogus key: the server must reject the connection.
        bad_transport = StreamableHttpTransport(
            url=MODAL_MCP_URL,
            headers={"X-API-Key": "invalid-key-123"},
        )
        mcp_client = Client(bad_transport)

        async with mcp_client:
            # Listing tools should never succeed with a bad key.
            await mcp_client.list_tools()
            print("❌ Should have failed with 401")
            return False

    except Exception as exc:
        detail = str(exc)
        # Any of these markers indicates the expected auth rejection.
        if any(marker in detail for marker in ("401", "Unauthorized", "Invalid")):
            print(f"✅ Correctly rejected invalid API key: {detail[:60]}...")
            print("\n✅ [TEST 4 PASSED]")
            return True
        print(f"❌ [ERROR]: {exc}")
        return False
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
# ============================================================================
|
| 240 |
+
# MAIN
|
| 241 |
+
# ============================================================================
|
| 242 |
+
|
| 243 |
+
async def main():
    """Run all tests"""
    print("\n")

    # Suites run sequentially, in display order; each returns a bool.
    suites = [
        ("[TEST 1]: List Tools", test_list_tools),
        ("[TEST 2]: Classify from URL", test_classify_from_url),
        ("[TEST 3]: Classify from Base64", test_classify_from_base64_with_local_file),
        ("[TEST 4]: API Key Auth", test_auth_failure),
    ]

    results = []
    for label, runner in suites:
        results.append((label, await runner()))

    # Summary
    print("\n" + "=" * 70)
    print("[TEST SUMMARY]")
    print("=" * 70)

    total = len(results)
    passed = sum(1 for _, ok in results if ok)

    for label, ok in results:
        print(f"{'✅ [PASS]' if ok else '❌ [FAIL]'}: {label}")

    print(f"\n[TOTAL]: {passed}/{total} tests passed")

    if passed == total:
        print("\n🎉 All tests passed! Modal server is working correctly!")
    else:
        print(f"\n⚠️ {total - passed} test(s) failed. Check configuration and server status.")

    print("=" * 70 + "\n")
|
| 281 |
+
|
| 282 |
+
# Script entry point: run the full async test suite against the Modal server.
if __name__ == "__main__":
    asyncio.run(main())
|
tests/test_nuthatch.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Nuthatch MCP Integration Test
|
| 3 |
+
|
| 4 |
+
Tests the Nuthatch species database MCP server with both STDIO and HTTP transports.
|
| 5 |
+
|
| 6 |
+
Usage:
|
| 7 |
+
# Test with current .env configuration (STDIO or HTTP)
|
| 8 |
+
python tests/test_nuthatch.py
|
| 9 |
+
|
| 10 |
+
# Test both STDIO and HTTP modes sequentially
|
| 11 |
+
python tests/test_nuthatch.py --both
|
| 12 |
+
|
| 13 |
+
Configuration (.env):
|
| 14 |
+
# For STDIO mode (default - subprocess integration)
|
| 15 |
+
NUTHATCH_USE_STDIO=true
|
| 16 |
+
NUTHATCH_API_KEY=<your-key>
|
| 17 |
+
NUTHATCH_BASE_URL=https://nuthatch.lastelm.software/v2
|
| 18 |
+
|
| 19 |
+
# For HTTP mode (external server)
|
| 20 |
+
NUTHATCH_USE_STDIO=false
|
| 21 |
+
NUTHATCH_MCP_URL=http://localhost:8001/mcp
|
| 22 |
+
NUTHATCH_MCP_AUTH_KEY=<optional-auth-key>
|
| 23 |
+
|
| 24 |
+
# Also required (Modal classifier)
|
| 25 |
+
MODAL_MCP_URL=<your-modal-url>
|
| 26 |
+
BIRD_CLASSIFIER_API_KEY=<your-modal-key>
|
| 27 |
+
|
| 28 |
+
HTTP Mode Setup:
|
| 29 |
+
# Start Nuthatch HTTP server in separate terminal:
|
| 30 |
+
NUTHATCH_API_KEY=<your-key> python nuthatch_tools.py --http --port 8001
|
| 31 |
+
|
| 32 |
+
# With auth protection:
|
| 33 |
+
NUTHATCH_API_KEY=<your-key> NUTHATCH_MCP_AUTH_KEY=<auth-key> python nuthatch_tools.py --http --port 8001
|
| 34 |
+
"""
|
| 35 |
+
import asyncio
|
| 36 |
+
import os
|
| 37 |
+
import sys
|
| 38 |
+
from pathlib import Path
|
| 39 |
+
|
| 40 |
+
from dotenv import load_dotenv
|
| 41 |
+
|
| 42 |
+
# Add parent directory to sys.path so we can import modules
|
| 43 |
+
parent_dir = Path(__file__).parent.parent
|
| 44 |
+
sys.path.insert(0, str(parent_dir))
|
| 45 |
+
|
| 46 |
+
from langgraph_agent import AgentFactory
|
| 47 |
+
from agent_cache import get_or_create_agent, cleanup_old_agents, get_cache_stats
|
| 48 |
+
from langgraph_agent.mcp_clients import MCPClientManager
|
| 49 |
+
from langgraph_agent.config import AgentConfig
|
| 50 |
+
|
| 51 |
+
load_dotenv()
|
| 52 |
+
|
| 53 |
+
def validate_config():
    """Validate required configuration is present."""
    # (setting value, message reported when the value is missing)
    required = [
        (AgentConfig.MODAL_MCP_URL, "MODAL_MCP_URL not set"),
        (AgentConfig.BIRD_CLASSIFIER_API_KEY, "BIRD_CLASSIFIER_API_KEY not set"),
        (AgentConfig.NUTHATCH_API_KEY, "NUTHATCH_API_KEY not set"),
    ]
    problems = [message for value, message in required if not value]

    # HTTP mode additionally needs an explicit server URL.
    if not AgentConfig.NUTHATCH_USE_STDIO:
        if not AgentConfig.NUTHATCH_MCP_URL:
            problems.append("NUTHATCH_MCP_URL required for HTTP mode")
        if AgentConfig.NUTHATCH_MCP_URL == "http://localhost:8001/mcp":
            print("\n⚠️ WARNING: Using default NUTHATCH_MCP_URL (localhost:8001)")
            print(" Make sure nuthatch_tools.py is running with: python nuthatch_tools.py --http --port 8001")

    if problems:
        print("\n❌ Configuration Errors:")
        for problem in problems:
            print(f" • {problem}")
        print("\n💡 Check your .env file or environment variables")
        return False

    return True
|
| 81 |
+
|
| 82 |
+
async def test_nuthatch():
    """End-to-end smoke test of the Nuthatch MCP integration.

    Validates configuration, prints the active transport settings, creates a
    multi-server MCP client (Modal classifier + Nuthatch), lists the tools
    from each source, and calls the ``search_birds`` tool directly.
    """
    print("="*70)
    print("Testing Nuthatch MCP Integration")
    print("="*70)

    # Validate configuration
    if not validate_config():
        print("\n❌ Test aborted due to configuration errors")
        return

    # Show configuration
    print("\n[CONFIG] Transport Configuration:")
    print(f" • NUTHATCH_USE_STDIO: {AgentConfig.NUTHATCH_USE_STDIO}")
    print(f" • NUTHATCH_API_KEY: {'✅ Set' if AgentConfig.NUTHATCH_API_KEY else '❌ Not set'}")
    print(f" • NUTHATCH_BASE_URL: {AgentConfig.NUTHATCH_BASE_URL}")

    if AgentConfig.NUTHATCH_USE_STDIO:
        print(f" • Transport Mode: STDIO (subprocess)")
        print(f" • Command: python nuthatch_tools.py")
    else:
        print(f" • Transport Mode: HTTP (external server)")
        print(f" • MCP URL: {AgentConfig.NUTHATCH_MCP_URL}")
        print(f" • Auth: {'🔒 Protected' if AgentConfig.NUTHATCH_MCP_AUTH_KEY else '🔓 No auth'}")

    # Create multi-server client (Modal + Nuthatch)
    print("\n[1] Creating MCP client...")
    client = await MCPClientManager.create_multi_server_client()

    # Get tools
    print("\n[2] Loading tools...")
    tools = await MCPClientManager.get_tools(client)

    print(f"\n[3] Total tools available: {len(tools)}")

    # Separate tools by source.
    # NOTE(review): partitioning by the substring 'classify' assumes only the
    # Modal classifier tools carry that word in their names — confirm this
    # still holds if more MCP servers are added.
    classifier_tools = [t for t in tools if 'classify' in t.name.lower()]
    # Nuthatch tools = everything except classifier tools
    nuthatch_tools = [t for t in tools if 'classify' not in t.name.lower()]

    print(f"\n📊 Classifier tools ({len(classifier_tools)}):")
    for tool in classifier_tools:
        print(f" - {tool.name}")

    print(f"\n🐦 Nuthatch tools ({len(nuthatch_tools)}):")
    for tool in nuthatch_tools:
        print(f" - {tool.name}")

    # Test a Nuthatch tool
    print(f"\n[4] Testing search_birds tool...")

    try:
        # Find the search_birds tool (it's a LangChain Tool object)
        search_tool = None
        for tool in tools:
            if tool.name == "search_birds":
                search_tool = tool
                break

        if not search_tool:
            print(f"\n❌ search_birds tool not found!")
        else:
            # Invoke the tool directly with arguments
            result = await search_tool.ainvoke({"name": "Blue Jay", "page_size": 3})
            print(f"\n✅ Tool call successful!")
            # assumes the tool returns a string payload — slicing for a preview
            print(f"Result preview:\n{result[:400]}...")
    except Exception as e:
        print(f"\n❌ Tool call failed: {e}")
        import traceback
        traceback.print_exc()

    print("\n"+"="*70)
    print("Integration test complete!")
    print("="*70)
|
| 155 |
+
|
| 156 |
+
async def test_both_transports():
    """Test both STDIO and HTTP transports sequentially."""
    print("\n" + "="*70)
    print("Testing BOTH Transport Modes (STDIO + HTTP)")
    print("="*70)

    # Remember the configured mode so we can restore it afterwards.
    saved_mode = AgentConfig.NUTHATCH_USE_STDIO

    async def _run_suite(label, use_stdio):
        # Flip the transport flag, run the standard integration test, and
        # report (without propagating) any failure.
        AgentConfig.NUTHATCH_USE_STDIO = use_stdio
        try:
            await test_nuthatch()
        except Exception as exc:
            print(f"\n❌ {label} test failed: {exc}")
            import traceback
            traceback.print_exc()

    # Test 1: STDIO mode
    print("\n\n### TEST 1: STDIO Transport ###")
    await _run_suite("STDIO", True)

    # Test 2: HTTP mode (only if a non-default server URL is configured)
    configured_url = AgentConfig.NUTHATCH_MCP_URL
    if configured_url and configured_url != "http://localhost:8001/mcp":
        print("\n\n### TEST 2: HTTP Transport ###")
        await _run_suite("HTTP", False)
    else:
        print("\n\n### TEST 2: HTTP Transport (SKIPPED) ###")
        print(" Reason: NUTHATCH_MCP_URL not configured or using default")
        print(" Tip: To test HTTP mode, set NUTHATCH_MCP_URL and start nuthatch_tools.py with --http")

    # Restore original mode
    AgentConfig.NUTHATCH_USE_STDIO = saved_mode

    print("\n" + "="*70)
    print("Both transport tests complete!")
    print("="*70)
|
| 195 |
+
|
| 196 |
+
# Script entry point: `--both` exercises STDIO and HTTP transports in turn;
# otherwise the test honors the .env transport configuration.
if __name__ == "__main__":
    # NOTE: `sys` is already imported at module top; the redundant local
    # `import sys` was removed.
    if "--both" in sys.argv:
        asyncio.run(test_both_transports())
    else:
        asyncio.run(test_nuthatch())
        print("\n💡 Tip: Run with '--both' to test both STDIO and HTTP transports")
|
| 205 |
+
|
tests/test_structured_output.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test structured output parsing for agent responses.
|
| 3 |
+
Tests the parse_agent_response function to ensure it correctly formats
|
| 4 |
+
images, audio, and species information from raw agent text.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import sys
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
|
| 11 |
+
# Add parent directory to sys.path
|
| 12 |
+
parent_dir = Path(__file__).parent.parent
|
| 13 |
+
sys.path.insert(0, str(parent_dir))
|
| 14 |
+
|
| 15 |
+
from langgraph_agent.structured_output import parse_agent_response
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
async def test_structured_output():
    """Test structured output parsing with various response formats."""

    print("=" * 60)
    print("TESTING STRUCTURED OUTPUT PARSING")
    print("=" * 60)

    # Test Case 1: Response with images and species identification
    print("\n[TEST 1]: Response with images and species")
    print("-" * 40)

    test_response_1 = """
Based on the image, I can identify this as a Northern Cardinal. The bright red plumage and distinctive crest are characteristic of this species.

Here are some reference images:
![Northern Cardinal](https://example.com/cardinal1.jpg)
![Northern Cardinal](https://example.com/cardinal2.jpg)

The Northern Cardinal is a beautiful songbird commonly found in North America.
"""

    # NOTE(review): api_key="test-key" suggests parse_agent_response does not
    # actually hit the provider for these inputs — confirm before running
    # in CI without network access.
    result_1 = await parse_agent_response(
        raw_response=test_response_1,
        provider="openai",
        api_key="test-key",
        model="gpt-4o-mini"
    )

    # These are informational prints, not assertions: the script reports the
    # checks but does not fail on them.
    print("Input length:", len(test_response_1))
    print("Output length:", len(result_1))
    print("Contains markdown images:", "![Northern Cardinal]" in result_1)
    print("Contains species name:", "Northern Cardinal" in result_1)
    print("✅ Test 1 completed")

    # Test Case 2: Response with audio recordings
    print("\n[TEST 2]: Response with audio recordings")
    print("-" * 40)

    test_response_2 = """
This appears to be an American Robin. You can listen to its distinctive song here:

Listen to the robin: https://xeno-canto.org/12345/download

Another recording: https://xeno-canto.org/67890/download

The American Robin is known for its cheerful song that signals the arrival of spring.
"""

    result_2 = await parse_agent_response(
        raw_response=test_response_2,
        provider="openai",
        api_key="test-key",
        model="gpt-4o-mini"
    )

    print("Input length:", len(test_response_2))
    print("Output length:", len(result_2))
    print("Contains audio links:", "[Listen to recording" in result_2)
    print("Contains xeno-canto links:", "xeno-canto.org" in result_2)
    print("✅ Test 2 completed")

    # Test Case 3: Response with no media (should return original)
    print("\n[TEST 3]: Response with no media")
    print("-" * 40)

    test_response_3 = """
This appears to be a Blue Jay. Blue Jays are intelligent birds known for their problem-solving abilities and distinctive calls.

They are commonly found in North American forests and suburban areas.
"""

    result_3 = await parse_agent_response(
        raw_response=test_response_3,
        provider="openai",
        api_key="test-key",
        model="gpt-4o-mini"
    )

    print("Input length:", len(test_response_3))
    print("Output length:", len(result_3))
    # With no media present, the parser is expected to pass text through.
    print("Output matches input:", result_3.strip() == test_response_3.strip())
    print("✅ Test 3 completed")

    # Test Case 4: Response with mixed URLs (images and audio)
    print("\n[TEST 4]: Response with mixed media")
    print("-" * 40)

    test_response_4 = """
This is definitely a Scarlet Tanager. Here are some photos and recordings:

Photo: https://example.com/tanager.jpg
Another photo: https://example.com/tanager2.png

Song recording: https://xeno-canto.org/11111/download
Call recording: https://example.com/tanager.mp3

Scarlet Tanagers are known for their striking red plumage.
"""

    result_4 = await parse_agent_response(
        raw_response=test_response_4,
        provider="openai",
        api_key="test-key",
        model="gpt-4o-mini"
    )

    print("Input length:", len(test_response_4))
    print("Output length:", len(result_4))
    print("Contains image section:", "### Images" in result_4)
    print("Contains audio section:", "### Audio Recordings" in result_4)
    print("✅ Test 4 completed")

    print("\n" + "=" * 60)
    print("✅ ALL STRUCTURED OUTPUT TESTS PASSED")
    print("=" * 60)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
# Script entry point: run the structured-output parsing tests standalone.
if __name__ == "__main__":
    asyncio.run(test_structured_output())
|
tests/test_subagents.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Subagent System Test
|
| 3 |
+
|
| 4 |
+
Tests the specialized subagent orchestration system with tool filtering and routing.
|
| 5 |
+
|
| 6 |
+
Usage:
|
| 7 |
+
# Test subagent system (uses .env NUTHATCH_USE_STDIO setting)
|
| 8 |
+
python tests/test_subagents.py
|
| 9 |
+
|
| 10 |
+
# Test individual specialists
|
| 11 |
+
python tests/test_subagents.py --specialist image_identifier
|
| 12 |
+
python tests/test_subagents.py --specialist species_explorer
|
| 13 |
+
python tests/test_subagents.py --specialist taxonomy_specialist
|
| 14 |
+
|
| 15 |
+
Configuration (.env):
|
| 16 |
+
# Ensure STDIO mode for testing
|
| 17 |
+
NUTHATCH_USE_STDIO=true
|
| 18 |
+
NUTHATCH_API_KEY=<your-key>
|
| 19 |
+
|
| 20 |
+
# Required for Modal classifier
|
| 21 |
+
MODAL_MCP_URL=<your-modal-url>
|
| 22 |
+
BIRD_CLASSIFIER_API_KEY=<your-modal-key>
|
| 23 |
+
|
| 24 |
+
# Required for LLM
|
| 25 |
+
OPENAI_API_KEY=<your-key>
|
| 26 |
+
"""
|
| 27 |
+
import asyncio
|
| 28 |
+
import sys
|
| 29 |
+
from pathlib import Path
|
| 30 |
+
|
| 31 |
+
from dotenv import load_dotenv
|
| 32 |
+
|
| 33 |
+
# Add parent directory to sys.path
|
| 34 |
+
parent_dir = Path(__file__).parent.parent
|
| 35 |
+
sys.path.insert(0, str(parent_dir))
|
| 36 |
+
|
| 37 |
+
from langgraph_agent import AgentFactory
|
| 38 |
+
from langgraph_agent.config import AgentConfig
|
| 39 |
+
from langgraph_agent.subagent_config import SubAgentConfig
|
| 40 |
+
from langgraph_agent.subagent_factory import SubAgentFactory
|
| 41 |
+
from langgraph_agent.mcp_clients import MCPClientManager
|
| 42 |
+
from langchain_openai import ChatOpenAI
|
| 43 |
+
|
| 44 |
+
load_dotenv()
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def validate_config():
    """Validate required configuration."""
    # (setting value, message reported when the value is missing)
    required = [
        (AgentConfig.MODAL_MCP_URL, "MODAL_MCP_URL not set"),
        (AgentConfig.BIRD_CLASSIFIER_API_KEY, "BIRD_CLASSIFIER_API_KEY not set"),
        (AgentConfig.NUTHATCH_API_KEY, "NUTHATCH_API_KEY not set"),
        (AgentConfig.OPENAI_API_KEY, "OPENAI_API_KEY not set"),
    ]
    problems = [message for value, message in required if not value]

    # STDIO mode is only a recommendation here, not a hard failure.
    if not AgentConfig.NUTHATCH_USE_STDIO:
        print("\n⚠️ WARNING: NUTHATCH_USE_STDIO is False")
        print(" For this test, STDIO mode is recommended")
        print(" Set NUTHATCH_USE_STDIO=true in .env\n")

    if problems:
        print("\n❌ Configuration Errors:")
        for problem in problems:
            print(f" • {problem}")
        print("\n💡 Check your .env file")
        return False

    return True
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
async def test_tool_filtering():
    """Test that each subagent gets the correct filtered tool set."""
    print("\n" + "=" * 70)
    print("TEST 1: Tool Filtering")
    print("=" * 70)

    # Load the complete tool inventory from every MCP server.
    mcp_client = await MCPClientManager.create_multi_server_client()
    inventory = await MCPClientManager.get_tools(mcp_client)

    print(f"\n[ALL TOOLS]: {len(inventory)} total tools available")
    for entry in inventory:
        print(f" • {entry.name}")

    # Verify each subagent's declared tool list against the live inventory.
    for agent_key, spec in SubAgentConfig.get_subagent_definitions().items():
        declared = spec['tools']
        print(f"\n[{agent_key.upper()}]:")
        print(f" Name: {spec['name']}")
        print(f" Expected tools: {len(declared)}")
        print(f" Tools: {', '.join(declared)}")

        # Keep only the tools this specialist is allowed to see.
        allowed = set(declared)
        visible = [entry for entry in inventory if entry.name in allowed]

        print(f" ✅ Filtered to: {len(visible)} tools")

        if len(visible) != len(declared):
            print(f" ⚠️ Warning: Expected {len(declared)} but got {len(visible)}")
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
async def test_individual_subagent(subagent_name: str):
    """Test a specific subagent with a sample query.

    Args:
        subagent_name: Key into SubAgentConfig.get_subagent_definitions()
            (e.g. "image_identifier", "species_explorer",
            "taxonomy_specialist"). Unknown names print the available keys
            and return early.
    """
    print("\n" + "="*70)
    print(f"TEST 2: Individual Subagent - {subagent_name}")
    print("="*70)

    # Get configuration
    definitions = SubAgentConfig.get_subagent_definitions()

    if subagent_name not in definitions:
        print(f"\n❌ Unknown subagent: {subagent_name}")
        print(f"Available: {', '.join(definitions.keys())}")
        return

    config = definitions[subagent_name]
    print(f"\n[CONFIG]:")
    print(f" Name: {config['name']}")
    print(f" Description: {config['description']}")
    print(f" Tools: {', '.join(config['tools'])}")

    # Create LLM with the per-subagent temperature from the definition.
    llm = ChatOpenAI(
        model=AgentConfig.DEFAULT_OPENAI_MODEL,
        temperature=config['temperature'],
        streaming=True
    )

    # Get tools and create subagent
    client = await MCPClientManager.create_multi_server_client()
    all_tools = await MCPClientManager.get_tools(client)

    print(f"\n[CREATING SUBAGENT]...")
    subagent = await SubAgentFactory.create_subagent(
        subagent_name, all_tools, llm
    )
    print(f"✅ Subagent created successfully")

    # Test queries for each specialist
    test_queries = {
        "image_identifier": "What bird is in this image?",
        "species_explorer": "Tell me about Northern Cardinals",
        "taxonomy_specialist": "What birds are in the Cardinalidae family?"
    }

    # Fall back to a generic prompt for names not in the query table.
    query = test_queries.get(subagent_name, "Help me identify birds")

    print(f"\n[TEST QUERY]: {query}")
    print(f"[RESPONSE]:")
    print("-" * 70)

    try:
        # Note: This will fail without an actual image for image_identifier
        # but shows the subagent is working
        result = await subagent.ainvoke({
            "messages": [{"role": "user", "content": query}]
        })

        if result and "messages" in result:
            for msg in result["messages"]:
                if hasattr(msg, 'content'):
                    print(msg.content)
        else:
            print(result)

    except Exception as e:
        print(f"⚠️ Test query failed (expected for image_identifier without image): {e}")
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
async def test_router():
    """Test the routing logic."""
    print("\n" + "=" * 70)
    print("TEST 3: Router Logic")
    print("=" * 70)

    cases = [
        ("What bird is this?", "image_identifier"),
        ("Identify this photo", "image_identifier"),
        ("Tell me about cardinals", "species_explorer"),
        ("Find birds with red feathers", "species_explorer"),
        ("Show me audio of a robin", "species_explorer"),
        ("What families exist?", "taxonomy_specialist"),
        ("Show me endangered birds", "taxonomy_specialist"),
    ]

    print("\n[ROUTING TESTS]:")
    print(f"Testing {len(cases)} queries...")

    # Ordered keyword tables mirror subagent_router.py: first matching row
    # wins, and species_explorer is the fallback route.
    keyword_routes = [
        (("identify", "what bird", "classify", "image", "photo"), "image_identifier"),
        (("audio", "sound", "call", "song", "find", "search"), "species_explorer"),
        (("family", "families", "conservation", "endangered", "taxonomy"), "taxonomy_specialist"),
    ]

    for query, expected in cases:
        lowered = query.lower()
        chosen = next(
            (route for keywords, route in keyword_routes
             if any(word in lowered for word in keywords)),
            "species_explorer",
        )

        marker = "✅" if chosen == expected else "❌"
        print(f"\n {marker} Query: '{query}'")
        print(f" Expected: {expected}")
        print(f" Got: {chosen}")
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
async def test_full_orchestrator():
    """Exercise end-to-end creation of the subagent orchestrator."""
    banner = "=" * 70
    print("\n" + banner)
    print("TEST 4: Full Orchestrator")
    print(banner)

    # Force the subagent code path on for this test run.
    SubAgentConfig.USE_SUBAGENTS = True

    print(f"\n[CONFIG]:")
    print(f" Subagents enabled: {SubAgentConfig.USE_SUBAGENTS}")
    print(f" OpenAI model: {AgentConfig.DEFAULT_OPENAI_MODEL}")
    print(f" Temperature: {AgentConfig.OPENAI_TEMPERATURE}")

    print(f"\n[CREATING ORCHESTRATOR]...")
    # Same arguments as the app's production orchestrator setup.
    factory_kwargs = dict(
        model=AgentConfig.DEFAULT_OPENAI_MODEL,
        api_key=AgentConfig.OPENAI_API_KEY,
        provider="openai",
        mode="Specialized Subagents (3 Specialists)",
    )
    try:
        orchestrator = await AgentFactory.create_subagent_orchestrator(**factory_kwargs)
    except Exception as exc:
        print(f"❌ Orchestrator creation failed: {exc}")
        import traceback
        traceback.print_exc()
    else:
        print(f"✅ Orchestrator created successfully")
        print(f" Type: {type(orchestrator)}")
async def run_all_tests():
    """Run every subagent test in sequence, aborting early on bad config."""
    divider = "=" * 70
    print("\n" + divider)
    print("SUBAGENT SYSTEM TEST SUITE")
    print(divider)

    # Guard clause: don't run anything if required settings are missing.
    if not validate_config():
        print("\n❌ Test suite aborted due to configuration errors")
        return

    try:
        # 1) Tool filtering
        await test_tool_filtering()

        # 2) Each specialist on its own
        for name in ("image_identifier", "species_explorer", "taxonomy_specialist"):
            await test_individual_subagent(name)

        # 3) Keyword routing
        await test_router()

        # 4) End-to-end orchestrator creation
        await test_full_orchestrator()

        print("\n" + divider)
        print("✅ ALL TESTS COMPLETED")
        print(divider)
    except Exception as err:
        print(f"\n❌ Test suite failed: {err}")
        import traceback
        traceback.print_exc()
if __name__ == "__main__":
|
| 280 |
+
import sys
|
| 281 |
+
|
| 282 |
+
if len(sys.argv) > 1:
|
| 283 |
+
if sys.argv[1] == "--specialist" and len(sys.argv) > 2:
|
| 284 |
+
# Test individual specialist
|
| 285 |
+
specialist_name = sys.argv[2]
|
| 286 |
+
asyncio.run(test_individual_subagent(specialist_name))
|
| 287 |
+
elif sys.argv[1] == "--router":
|
| 288 |
+
# Test router only
|
| 289 |
+
asyncio.run(test_router())
|
| 290 |
+
elif sys.argv[1] == "--tools":
|
| 291 |
+
# Test tool filtering only
|
| 292 |
+
asyncio.run(test_tool_filtering())
|
| 293 |
+
else:
|
| 294 |
+
print("Usage:")
|
| 295 |
+
print(" python tests/test_subagents.py # Run all tests")
|
| 296 |
+
print(" python tests/test_subagents.py --specialist <name> # Test one specialist")
|
| 297 |
+
print(" python tests/test_subagents.py --router # Test routing only")
|
| 298 |
+
print(" python tests/test_subagents.py --tools # Test tool filtering")
|
| 299 |
+
else:
|
| 300 |
+
# Run all tests
|
| 301 |
+
asyncio.run(run_all_tests())
|
tests/theme_builder.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Dev utility: launches Gradio's interactive theme-builder UI so a custom
# theme can be designed for the app. NOTE(review): presumably run manually
# (not via pytest) since it starts a UI — confirm it is excluded from CI.
import gradio as gr

gr.themes.builder()
|